Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-11 21:49:26 +00:00)

Compare commits: testnet-2...68060b4efc (45 commits)
Commits (SHA1):
68060b4efc
4af83bd0e5
d4b22e5136
58fe79da10
3f07dd13c6
4e1d86dae2
7ef21830a5
8cb4c5d167
f9e4b420ed
817b8e99d3
925cef17f2
3283cd79e4
51e2f24bc1
372e29fe08
fccb1aea51
a25e6330bd
558a2bfa46
c73acb3d62
933b17aa91
5fa7e3d450
749d783b1e
5a3ea80943
fddbebc7c0
e01848aa9e
320b5627b5
be7780e69d
0ddbaefb38
0f0db14f05
43083dfd49
523d2ac911
fd4f247917
ac9e356af4
bba7d2a356
4c349ae605
a4428761f7
940e9553fd
593aefd229
5830c2463d
bcc88c3e86
fea16df567
4960c3222e
6b4df4f2c0
dac46c8d7d
db2e8376df
33dd412e67
.github/actions/bitcoin/action.yml (vendored): 2 changes

@@ -5,7 +5,7 @@ inputs:
  version:
    description: "Version to download and run"
    required: false
-    default: 24.0.1
+    default: "27.0"

runs:
  using: "composite"
.github/actions/test-dependencies/action.yml (vendored): 6 changes

@@ -10,7 +10,7 @@ inputs:
  bitcoin-version:
    description: "Bitcoin version to download and run as a regtest node"
    required: false
-    default: 24.0.1
+    default: "27.0"

runs:
  using: "composite"
@@ -19,9 +19,9 @@ runs:
      uses: ./.github/actions/build-dependencies

    - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
+      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
      with:
-        version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
+        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
        cache: false

    - name: Run a Monero Regtest Node
.github/workflows/coins-tests.yml (vendored): 1 change

@@ -30,6 +30,7 @@ jobs:
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p bitcoin-serai \
+            -p alloy-simple-request-transport \
            -p ethereum-serai \
            -p monero-generators \
            -p monero-serai
.github/workflows/common-tests.yml (vendored): 3 changes

@@ -28,4 +28,5 @@ jobs:
            -p std-shims \
            -p zalloc \
            -p serai-db \
-            -p serai-env
+            -p serai-env \
+            -p simple-request
.github/workflows/coordinator-tests.yml (vendored): 2 changes

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/full-stack-tests.yml (vendored): 2 changes

@@ -19,4 +19,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/message-queue-tests.yml (vendored): 2 changes

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/processor-tests.yml (vendored): 2 changes

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/reproducible-runtime.yml (vendored): 2 changes

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/tests.yml (vendored): 3 changes

@@ -43,6 +43,7 @@ jobs:
            -p tendermint-machine \
            -p tributary-chain \
            -p serai-coordinator \
            -p serai-orchestrator \
            -p serai-docker-tests

  test-substrate:
@@ -64,7 +65,9 @@
            -p serai-validator-sets-pallet \
            -p serai-in-instructions-primitives \
            -p serai-in-instructions-pallet \
            -p serai-signals-primitives \
            -p serai-signals-pallet \
            -p serai-abi \
            -p serai-runtime \
            -p serai-node
Cargo.lock (generated): 1464 changes (diff suppressed because it is too large)
Cargo.toml

@@ -36,6 +36,7 @@ members = [
  "crypto/schnorrkel",

  "coins/bitcoin",
+  "coins/ethereum/alloy-simple-request-transport",
  "coins/ethereum",
  "coins/monero/generators",
  "coins/monero",
@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
    msg: &[u8],
  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
-      panic!("message was passed to the TransactionMachine when it generates its own");
+      panic!("message was passed to the TransactionSignMachine when it generates its own");
    }

    let commitments = (0 .. self.sigs.len())
coins/ethereum/.gitignore (vendored): 4 changes

@@ -1,7 +1,3 @@
# Solidity build outputs
cache
artifacts
-
-# Auto-generated ABI files
-src/abi/schnorr.rs
-src/abi/router.rs
coins/ethereum/Cargo.toml

@@ -18,28 +18,29 @@ workspace = true

[dependencies]
thiserror = { version = "1", default-features = false }
eyre = { version = "0.6", default-features = false }

sha3 = { version = "0.10", default-features = false, features = ["std"] }

group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }

ethers-core = { version = "2", default-features = false }
ethers-providers = { version = "2", default-features = false }
ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

[build-dependencies]
ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
serde = { version = "1", default-features = false, features = ["std"] }
serde_json = { version = "1", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }

sha2 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }

alloy-core = { version = "0.7", default-features = false }
alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] }
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

[dev-dependencies]
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }

tokio = { version = "1", features = ["macros"] }

alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

[features]
tests = []
coins/ethereum/README.md

@@ -3,6 +3,12 @@
This package contains Ethereum-related functionality, specifically deploying and
interacting with Serai contracts.

+While `monero-serai` and `bitcoin-serai` are general purpose libraries,
+`ethereum-serai` is Serai specific. If any of the utilities are generally
+desired, please fork and maintain your own copy to ensure the desired
+functionality is preserved, or open an issue to request we make this library
+general purpose.
+
### Dependencies

- solc
coins/ethereum/alloy-simple-request-transport/Cargo.toml (new file, 29 lines)

[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tower = "0.4"

serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }

alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }

[features]
default = ["tls"]
tls = ["simple-request/tls"]
coins/ethereum/alloy-simple-request-transport/LICENSE (new file, 21 lines)

MIT License

Copyright (c) 2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
coins/ethereum/alloy-simple-request-transport/README.md (new file, 4 lines)

# Alloy Simple Request Transport

A transport for alloy based on simple-request, a small HTTP client built around
hyper.
coins/ethereum/alloy-simple-request-transport/src/lib.rs (new file, 60 lines)

#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

use core::task;
use std::io;

use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_transport::{TransportError, TransportErrorKind, TransportFut};

use simple_request::{hyper, Request, Client};

use tower::Service;

#[derive(Clone, Debug)]
pub struct SimpleRequest {
  client: Client,
  url: String,
}

impl SimpleRequest {
  pub fn new(url: String) -> Self {
    Self { client: Client::with_connection_pool(), url }
  }
}

impl Service<RequestPacket> for SimpleRequest {
  type Response = ResponsePacket;
  type Error = TransportError;
  type Future = TransportFut<'static>;

  #[inline]
  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
    task::Poll::Ready(Ok(()))
  }

  #[inline]
  fn call(&mut self, req: RequestPacket) -> Self::Future {
    let inner = self.clone();
    Box::pin(async move {
      let packet = req.serialize().map_err(TransportError::SerError)?;
      let request = Request::from(
        hyper::Request::post(&inner.url)
          .header("Content-Type", "application/json")
          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
          .unwrap(),
      );

      let mut res = inner
        .client
        .request(request)
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
        .body()
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;

      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
    })
  }
}
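Usage-wise, `SimpleRequest` is only a `tower::Service` from `RequestPacket` to `ResponsePacket`; alloy's client layers on top of it. A minimal sketch of wiring it into a provider follows; the `ClientBuilder::transport(transport, is_local)` and `RootProvider::new` calls are assumptions about the pinned alloy revision's API, not anything this diff defines:

use std::sync::Arc;

use alloy_rpc_client::ClientBuilder;
use alloy_provider::RootProvider;
use alloy_simple_request_transport::SimpleRequest;

fn provider(url: String) -> Arc<RootProvider<SimpleRequest>> {
  // Build an RPC client over this transport; `true` flags the transport as local
  let client = ClientBuilder::default().transport(SimpleRequest::new(url), true);
  Arc::new(RootProvider::new(client))
}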
coins/ethereum/build.rs

@@ -1,7 +1,5 @@
use std::process::Command;

-use ethers_contract::Abigen;
-
fn main() {
  println!("cargo:rerun-if-changed=contracts/*");
  println!("cargo:rerun-if-changed=artifacts/*");
@@ -21,22 +19,23 @@ fn main() {
    "--base-path", ".",
    "-o", "./artifacts", "--overwrite",
    "--bin", "--abi",
-    "--optimize",
-    "./contracts/Schnorr.sol", "./contracts/Router.sol",
+    "--via-ir", "--optimize",
+
+    "./contracts/IERC20.sol",
+
+    "./contracts/Schnorr.sol",
+    "./contracts/Deployer.sol",
+    "./contracts/Sandbox.sol",
+    "./contracts/Router.sol",
+
+    "./src/tests/contracts/Schnorr.sol",
+    "./src/tests/contracts/ERC20.sol",
+
+    "--no-color",
  ];
-  assert!(Command::new("solc").args(args).status().unwrap().success());
-
-  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/schnorr.rs")
-    .unwrap();
-
-  Abigen::new("Router", "./artifacts/Router.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/router.rs")
-    .unwrap();
+  let solc = Command::new("solc").args(args).output().unwrap();
+  assert!(solc.status.success());
+  for line in String::from_utf8(solc.stderr).unwrap().lines() {
+    assert!(!line.starts_with("Error:"));
+  }
}
coins/ethereum/contracts/Deployer.sol (new file, 52 lines)

// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

/*
  The expected deployment process of the Router is as follows:

  1) A transaction deploying Deployer is made. Then, a deterministic signature is
     created such that an account with an unknown private key is the creator of
     the contract. Anyone can fund this address, and once anyone does, the
     transaction deploying Deployer can be published by anyone. No other
     transaction may be made from that account.

  2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
     such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
     While such attacks would still be feasible if the Deployer's address was
     controllable, the usage of a deterministic signature with a NUMS method
     prevents that.

  This doesn't have any denial-of-service risks and will resolve once anyone steps
  forward as deployer. This does fail to guarantee an identical address across
  every chain, though it enables letting anyone efficiently ask the Deployer for
  the address (with the Deployer having an identical address on every chain).

  Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
  Deployer contract to use a consistent salt for the Router, yet the Router must
  be deployed with a specific public key for Serai. Since Ethereum isn't able to
  determine a valid public key (one the result of a Serai DKG) from a dishonest
  public key, we have to allow multiple deployments with Serai being the one to
  determine which to use.

  The alternative would be to have a council publish the Serai key on-Ethereum,
  with Serai verifying the published result. This would introduce a DoS risk in
  the council not publishing the correct key/not publishing any key.
*/

contract Deployer {
  event Deployment(bytes32 indexed init_code_hash, address created);

  error DeploymentFailed();

  function deploy(bytes memory init_code) external {
    address created;
    assembly {
      created := create(0, add(init_code, 0x20), mload(init_code))
    }
    if (created == address(0)) {
      revert DeploymentFailed();
    }
    // These may be emitted out of order upon re-entrancy
    emit Deployment(keccak256(init_code), created);
  }
}
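For context on the 2**80 figure in the comment above: addresses are 160 bits, so an attacker free to grind one side of the derivation gets a birthday collision after roughly $2^{80} = \sqrt{2^{160}}$ attempts. The standard address-derivation formulas (general Ethereum facts, not part of this diff) show where that freedom would come from:

$a_{\mathrm{CREATE}} = \mathrm{keccak256}(\mathrm{rlp}([\mathit{sender}, \mathit{nonce}]))[12..32]$
$a_{\mathrm{CREATE2}} = \mathrm{keccak256}(\mathtt{0xff} \,\|\, \mathit{sender} \,\|\, \mathit{salt} \,\|\, \mathrm{keccak256}(\mathit{init\_code}))[12..32]$

CREATE2's attacker-chosen salt (or a controllable sender) supplies the grindable degree of freedom; here the sender is fixed by the deterministic NUMS signature and the nonce is sequential, leaving nothing to grind.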
coins/ethereum/contracts/IERC20.sol (new file, 20 lines)

// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;

interface IERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() external view returns (string memory);
  function symbol() external view returns (string memory);
  function decimals() external view returns (uint8);

  function totalSupply() external view returns (uint256);

  function balanceOf(address owner) external view returns (uint256);
  function transfer(address to, uint256 value) external returns (bool);
  function transferFrom(address from, address to, uint256 value) external returns (bool);

  function approve(address spender, uint256 value) external returns (bool);
  function allowance(address owner, address spender) external view returns (uint256);
}
coins/ethereum/contracts/Router.sol

@@ -1,27 +1,24 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

+import "./IERC20.sol";
+
import "./Schnorr.sol";
+import "./Sandbox.sol";

-contract Router is Schnorr {
-  // Contract initializer
-  // TODO: Replace with a MuSig of the genesis validators
-  address public initializer;
-
-  // Nonce is incremented for each batch of transactions executed
+contract Router {
+  // Nonce is incremented for each batch of transactions executed/key update
  uint256 public nonce;

-  // fixed parity for the public keys used in this contract
-  uint8 constant public KEY_PARITY = 27;
-
-  // current public key's x-coordinate
-  // note: this key must always use the fixed parity defined above
+  // Current public key's x-coordinate
+  // This key must always have the parity defined within the Schnorr contract
  bytes32 public seraiKey;

  struct OutInstruction {
    address to;
+    Call[] calls;
+
    uint256 value;
-    bytes data;
  }

  struct Signature {
@@ -29,62 +26,197 @@ contract Router is Schnorr {
    bytes32 s;
  }

+  event SeraiKeyUpdated(
+    uint256 indexed nonce,
+    bytes32 indexed key,
+    Signature signature
+  );
+  event InInstruction(
+    address indexed from,
+    address indexed coin,
+    uint256 amount,
+    bytes instruction
+  );
  // success is a uint256 representing a bitfield of transaction successes
-  event Executed(uint256 nonce, bytes32 batch, uint256 success);
+  event Executed(
+    uint256 indexed nonce,
+    bytes32 indexed batch,
+    uint256 success,
+    Signature signature
+  );

  // error types
-  error NotInitializer();
-  error AlreadyInitialized();
  error InvalidKey();
  error InvalidSignature();
+  error InvalidAmount();
+  error FailedTransfer();
  error TooManyTransactions();

-  constructor() {
-    initializer = msg.sender;
+  modifier _updateSeraiKeyAtEndOfFn(
+    uint256 _nonce,
+    bytes32 key,
+    Signature memory sig
+  ) {
+    if (
+      (key == bytes32(0)) ||
+      ((bytes32(uint256(key) % Schnorr.Q)) != key)
+    ) {
+      revert InvalidKey();
+    }
+
+    _;
+
+    seraiKey = key;
+    emit SeraiKeyUpdated(_nonce, key, sig);
  }

-  // initSeraiKey can be called by the contract initializer to set the first
-  // public key, only if the public key has yet to be set.
-  function initSeraiKey(bytes32 _seraiKey) external {
-    if (msg.sender != initializer) revert NotInitializer();
-    if (seraiKey != 0) revert AlreadyInitialized();
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    seraiKey = _seraiKey;
+  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
+    0,
+    _seraiKey,
+    Signature({ c: bytes32(0), s: bytes32(0) })
+  ) {
+    nonce = 1;
  }

-  // updateSeraiKey validates the given Schnorr signature against the current public key,
-  // and if successful, updates the contract's public key to the given one.
+  // updateSeraiKey validates the given Schnorr signature against the current
+  // public key, and if successful, updates the contract's public key to the
+  // given one.
  function updateSeraiKey(
    bytes32 _seraiKey,
-    Signature memory sig
-  ) public {
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-    seraiKey = _seraiKey;
+    Signature calldata sig
+  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
+    bytes memory message =
+      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
+    nonce++;
+
+    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
+      revert InvalidSignature();
+    }
  }

-  // execute accepts a list of transactions to execute as well as a Schnorr signature.
+  function inInstruction(
+    address coin,
+    uint256 amount,
+    bytes memory instruction
+  ) external payable {
+    if (coin == address(0)) {
+      if (amount != msg.value) {
+        revert InvalidAmount();
+      }
+    } else {
+      (bool success, bytes memory res) =
+        address(coin).call(
+          abi.encodeWithSelector(
+            IERC20.transferFrom.selector,
+            msg.sender,
+            address(this),
+            amount
+          )
+        );
+
+      // Require there was nothing returned, which is done by some non-standard
+      // tokens, or that the ERC20 contract did in fact return true
+      bool nonStandardResOrTrue =
+        (res.length == 0) || abi.decode(res, (bool));
+      if (!(success && nonStandardResOrTrue)) {
+        revert FailedTransfer();
+      }
+    }
+
+    /*
+      Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
+      The amount instructed to transfer may not actually be the amount
+      transferred.
+
+      If we add nonReentrant to every single function which can effect the
+      balance, we can check the amount exactly matches. This prevents transfers of
+      less value than expected occurring, at least, not without an additional
+      transfer to top up the difference (which isn't routed through this contract
+      and accordingly isn't trying to artificially create events).
+
+      If we don't add nonReentrant, a transfer can be started, and then a new
+      transfer for the difference can follow it up (again and again until a
+      rounding error is reached). This contract would believe all transfers were
+      done in full, despite each only being done in part (except for the last
+      one).
+
+      Given fee-on-transfer tokens aren't intended to be supported, the only
+      token planned to be supported is Dai and it doesn't have any fee-on-transfer
+      logic, fee-on-transfer tokens aren't even able to be supported at this time,
+      we simply classify this entire class of tokens as non-standard
+      implementations which induce undefined behavior. It is the Serai network's
+      role not to add support for any non-standard implementations.
+    */
+    emit InInstruction(msg.sender, coin, amount, instruction);
+  }
+
+  // execute accepts a list of transactions to execute as well as a signature.
+  // if signature verification passes, the given transactions are executed.
+  // if signature verification fails, this function will revert.
  function execute(
    OutInstruction[] calldata transactions,
-    Signature memory sig
-  ) public {
-    if (transactions.length > 256) revert TooManyTransactions();
+    Signature calldata sig
+  ) external {
+    if (transactions.length > 256) {
+      revert TooManyTransactions();
+    }

-    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
+    bytes memory message =
+      abi.encode("execute", block.chainid, nonce, transactions);
+    uint256 executed_with_nonce = nonce;
    // This prevents re-entrancy from causing double spends yet does allow
    // out-of-order execution via re-entrancy
    nonce++;
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
+
+    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
+      revert InvalidSignature();
+    }

    uint256 successes;
-    for(uint256 i = 0; i < transactions.length; i++) {
-      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
+    for (uint256 i = 0; i < transactions.length; i++) {
+      bool success;
+
+      // If there are no calls, send to `to` the value
+      if (transactions[i].calls.length == 0) {
+        (success, ) = transactions[i].to.call{
+          value: transactions[i].value,
+          gas: 5_000
+        }("");
+      } else {
+        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
+        // calls through that
+        //
+        // We could use a single sandbox in order to reduce gas costs, yet that
+        // risks one person creating an approval that's hooked before another
+        // user's intended action executes, in order to drain their coins
+        //
+        // While technically, that would be a flaw in the sandboxed flow, this
+        // is robust and prevents such flaws from being possible
+        //
+        // We also don't want people to set state via the Sandbox and expect it
+        // future available when anyone else could set a distinct value
+        Sandbox sandbox = new Sandbox();
+        (success, ) = address(sandbox).call{
+          value: transactions[i].value,
+          // TODO: Have the Call specify the gas up front
+          gas: 350_000
+        }(
+          abi.encodeWithSelector(
+            Sandbox.sandbox.selector,
+            transactions[i].calls
+          )
+        );
+      }

      assembly {
        successes := or(successes, shl(i, success))
      }
    }
-    emit Executed(nonce, message, successes);
+    emit Executed(
+      executed_with_nonce,
+      keccak256(message),
+      successes,
+      sig
+    );
  }
}
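The `successes` value emitted by `Executed` above packs one bit per OutInstruction, with bit i set when transaction i succeeded (hence the 256-transaction cap). A minimal sketch of decoding it on the Rust side; this helper is illustrative, not part of the diff:

use alloy_core::primitives::U256;

// Illustrative helper: whether OutInstruction `i` in a batch succeeded,
// given the `success` bitfield from an Executed event
fn out_instruction_succeeded(successes: U256, i: usize) -> bool {
  assert!(i < 256, "the Router caps batches at 256 transactions");
  ((successes >> i) & U256::from(1u8)) != U256::ZERO
}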
coins/ethereum/contracts/Sandbox.sol (new file, 48 lines)

// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;

struct Call {
  address to;
  uint256 value;
  bytes data;
}

// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
  error AlreadyCalled();
  error CallsFailed();

  function sandbox(Call[] calldata calls) external payable {
    // Prevent re-entrancy due to this executing arbitrary calls from anyone
    // and anywhere
    bool called;
    assembly { called := tload(0) }
    if (called) {
      revert AlreadyCalled();
    }
    assembly { tstore(0, 1) }

    // Execute the calls, starting from 1
    for (uint256 i = 1; i < calls.length; i++) {
      (bool success, ) =
        calls[i].to.call{ value: calls[i].value }(calls[i].data);

      // If this call failed, execute the fallback (call 0)
      if (!success) {
        (success, ) =
          calls[0].to.call{ value: address(this).balance }(calls[0].data);
        // If this call also failed, revert entirely
        if (!success) {
          revert CallsFailed();
        }
        return;
      }
    }

    // We don't clear the re-entrancy guard as this contract should never be
    // called again, so there's no reason to spend the effort
  }
}
coins/ethereum/contracts/Schnorr.sol

@@ -2,38 +2,43 @@
pragma solidity ^0.8.0;

// see https://github.com/noot/schnorr-verify for implementation details
-contract Schnorr {
+library Schnorr {
  // secp256k1 group order
  uint256 constant public Q =
    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

-  error InvalidSOrA();
-  error InvalidSignature();
+  // Fixed parity for the public keys used in this contract
+  // This avoids spending a word passing the parity in a similar style to
+  // Bitcoin's Taproot
+  uint8 constant public KEY_PARITY = 27;

-  // parity := public key y-coord parity (27 or 28)
-  // px := public key x-coord
+  error InvalidSOrA();
+  error MalformedSignature();
+
+  // px := public key x-coord, where the public key has a parity of KEY_PARITY
  // message := 32-byte hash of the message
  // c := schnorr signature challenge
  // s := schnorr signature
  function verify(
-    uint8 parity,
    bytes32 px,
-    bytes32 message,
+    bytes memory message,
    bytes32 c,
    bytes32 s
-  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s);
+  ) internal pure returns (bool) {
+    // ecrecover = (m, v, r, s) -> key
+    // We instead pass the following to obtain the nonce (not the key)
+    // Then we hash it and verify it matches the challenge
    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));

-    // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
-    // check if they're zero.
+    // For safety, we want each input to ecrecover to be 0 (sa, px, ca)
+    // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero
+    // That leaves us to check `sa` are non-zero
    if (sa == 0) revert InvalidSOrA();
-    address R = ecrecover(sa, parity, px, ca);
-    if (R == address(0)) revert InvalidSignature();
-    return c == keccak256(
-      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
-    );
+    address R = ecrecover(sa, KEY_PARITY, px, ca);
+    if (R == address(0)) revert MalformedSignature();
+
+    // Check the signature is correct by rebuilding the challenge
+    return c == keccak256(abi.encodePacked(R, px, message));
  }
}
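For reference, the algebra behind the ecrecover trick, reconstructed from the code above using ecrecover's standard semantics (this derivation is commentary, not part of the diff). ecrecover(h, v, r, s) returns $\mathrm{address}(r^{-1}(s \cdot R_{pt} - h \cdot G))$, where $R_{pt}$ is the curve point with x-coordinate $r$ and y-parity $v$. The library passes $h = -s \cdot p_x$, $r = p_x$ (so $R_{pt} = A$, the public key), and $-c \cdot p_x$ in the s slot, all mod $Q$:

$\mathrm{ecrecover}(-s p_x,\ 27,\ p_x,\ -c p_x) = \mathrm{address}\big(p_x^{-1}((-c p_x) A - (-s p_x) G)\big) = \mathrm{address}(s G - c A)$

For a valid Schnorr signature, $s G - c A$ is exactly the nonce commitment $R$, so the final comparison `c == keccak256(abi.encodePacked(R, px, message))` rebuilds the challenge and checks it matches.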
coins/ethereum/src/abi/mod.rs

@@ -1,6 +1,37 @@
+use alloy_sol_types::sol;
+
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
-pub(crate) mod schnorr;
-pub(crate) mod router;
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod erc20_container {
+  use super::*;
+  sol!("contracts/IERC20.sol");
+}
+pub use erc20_container::IERC20 as erc20;
+
+#[rustfmt::skip]
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
+#[allow(clippy::all)]
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod deployer_container {
+  use super::*;
+  sol!("contracts/Deployer.sol");
+}
+pub use deployer_container::Deployer as deployer;
+
+#[rustfmt::skip]
+#[allow(warnings)]
+#[allow(needless_pass_by_value)]
+#[allow(clippy::all)]
+#[allow(clippy::ignored_unit_patterns)]
+#[allow(clippy::redundant_closure_for_method_calls)]
+mod router_container {
+  use super::*;
+  sol!(Router, "artifacts/Router.abi");
+}
+pub use router_container::Router as router;
coins/ethereum/src/crypto.rs

@@ -1,91 +1,185 @@
-use sha3::{Digest, Keccak256};
-
use group::ff::PrimeField;
use k256::{
-  elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
-  },
-  ProjectivePoint, Scalar, U256,
+  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
+  ProjectivePoint, Scalar, U256 as KU256,
};
+#[cfg(test)]
+use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

use frost::{
  algorithm::{Hram, SchnorrSignature},
-  curve::Secp256k1,
+  curve::{Ciphersuite, Secp256k1},
};

+use alloy_core::primitives::{Parity, Signature as AlloySignature};
+use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
+
+use crate::abi::router::{Signature as AbiSignature};
+
pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
-  Keccak256::digest(data).into()
+  alloy_core::primitives::keccak256(data).into()
}

-pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
+  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
+}
+
+pub fn address(point: &ProjectivePoint) -> [u8; 20] {
  let encoded_point = point.to_encoded_point(false);
  // Last 20 bytes of the hash of the concatenated x and y coordinates
+  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}

+pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
+  assert!(
+    tx.chain_id.is_none(),
+    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
+  );
+
+  let sig_hash = tx.signature_hash().0;
+  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
+  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
+  loop {
+    let r_bytes: [u8; 32] = r.to_repr().into();
+    let s_bytes: [u8; 32] = s.to_repr().into();
+    let v = Parity::NonEip155(false);
+    let signature =
+      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
+    let tx = tx.clone().into_signed(signature);
+    if tx.recover_signer().is_ok() {
+      return tx;
+    }
+
+    // Re-hash until valid
+    r = hash_to_scalar(r_bytes.as_ref());
+    s = hash_to_scalar(s_bytes.as_ref());
+  }
+}
+
/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
-  pub A: ProjectivePoint,
-  pub px: Scalar,
-  pub parity: u8,
+  pub(crate) A: ProjectivePoint,
+  pub(crate) px: Scalar,
}

impl PublicKey {
  /// Construct a new `PublicKey`.
  ///
  /// This will return None if the provided point isn't eligible to be a public key (due to
  /// bounds such as parity).
  #[allow(non_snake_case)]
  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
    let affine = A.to_affine();
-    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
-    if parity != 27 {
+    // Only allow even keys to save a word within Ethereum
+    let is_odd = bool::from(affine.y_is_odd());
+    if is_odd {
      None?;
    }

    let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
+    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
+    // Return None if a reduction would occur
+    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
+    // headache/concern to have
+    // This does ban a trivial amoount of public keys
    if x_coord_scalar.to_repr() != x_coord {
      None?;
    }

-    Some(PublicKey { A, px: x_coord_scalar, parity })
+    Some(PublicKey { A, px: x_coord_scalar })
  }

+  pub fn point(&self) -> ProjectivePoint {
+    self.A
+  }
+
+  pub(crate) fn eth_repr(&self) -> [u8; 32] {
+    self.px.to_repr().into()
+  }
+
+  #[cfg(test)]
+  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
+    #[allow(non_snake_case)]
+    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
+    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
+  }
}

/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
  #[allow(non_snake_case)]
  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    let a_encoded_point = A.to_encoded_point(true);
-    let mut a_encoded = a_encoded_point.as_ref().to_owned();
-    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
-    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
+    let x_coord = A.to_affine().x();
+
    let mut data = address(R).to_vec();
-    data.append(&mut a_encoded);
+    data.extend(x_coord.as_slice());
    data.extend(m);
-    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
+
+    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
  }
}

/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
  pub(crate) c: Scalar,
  pub(crate) s: Scalar,
}
impl Signature {
+  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
+    #[allow(non_snake_case)]
+    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
+    EthereumHram::hram(&R, &public_key.A, message) == self.c
+  }
+
  /// Construct a new `Signature`.
  ///
  /// This will return None if the signature is invalid.
  pub fn new(
    public_key: &PublicKey,
-    chain_id: U256,
-    m: &[u8],
+    message: &[u8],
    signature: SchnorrSignature<Secp256k1>,
  ) -> Option<Signature> {
-    let c = EthereumHram::hram(
-      &signature.R,
-      &public_key.A,
-      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-    );
+    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
    if !signature.verify(public_key.A, c) {
      None?;
    }
-    Some(Signature { c, s: signature.s })
+
+    let res = Signature { c, s: signature.s };
+    assert!(res.verify(public_key, message));
+    Some(res)
  }

+  pub fn c(&self) -> Scalar {
+    self.c
+  }
+  pub fn s(&self) -> Scalar {
+    self.s
+  }
+
  pub fn to_bytes(&self) -> [u8; 64] {
    let mut res = [0; 64];
    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
    res
  }

+  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
+    let mut reader = bytes.as_slice();
+    let c = Secp256k1::read_F(&mut reader)?;
+    let s = Secp256k1::read_F(&mut reader)?;
+    Ok(Signature { c, s })
+  }
}
+impl From<&Signature> for AbiSignature {
+  fn from(sig: &Signature) -> AbiSignature {
+    let c: [u8; 32] = sig.c.to_repr().into();
+    let s: [u8; 32] = sig.s.to_repr().into();
+    AbiSignature { c: c.into(), s: s.into() }
+  }
+}
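A short sketch of how the items above compose; `aggregate_key` and `frost_sig` stand in for outputs of a FROST DKG and signing session, and are hypothetical placeholders rather than anything defined in this diff:

use k256::ProjectivePoint;
use frost::{curve::Secp256k1, algorithm::SchnorrSignature};
use ethereum_serai::crypto::{PublicKey, Signature};

fn check(aggregate_key: ProjectivePoint, frost_sig: SchnorrSignature<Secp256k1>, msg: &[u8]) {
  // None for odd-parity keys or x-coordinates which would reduce mod the scalar field
  let key = PublicKey::new(aggregate_key).expect("key ineligible for the Schnorr contract");
  // Binds the challenge to keccak256(address(R) || px || msg), per EthereumHram
  let sig = Signature::new(&key, msg, frost_sig).expect("signature didn't verify");
  // 64-byte serialization round trip: c || s
  let restored = Signature::from_bytes(sig.to_bytes()).unwrap();
  assert!(restored.verify(&key, msg));
}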
coins/ethereum/src/deployer.rs (new file, 119 lines)

use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. This account has no known private key to anyone,
  /// so ETH sent can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    #[cfg(not(test))]
    let required_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let required_block = BlockNumberOrTag::Latest;
    let code = provider
      .get_code_at(address.into(), required_block.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `ContractCall` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
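Putting the Deployer's API together, the deployment flow the contract comment described earlier looks roughly like this; the provider is assumed to already exist, and publishing the raw deployment transaction is left out:

use std::sync::Arc;

use ethereum_serai::{
  alloy_provider::RootProvider,
  alloy_simple_request_transport::SimpleRequest,
  crypto::PublicKey,
  deployer::Deployer,
};

async fn deploy_flow(provider: Arc<RootProvider<SimpleRequest>>, key: &PublicKey) {
  // 1) The pre-signed deployment TX; anyone may fund its `from` address
  //    (recovered below) and then publish it
  let deployment_tx = Deployer::deployment_tx();
  let _fund_me = deployment_tx.recover_signer().unwrap();

  // 2) Once mined, the Deployer sits at the same deterministic address on every chain
  let deployer = Deployer::new(provider.clone()).await.unwrap().expect("Deployer not deployed");

  // 3) Anyone may deploy the Router for Serai's key; Serai then locates it via
  //    the Deployment log keyed by the Router's init code hash
  let _router_tx = deployer.deploy_router(key);
  let _router = deployer.find_router(provider, key).await.unwrap().expect("no Router for this key");
}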
coins/ethereum/src/erc20.rs (new file, 118 lines)

use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct ERC20(Arc<RootProvider<SimpleRequest>>, Address);
impl ERC20 {
  /// Construct a new view of the specified ERC20 contract.
  ///
  /// This checks a contract is deployed at that address yet does not check the contract is
  /// actually an ERC20.
  pub async fn new(
    provider: Arc<RootProvider<SimpleRequest>>,
    address: [u8; 20],
  ) -> Result<Option<Self>, Error> {
    let code = provider
      .get_code_at(address.into(), BlockNumberOrTag::Finalized.into())
      .await
      .map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self(provider.clone(), Address::from(&address))))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }
    Ok(top_level_transfers)
  }
}
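A sketch of scanning a block with the view above; the token address, block number, and Serai-controlled address are hypothetical inputs:

use std::sync::Arc;

use ethereum_serai::{
  alloy_provider::RootProvider,
  alloy_simple_request_transport::SimpleRequest,
  erc20::ERC20,
};

async fn scan(provider: Arc<RootProvider<SimpleRequest>>, token: [u8; 20], serai: [u8; 20]) {
  let erc20 = ERC20::new(provider, token).await.unwrap().expect("token isn't deployed");
  // Yields only transfers whose top-level call was transfer/transferFrom to `serai`,
  // together with any InInstruction data appended after the ABI-encoded call
  for transfer in erc20.top_level_transfers(1_000_000, serai).await.unwrap() {
    println!("transfer of {} with {} bytes appended", transfer.amount, transfer.data.len());
  }
}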
coins/ethereum/src/lib.rs

@@ -1,16 +1,30 @@
use thiserror::Error;

+pub use alloy_core;
+pub use alloy_consensus;
+
+pub use alloy_rpc_types;
+pub use alloy_simple_request_transport;
+pub use alloy_rpc_client;
+pub use alloy_provider;
+
pub mod crypto;

pub(crate) mod abi;
-pub mod schnorr;
+
+pub mod erc20;
+pub mod deployer;
pub mod router;

+pub mod machine;
+
#[cfg(test)]
mod tests;

-#[derive(Error, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
  #[error("failed to verify Schnorr signature")]
  InvalidSignature,
+  #[error("couldn't make call/send TX")]
+  ConnectionError,
}
414
coins/ethereum/src/machine.rs
Normal file
414
coins/ethereum/src/machine.rs
Normal file
@@ -0,0 +1,414 @@
|
||||
use std::{
|
||||
io::{self, Read},
|
||||
collections::HashMap,
|
||||
};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng};
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
|
||||
use group::GroupEncoding;
|
||||
use frost::{
|
||||
curve::{Ciphersuite, Secp256k1},
|
||||
Participant, ThresholdKeys, FrostError,
|
||||
algorithm::Schnorr,
|
||||
sign::*,
|
||||
};
|
||||
|
||||
use alloy_core::primitives::U256;
|
||||
|
||||
use crate::{
|
||||
crypto::{PublicKey, EthereumHram, Signature},
|
||||
router::{
|
||||
abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
|
||||
Router,
|
||||
},
|
||||
};
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct Call {
|
||||
pub to: [u8; 20],
|
||||
pub value: U256,
|
||||
pub data: Vec<u8>,
|
||||
}
|
||||
impl Call {
|
||||
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let mut to = [0; 20];
|
||||
reader.read_exact(&mut to)?;
|
||||
|
||||
let value = {
|
||||
let mut value_bytes = [0; 32];
|
||||
reader.read_exact(&mut value_bytes)?;
|
||||
U256::from_le_slice(&value_bytes)
|
||||
};
|
||||
|
||||
let mut data_len = {
|
||||
let mut data_len = [0; 4];
|
||||
reader.read_exact(&mut data_len)?;
|
||||
usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
|
||||
};
|
||||
|
||||
// A valid DoS would be to claim a 4 GB data is present for only 4 bytes
|
||||
// We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
|
||||
let mut data = vec![];
|
||||
while data_len > 0 {
|
||||
let chunk_len = data_len.min(1024);
|
||||
let mut chunk = vec![0; chunk_len];
|
||||
reader.read_exact(&mut chunk)?;
|
||||
data.extend(&chunk);
|
||||
data_len -= chunk_len;
|
||||
}
|
||||
|
||||
Ok(Call { to, value, data })
|
||||
}
|
||||
|
||||
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(&self.to)?;
|
||||
writer.write_all(&self.value.as_le_bytes())?;
|
||||
|
||||
let data_len = u32::try_from(self.data.len())
|
||||
.map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
|
||||
writer.write_all(&data_len.to_le_bytes())?;
|
||||
writer.write_all(&self.data)
|
||||
}
|
||||
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}
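
// An illustrative sketch, assuming the types above: a RouterCommand is written to and read
// back from bytes, and `msg()` (the byte string which actually gets signed) stays identical,
// which is what lets distinct signers agree on what they're signing.
#[cfg(test)]
mod router_command_example {
  use super::*;

  #[test]
  fn command_round_trip() {
    let command = RouterCommand::Execute {
      chain_id: U256::from(1u64),
      nonce: U256::from(0u64),
      outs: vec![OutInstruction {
        target: OutInstructionTarget::Direct([0xaa; 20]),
        value: U256::from(1_000_000_000u64),
      }],
    };
    let read = RouterCommand::read(&mut command.serialize().as_slice()).unwrap();
    assert_eq!(read, command);
    assert_eq!(read.msg(), command.msg());
  }
}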

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this transcript, even when using the IETF
    // variant, yet binding the key and the command is more comprehensive, so we do it anyways
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
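
// A rough sketch of the signing flow these machines implement; the variable names and the
// exchange of messages between signers are illustrative, not an API defined in this diff:
//
//   let machine = RouterCommandMachine::new(my_keys, command.clone()).unwrap();
//   let (machine, preprocess) = machine.preprocess(&mut OsRng);
//   // ... broadcast `preprocess`, collect the other signers' preprocesses ...
//   let (machine, share) = machine.sign(preprocesses, &[]).unwrap();
//   // ... broadcast `share`, collect the other signers' shares ...
//   let signed: SignedRouterCommand = machine.complete(shares).unwrap();
//
// Note the empty message passed to `sign`: the machine derives the message itself from the
// bound command, and panics if a caller tries to supply one.
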
@@ -1,30 +1,426 @@
pub use crate::abi::router::*;
use std::{sync::Arc, io, collections::HashSet};

/*
use crate::crypto::{ProcessedSignature, PublicKey};
use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
use eyre::Result;
use std::{convert::From, fs::File, sync::Arc};
use k256::{
  elliptic_curve::{group::GroupEncoding, sec1},
  ProjectivePoint,
};

pub async fn router_update_public_key<M: Middleware + 'static>(
  contract: &Router<M>,
  public_key: &PublicKey,
  signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
  let pending_tx = tx.send().await?;
  let receipt = pending_tx.await?;
  Ok(receipt)
use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;

use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};

use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

pub use crate::{
  Error,
  crypto::{PublicKey, Signature},
  abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
  Ether,
  Erc20([u8; 20]),
}

pub async fn router_execute<M: Middleware + 'static>(
  contract: &Router<M>,
  txs: Vec<Rtransaction>,
  signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
  let tx = contract.execute(txs, signature.into()).send();
  let pending_tx = tx.send().await?;
  let receipt = pending_tx.await?;
  Ok(receipt)
impl Coin {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    Ok(match kind[0] {
      0 => Coin::Ether,
      1 => {
        let mut address = [0; 20];
        reader.read_exact(&mut address)?;
        Coin::Erc20(address)
      }
      _ => Err(io::Error::other("unrecognized Coin type"))?,
    })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Coin::Ether => writer.write_all(&[0]),
      Coin::Erc20(token) => {
        writer.write_all(&[1])?;
        writer.write_all(token)
      }
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
  pub id: ([u8; 32], u64),
  pub from: [u8; 20],
  pub coin: Coin,
  pub amount: U256,
  pub data: Vec<u8>,
  pub key_at_end_of_block: ProjectivePoint,
}

impl InInstruction {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let id = {
      let mut id_hash = [0; 32];
      reader.read_exact(&mut id_hash)?;
      let mut id_pos = [0; 8];
      reader.read_exact(&mut id_pos)?;
      let id_pos = u64::from_le_bytes(id_pos);
      (id_hash, id_pos)
    };

    let mut from = [0; 20];
    reader.read_exact(&mut from)?;

    let coin = Coin::read(reader)?;
    let mut amount = [0; 32];
    reader.read_exact(&mut amount)?;
    let amount = U256::from_le_slice(&amount);

    let mut data_len = [0; 4];
    reader.read_exact(&mut data_len)?;
    let data_len = usize::try_from(u32::from_le_bytes(data_len))
      .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
    let mut data = vec![0; data_len];
    reader.read_exact(&mut data)?;

    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
    reader.read_exact(&mut key_at_end_of_block)?;
    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
      .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;

    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.id.0)?;
    writer.write_all(&self.id.1.to_le_bytes())?;

    writer.write_all(&self.from)?;

    self.coin.write(writer)?;
    writer.write_all(&self.amount.as_le_bytes())?;

    writer.write_all(
      &u32::try_from(self.data.len())
        .map_err(|_| {
          io::Error::other("InInstruction being written had data exceeding 2**32 in length")
        })?
        .to_le_bytes(),
    )?;
    writer.write_all(&self.data)?;

    writer.write_all(&self.key_at_end_of_block.to_bytes())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
  pub tx_id: [u8; 32],
  pub nonce: u64,
  pub signature: [u8; 64],
}

/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
  pub(crate) fn code() -> Vec<u8> {
    let bytecode = include_str!("../artifacts/Router.bin");
    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
  }

  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
    let mut bytecode = Self::code();
    // Append the constructor arguments
    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
    bytecode
  }

  // This isn't pub in order to force users to use `Deployer::find_router`.
  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
    Self(provider, address)
  }

  pub fn address(&self) -> [u8; 20] {
    **self.1
  }

  /// Get the key for Serai at the specified block.
  #[cfg(test)]
  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
    let call = TransactionRequest::default()
      .to(Some(self.1))
      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
  }

  /// Get the message to be signed in order to update the key for Serai.
  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
    let mut buffer = b"updateSeraiKey".to_vec();
    buffer.extend(&chain_id.to_be_bytes::<32>());
    buffer.extend(&nonce.to_be_bytes::<32>());
    buffer.extend(&key.eth_repr());
    buffer
  }
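
  // For illustration: with chain_id = 1 and nonce = 2, the signed message is the concatenation
  //   b"updateSeraiKey" || be_bytes32(1) || be_bytes32(2) || key.eth_repr()
  // (14 + 32 + 32 + 32 = 110 bytes). Both the signers and the contract must rebuild this
  // byte-for-byte, which is why it's kept to simple concatenation.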

  /// Update the key representing Serai.
  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
    // TODO: Set a more accurate gas
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
        .abi_encode()
        .into(),
      gas_limit: 100_000,
      ..Default::default()
    }
  }

  /// Get the current nonce for the published batches.
  #[cfg(test)]
  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
    let call = TransactionRequest::default()
      .to(Some(self.1))
      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call, Some(BlockId::Hash(B256::from(at).into())))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    Ok(res._0)
  }

  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
  pub(crate) fn execute_message(
    chain_id: U256,
    nonce: U256,
    outs: Vec<abi::OutInstruction>,
  ) -> Vec<u8> {
    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
  }

  /// Execute a batch of `OutInstruction`s.
  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
      // TODO
      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
      ..Default::default()
    }
  }
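
  // As a worked example of the placeholder heuristic above: a batch of two OutInstructions is
  // granted 100_000 + ((200_000 + 10_000) * 2) = 520_000 gas, i.e. a flat per-batch overhead
  // plus a fixed per-instruction allowance; the TODO marks this as an unmeasured bound.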

  pub async fn in_instructions(
    &self,
    block: u64,
    allowed_tokens: &HashSet<[u8; 20]>,
  ) -> Result<Vec<InInstruction>, Error> {
    let key_at_end_of_block = {
      let filter = Filter::new().from_block(0).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
      let last_key_x_coordinate = last_key_x_coordinate_log
        .log_decode::<SeraiKeyUpdated>()
        .map_err(|_| Error::ConnectionError)?
        .inner
        .data
        .key;

      let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
      compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
      compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());

      ProjectivePoint::from_bytes(&compressed_point).expect("router's last key wasn't a valid key")
    };

    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut transfer_check = HashSet::new();
    let mut in_instructions = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let id = (
        log.block_hash.ok_or(Error::ConnectionError)?.into(),
        log.log_index.ok_or(Error::ConnectionError)?,
      );

      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?;

      let log =
        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

      let coin = if log.coin.0 == [0; 20] {
        Coin::Ether
      } else {
        let token = *log.coin.0;

        if !allowed_tokens.contains(&token) {
          continue;
        }

        // If this also counts as a top-level transfer via the token, drop it
        //
        // Necessary in order to handle a potential edge case with some theoretical token
        // implementations
        //
        // This will either let it be handled by the top-level transfer hook or will drop it
        // entirely on the side of caution
        if tx.to == Some(token.into()) {
          continue;
        }

        // Get all logs for this TX
        let receipt = self
          .0
          .get_transaction_receipt(tx_hash)
          .await
          .map_err(|_| Error::ConnectionError)?
          .ok_or(Error::ConnectionError)?;
        let tx_logs = receipt.inner.logs();

        // Find a matching transfer log
        let mut found_transfer = false;
        for tx_log in tx_logs {
          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
          // Ensure we didn't already use this transfer to check a distinct InInstruction event
          if transfer_check.contains(&log_index) {
            continue;
          }

          // Check if this log is from the token we expected to be transferred
          if tx_log.address().0 != token {
            continue;
          }
          // Check if this is a transfer log
          // https://github.com/alloy-rs/core/issues/589
          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
            continue;
          }
          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
          // Check if this is a transfer to us for the expected amount
          if (transfer.to == self.1) && (transfer.value == log.amount) {
            transfer_check.insert(log_index);
            found_transfer = true;
            break;
          }
        }
        if !found_transfer {
          // This shouldn't be a ConnectionError
          // This is an exploit, a non-conforming ERC20, or an invalid connection
          // This should halt the process which is sufficient, yet this is sub-optimal
          // TODO
          Err(Error::ConnectionError)?;
        }

        Coin::Erc20(token)
      };

      in_instructions.push(InInstruction {
        id,
        from: *log.from.0,
        coin,
        amount: log.amount,
        data: log.instruction.as_ref().to_vec(),
        key_at_end_of_block,
      });
    }

    Ok(in_instructions)
  }
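
  // Illustrative usage, assuming a `Router` in scope and a hypothetical `dai` token address:
  //
  //   let mut allowed_tokens = HashSet::new();
  //   allowed_tokens.insert(dai);
  //   for instruction in router.in_instructions(block, &allowed_tokens).await? {
  //     // every ERC20-denominated InInstruction returned here was cross-checked against an
  //     // actual Transfer log, so a token which emits InInstruction events without moving
  //     // funds never yields an instruction
  //   }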

  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
    let mut res = vec![];

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log =
          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    Ok(res)
  }

  #[cfg(feature = "tests")]
  pub fn key_updated_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
  }
  #[cfg(feature = "tests")]
  pub fn executed_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
  }
}
*/

@@ -1,34 +0,0 @@
use eyre::{eyre, Result};

use group::ff::PrimeField;

use ethers_providers::{Provider, Http};

use crate::{
  Error,
  crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;

pub async fn call_verify(
  contract: &Schnorr<Provider<Http>>,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<()> {
  if contract
    .verify(
      public_key.parity,
      public_key.px.to_repr().into(),
      keccak256(message),
      signature.c.to_repr().into(),
      signature.s.to_repr().into(),
    )
    .call()
    .await?
  {
    Ok(())
  } else {
    Err(eyre!(Error::InvalidSignature))
  }
}
13
coins/ethereum/src/tests/abi/mod.rs
Normal file
@@ -0,0 +1,13 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;
51
coins/ethereum/src/tests/contracts/ERC20.sol
Normal file
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

contract TestERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() public pure returns (string memory) {
    return "Test ERC20";
  }
  function symbol() public pure returns (string memory) {
    return "TEST";
  }
  function decimals() public pure returns (uint8) {
    return 18;
  }

  function totalSupply() public pure returns (uint256) {
    return 1_000_000 * 10e18;
  }

  mapping(address => uint256) balances;
  mapping(address => mapping(address => uint256)) allowances;

  constructor() {
    balances[msg.sender] = totalSupply();
  }

  function balanceOf(address owner) public view returns (uint256) {
    return balances[owner];
  }
  function transfer(address to, uint256 value) public returns (bool) {
    balances[msg.sender] -= value;
    balances[to] += value;
    return true;
  }
  function transferFrom(address from, address to, uint256 value) public returns (bool) {
    allowances[from][msg.sender] -= value;
    balances[from] -= value;
    balances[to] += value;
    return true;
  }

  function approve(address spender, uint256 value) public returns (bool) {
    allowances[msg.sender][spender] = value;
    return true;
  }
  function allowance(address owner, address spender) public view returns (uint256) {
    return allowances[owner][spender];
  }
}
15
coins/ethereum/src/tests/contracts/Schnorr.sol
Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "../../../contracts/Schnorr.sol";

contract TestSchnorr {
  function verify(
    bytes32 px,
    bytes calldata message,
    bytes32 c,
    bytes32 s
  ) external pure returns (bool) {
    return Schnorr.verify(px, message, c, s);
  }
}

@@ -1,49 +1,33 @@
use rand_core::OsRng;

use sha2::Sha256;
use sha3::{Digest, Keccak256};

use group::Group;
use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
  U256, Scalar, AffinePoint, ProjectivePoint,
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::Secp256k1,
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

pub fn hash_to_scalar(data: &[u8]) -> Scalar {
  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
}

pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
    return None;
  }

  #[allow(non_snake_case)]
  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
  #[allow(non_snake_case)]
  if let Some(R) = Option::<AffinePoint>::from(R) {
    #[allow(non_snake_case)]
    let R = ProjectivePoint::from(R);

    let r = r.invert().unwrap();
    let u1 = ProjectivePoint::GENERATOR * (-message * r);
    let u2 = R * (s * r);
    let key: ProjectivePoint = u1 + u2;
    if !bool::from(key.is_identity()) {
      return Some(address(&key));
    }
  }

  None
// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

#[test]
@@ -55,20 +39,23 @@ fn test_ecrecover() {
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
    .try_sign_prehashed(
      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
      &keccak256(MESSAGE).into(),
    )
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      hash_to_scalar(MESSAGE),
      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
@@ -93,18 +80,13 @@ fn test_signing() {
pub fn preprocess_signature_for_ecrecover(
  R: ProjectivePoint,
  public_key: &PublicKey,
  chain_id: U256,
  m: &[u8],
  s: Scalar,
) -> (u8, Scalar, Scalar) {
  let c = EthereumHram::hram(
    &R,
    &public_key.A,
    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
  );
) -> (Scalar, Scalar) {
  let c = EthereumHram::hram(&R, &public_key.A, m);
  let sa = -(s * public_key.px);
  let ca = -(c * public_key.px);
  (public_key.parity, sa, ca)
  (sa, ca)
}
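
// Why the hack works (a sketch, not text from the diff): for an ECDSA signature (r, s) over
// message hash z, ecrecover returns address(r^-1 * (s * R' - z * G)), where R' is the curve
// point with x-coordinate r. Calling it with z = -(s_schnorr * px), r = px, s = -(c * px)
// makes R' the public key A, so it returns
//   address(px^-1 * ((-(c * px)) * A - (-(s_schnorr * px)) * G)) = address(s_schnorr * G - c * A)
// which, for a valid Schnorr signature, equals address(R_schnorr). A contract can therefore
// verify a Schnorr signature with one precompile call plus a single address comparison.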

#[test]
@@ -112,21 +94,12 @@ fn test_ecrecover_hack() {
  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);
  let chain_id = U256::ONE;
  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);

  let (parity, sa, ca) =
    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
  assert_eq!(q, address(&sig.R));
}

@@ -1,21 +1,25 @@
use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
use std::{sync::Arc, collections::HashMap};

use rand_core::OsRng;

use group::ff::PrimeField;
use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};

use ethers_core::{
  types::{H160, Signature as EthersSignature},
  abi::Abi,
use alloy_core::{
  primitives::{Address, U256, Bytes, TxKind},
  hex::FromHex,
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};
use alloy_consensus::{SignableTransaction, TxLegacy};

use crate::crypto::PublicKey;
use alloy_rpc_types::TransactionReceipt;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::crypto::{address, deterministically_sign, PublicKey};

mod crypto;

mod abi;
mod schnorr;
mod router;

@@ -36,57 +40,88 @@ pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey)
  (keys, public_key)
}

// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
// TODO: Deterministic deployments across chains
// TODO: Use a proper error here
pub async fn send(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
  let verifying_key = *wallet.verifying_key().as_affine();
  let address = Address::from(address(&verifying_key.into()));

  // https://github.com/alloy-rs/alloy/issues/539
  // let chain_id = provider.get_chain_id().await.unwrap();
  // tx.chain_id = Some(chain_id);
  tx.chain_id = None;
  tx.nonce = provider.get_transaction_count(address, None).await.unwrap();
  // 100 gwei
  tx.gas_price = 100_000_000_000u128;

  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
  assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
  assert!(
    provider.get_balance(address, None).await.unwrap() >
      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
  );

  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig.into(), &mut bytes);
  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
  pending_tx.get_receipt().await.ok()
}

pub async fn fund_account(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  to_fund: Address,
  value: U256,
) -> Option<()> {
  let funding_tx =
    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
  assert!(send(provider, wallet, funding_tx).await.unwrap().status());

  Some(())
}

// TODO: Use a proper error here
pub async fn deploy_contract(
  chain_id: u32,
  client: Arc<Provider<Http>>,
  client: Arc<RootProvider<SimpleRequest>>,
  wallet: &k256::ecdsa::SigningKey,
  name: &str,
) -> eyre::Result<H160> {
  let abi: Abi =
    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();

) -> Option<Address> {
  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
  let hex_bin =
    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
  let bin = hex::decode(hex_bin).unwrap();
  let factory = ContractFactory::new(abi, bin.into(), client.clone());
  let bin = Bytes::from_hex(hex_bin).unwrap();

  let mut deployment_tx = factory.deploy(())?.tx;
  deployment_tx.set_chain_id(chain_id);
  deployment_tx.set_gas(1_000_000);
  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
  let deployment_tx = TxLegacy {
    chain_id: None,
    nonce: 0,
    // 100 gwei
    gas_price: 100_000_000_000u128,
    gas_limit: 1_000_000,
    to: TxKind::Create,
    value: U256::ZERO,
    input: bin,
  };

  let sig_hash = deployment_tx.sighash();
  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
  let deployment_tx = deterministically_sign(&deployment_tx);

  // EIP-155 v
  let mut v = u64::from(rid.to_byte());
  assert!((v == 0) || (v == 1));
  v += u64::from((chain_id * 2) + 35);
  // Fund the deployer address
  fund_account(
    &client,
    wallet,
    deployment_tx.recover_signer().unwrap(),
    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
  )
  .await?;

  let r = sig.r().to_repr();
  let r_ref: &[u8] = r.as_ref();
  let s = sig.s().to_repr();
  let s_ref: &[u8] = s.as_ref();
  let deployment_tx =
    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
  let (deployment_tx, sig, _) = deployment_tx.into_parts();
  let mut bytes = vec![];
  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
  let receipt = pending_tx.get_receipt().await.ok()?;
  assert!(receipt.status());

  let pending_tx = client.send_raw_transaction(deployment_tx).await?;

  let mut receipt;
  while {
    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
    receipt.is_none()
  } {
    tokio::time::sleep(Duration::from_secs(6)).await;
  }
  let receipt = receipt.unwrap();
  assert!(receipt.status == Some(1.into()));

  Ok(receipt.contract_address.unwrap())
  Some(receipt.contract_address.unwrap())
}
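
// A sketch of the idea behind `deterministically_sign` (defined elsewhere in this crate): the
// deployment TX is paired with a signature derived deterministically from the TX itself, the
// would-be signer's address is recovered from that signature, and anyone may fund it before
// broadcasting. Since nobody holds that account's private key, the deployment is reproducible
// without creating or passing around a wallet, addressing the TODOs above:
//
//   let tx = deterministically_sign(&unsigned_deployment_tx);
//   let deployer_address = tx.recover_signer().unwrap();
//   fund_account(&provider, &wallet, deployer_address, required_value).await?;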

@@ -2,7 +2,8 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap};

use rand_core::OsRng;

use group::ff::PrimeField;
use group::Group;
use k256::ProjectivePoint;
use frost::{
  curve::Secp256k1,
  Participant, ThresholdKeys,
@@ -10,100 +11,173 @@ use frost::{
  tests::{algorithm_machines, sign},
};

use ethers_core::{
  types::{H160, U256, Bytes},
  abi::AbiEncode,
  utils::{Anvil, AnvilInstance},
};
use ethers_providers::{Middleware, Provider, Http};
use alloy_core::primitives::{Address, U256};

use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  crypto::{keccak256, PublicKey, EthereumHram, Signature},
  router::{self, *},
  tests::{key_gen, deploy_contract},
  crypto::*,
  deployer::Deployer,
  router::{Router, abi as router},
  tests::{key_gen, send, fund_account},
};

async fn setup_test() -> (
  u32,
  AnvilInstance,
  Router<Provider<Http>>,
  Arc<RootProvider<SimpleRequest>>,
  u64,
  Router,
  HashMap<Participant, ThresholdKeys<Secp256k1>>,
  PublicKey,
) {
  let anvil = Anvil::new().spawn();

  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
  let chain_id = provider.get_chainid().await.unwrap().as_u32();
  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let chain_id = provider.get_chain_id().await.unwrap();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let contract_address =
    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
  let contract = Router::new(contract_address, client.clone());
  // Make sure the Deployer constructor returns None, as it doesn't exist yet
  assert!(Deployer::new(client.clone()).await.unwrap().is_none());

  // Deploy the Deployer
  let tx = Deployer::deployment_tx();
  fund_account(
    &client,
    &wallet,
    tx.recover_signer().unwrap(),
    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
  )
  .await
  .unwrap();

  let (tx, sig, _) = tx.into_parts();
  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig, &mut bytes);

  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
  let receipt = pending_tx.get_receipt().await.unwrap();
  assert!(receipt.status());
  let deployer =
    Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");

  let (keys, public_key) = key_gen();

  // Set the key to the threshold keys
  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
  let pending_tx = tx.send().await.unwrap();
  let receipt = pending_tx.await.unwrap().unwrap();
  assert!(receipt.status == Some(1.into()));
  // Verify the Router constructor returns None, as it doesn't exist yet
  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());

  (chain_id, anvil, contract, keys, public_key)
  // Deploy the router
  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
    .await
    .unwrap();
  assert!(receipt.status());
  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();

  (anvil, client, chain_id, contract, keys, public_key)
}

async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
  client
    .get_block(client.get_block_number().await.unwrap().into(), false)
    .await
    .unwrap()
    .unwrap()
    .header
    .hash
    .unwrap()
    .0
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
  let (_anvil, client, _, router, _, public_key) = setup_test().await;

  let block_hash = latest_block_hash(&client).await;
  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}

pub fn hash_and_sign(
  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
  public_key: &PublicKey,
  chain_id: U256,
  message: &[u8],
) -> Signature {
  let hashed_message = keccak256(message);

  let mut chain_id_bytes = [0; 32];
  chain_id.to_big_endian(&mut chain_id_bytes);
  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, keys),
    full_message,
  );
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);

  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
  Signature::new(public_key, message, sig).unwrap()
}

#[tokio::test]
async fn test_router_update_serai_key() {
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let next_key = loop {
    let point = ProjectivePoint::random(&mut OsRng);
    let Some(next_key) = PublicKey::new(point) else { continue };
    break next_key;
  };

  let message = Router::update_serai_key_message(
    U256::try_from(chain_id).unwrap(),
    U256::try_from(1u64).unwrap(),
    &next_key,
  );
  let sig = hash_and_sign(&keys, &public_key, &message);

  let first_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);

  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
      .await
      .unwrap();
  assert!(receipt.status());

  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
  // Check this does still offer the historical state
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}

#[tokio::test]
async fn test_router_execute() {
  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let to = H160([0u8; 20]);
  let value = U256([0u64; 4]);
  let data = Bytes::from([0]);
  let tx = OutInstruction { to, value, data: data.clone() };
  let to = Address::from([0; 20]);
  let value = U256::ZERO;
  let tx = router::OutInstruction { to, value, calls: vec![] };
  let txs = vec![tx];

  let nonce_call = contract.nonce();
  let nonce = nonce_call.call().await.unwrap();
  let first_block_hash = latest_block_hash(&client).await;
  let nonce = contract.nonce(first_block_hash).await.unwrap();
  assert_eq!(nonce, U256::try_from(1u64).unwrap());

  let encoded =
    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
  let sig = hash_and_sign(&keys, &public_key, &message);

  let tx = contract
    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
    .gas(300_000);
  let pending_tx = tx.send().await.unwrap();
  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
  assert!(receipt.status == Some(1.into()));
  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
  assert!(receipt.status());

  println!("gas used: {:?}", receipt.cumulative_gas_used);
  println!("logs: {:?}", receipt.logs);
  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
  // Check this does still offer the historical state
  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}

@@ -1,11 +1,9 @@
use std::{convert::TryFrom, sync::Arc};
use std::sync::Arc;

use rand_core::OsRng;

use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};

use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
use ethers_providers::{Middleware, Provider, Http};
use group::ff::PrimeField;
use k256::Scalar;

use frost::{
  curve::Secp256k1,
@@ -13,24 +11,34 @@ use frost::{
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::Address;

use alloy_sol_types::SolCall;

use alloy_rpc_types::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  Error,
  crypto::*,
  schnorr::*,
  tests::{key_gen, deploy_contract},
  tests::{key_gen, deploy_contract, abi::schnorr as abi},
};

async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
  let chain_id = provider.get_chainid().await.unwrap().as_u32();
  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let contract_address =
    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
  let contract = Schnorr::new(contract_address, client.clone());
  (chain_id, anvil, contract)
  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
  (anvil, client, address)
}

#[tokio::test]
@@ -38,30 +46,48 @@ async fn test_deploy_contract() {
  setup_test().await;
}

pub async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  contract: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<(), Error> {
  let px: [u8; 32] = public_key.px.to_repr().into();
  let c_bytes: [u8; 32] = signature.c.to_repr().into();
  let s_bytes: [u8; 32] = signature.s.to_repr().into();
  let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new(
    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
      .abi_encode()
      .into(),
  ));
  let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?;
  let res =
    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;

  if res._0 {
    Ok(())
  } else {
    Err(Error::InvalidSignature)
  }
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (chain_id, _anvil, contract) = setup_test().await;
  let chain_id = U256::from(chain_id);
  let (_anvil, client, contract) = setup_test().await;

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);
  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );
  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();

  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}

@@ -43,13 +43,10 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =

# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }

monero-generators = { path = "generators", version = "0.4", default-features = false }

async-lock = { version = "3", default-features = false, optional = true }

hex-literal = "0.4"
hex = { version = "0.4", default-features = false, features = ["alloc"] }
serde = { version = "1", default-features = false, features = ["derive", "alloc"] }
@@ -91,12 +88,9 @@ std = [
  "multiexp/std",

  "transcript/std",
  "dleq/std",

  "monero-generators/std",

  "async-lock?/std",

  "hex/std",
  "serde/std",
  "serde_json/std",
@@ -104,10 +98,8 @@ std = [
  "base58-monero/std",
]

cache-distribution = ["async-lock"]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "dleq", "std"]
multisig = ["transcript", "frost", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
experimental = []

default = ["std", "http-rpc"]

@@ -47,3 +47,15 @@ It also won't act as a wallet, just as a transaction library. wallet2 has
several *non-transaction-level* policies, such as always attempting to use two
inputs to create transactions. These are considered out of scope to
monero-serai.

### Feature flags
monero-serai has certain functionality behind feature flags; a usage sketch follows the lists below:

- `std`: Enables usage of Rust's `std` and several other features. See `Cargo.toml` for the full list.
- `http-rpc`: Enables an HTTP(S) transport type within the `rpc` module
- `multisig`: Enables multi-signature features within the `wallet` module
- `binaries`: TODO

The features enabled by default are:
- `std`
- `http-rpc`
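
A sketch of how these flags gate code downstream (the import path shown is illustrative, not a guaranteed API):

```rust
// Only compiled when monero-serai is built with the `multisig` feature.
#[cfg(feature = "multisig")]
use monero_serai::wallet::TransactionMachine;
```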
@@ -15,16 +15,43 @@ const CORRECT_BLOCK_HASH_202612: [u8; 32] =
const EXISTING_BLOCK_HASH_202612: [u8; 32] =
  hex_literal::hex!("bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698");

/// The header of a [`Block`].
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BlockHeader {
  /// This represents the hardfork number of the block.
  pub major_version: u8,
  /// This field is used to vote for a particular [hardfork](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_basic/cryptonote_basic.h#L460).
  pub minor_version: u8,
  /// The UNIX time at which the block was mined.
  pub timestamp: u64,
  /// The previous [`Block::hash`].
  pub previous: [u8; 32],
  /// The block's nonce.
  pub nonce: u32,
}

impl BlockHeader {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut writer = vec![];
  /// block_header.write(&mut writer)?;
  /// # Ok(()) }
  /// ```
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(&self.major_version, w)?;
    write_varint(&self.minor_version, w)?;
@@ -33,12 +60,58 @@ impl BlockHeader {
    w.write_all(&self.nonce.to_le_bytes())
  }

  /// Serialize [`Self`] into a new byte buffer.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut writer = vec![];
  /// block_header.write(&mut writer)?;
  ///
  /// let serialized = block_header.serialize();
  /// assert_eq!(serialized, writer);
  /// # Ok(()) }
  /// ```
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::block::*;
  /// # fn main() -> std::io::Result<()> {
  /// let block_header = BlockHeader {
  ///   major_version: 1,
  ///   minor_version: 2,
  ///   timestamp: 3,
  ///   previous: [4; 32],
  ///   nonce: 5,
  /// };
  ///
  /// let mut vec = vec![];
  /// block_header.write(&mut vec)?;
  ///
  /// let read = BlockHeader::read(&mut vec.as_slice())?;
  /// assert_eq!(read, block_header);
  /// # Ok(()) }
  /// ```
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<BlockHeader> {
    Ok(BlockHeader {
      major_version: read_varint(r)?,
@@ -50,14 +123,19 @@ impl BlockHeader {
    }
  }
}

/// Block on the Monero blockchain.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Block {
  /// The header of this block.
  pub header: BlockHeader,
  /// The miner/coinbase transaction.
  pub miner_tx: Transaction,
  /// Hashes of all the transactions within this block.
  pub txs: Vec<[u8; 32]>,
}

impl Block {
  /// Return this block's number, as claimed by the `Input::Gen` of its miner transaction.
  pub fn number(&self) -> Option<u64> {
    match self.miner_tx.prefix.inputs.first() {
      Some(Input::Gen(number)) => Some(*number),
@@ -65,6 +143,10 @@ impl Block {
    }
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.header.write(w)?;
    self.miner_tx.write(w)?;
@@ -75,6 +157,11 @@ impl Block {
    Ok(())
  }

  /// Return the merkle root of this block.
  ///
  /// In the case that this block has no transactions other than
  /// the miner transaction, the miner transaction hash is returned,
  /// i.e. the [`Transaction::hash`] of [`Self::miner_tx`] is returned.
  fn tx_merkle_root(&self) -> [u8; 32] {
    merkle_root(self.miner_tx.hash(), &self.txs)
  }
@@ -91,6 +178,7 @@ impl Block {
    blob
  }

  /// Calculate the hash of this block.
  pub fn hash(&self) -> [u8; 32] {
    let mut hashable = self.serialize_hashable();
    // Monero pre-appends a VarInt of the block hashing blob's length before getting the block hash
@@ -107,12 +195,18 @@ impl Block {
    hash
  }

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<Block> {
    let header = BlockHeader::read(r)?;

@@ -124,7 +218,7 @@ impl Block {
    Ok(Block {
      header,
      miner_tx,
      txs: (0_usize .. read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
      txs: (0_usize..read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
    })
  }
}

@@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop};

use sha3::{Digest, Keccak256};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use curve25519_dalek::{
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
  traits::VartimePrecomputedMultiscalarMul,
};

pub use monero_generators::{H, decompress_point};

@@ -46,8 +51,26 @@ pub mod wallet;
#[cfg(test)]
mod tests;

/// Default block lock time for transactions.
///
/// This is the number of new blocks that must
/// pass before a transaction's outputs can be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_DEFAULT_TX_SPENDABLE_AGE`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L49).
pub const DEFAULT_LOCK_WINDOW: usize = 10;
/// Block lock time for coinbase transactions.
///
/// This is the number of new blocks that must
/// pass before a coinbase/miner transaction can be spent.
///
/// Equivalent to Monero's [`CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const COINBASE_LOCK_WINDOW: usize = 60;
/// Average amount of seconds it takes for a block to be mined.
///
/// This is the target amount of seconds mining difficulty will adjust to,
/// i.e. a block will be mined every `BLOCK_TIME` seconds on average.
///
/// Equivalent to Monero's [`DIFFICULTY_TARGET_V2`](https://github.com/monero-project/monero/blob/c8214782fb2a769c57382a999eaf099691c836e7/src/cryptonote_config.h#L44).
pub const BLOCK_TIME: usize = 120;

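// Illustrative only (an added sketch, not part of the diff): with one block
// every `BLOCK_TIME` seconds, a coinbase output unlocks after roughly
// `COINBASE_LOCK_WINDOW * BLOCK_TIME` seconds.
const _: () = assert!(COINBASE_LOCK_WINDOW * BLOCK_TIME == 7200); // ~2 hours
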
static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
@@ -56,6 +79,13 @@ pub(crate) fn INV_EIGHT() -> Scalar {
  *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}

static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
  BASEPOINT_PRECOMP_CELL
    .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}

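// Usage sketch (illustrative, not from the diff): the cached precomputation
// speeds up repeated variable-time multiplications against the basepoint, e.g.
// `BASEPOINT_PRECOMP().vartime_multiscalar_mul([s])` computes `s * G`; CLSAG
// verification below uses it via `vartime_mixed_multiscalar_mul`.
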
/// Monero protocol version.
///
/// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
@@ -63,19 +93,34 @@ pub(crate) fn INV_EIGHT() -> Scalar {
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[allow(non_camel_case_types)]
pub enum Protocol {
  /// Version 14.
  v14,
  /// Version 16.
  v16,
  /// A custom version with customized properties.
  Custom {
    /// See [`Self::ring_len`].
    ring_len: usize,
    /// See [`Self::bp_plus`].
    bp_plus: bool,
    /// See [`Self::optimal_rct_type`].
    optimal_rct_type: RctType,
    /// See [`Self::view_tags`].
    view_tags: bool,
    /// See [`Self::v16_fee`].
    v16_fee: bool,
  },
}

impl Protocol {
  /// Amount of ring members under this protocol version.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::*;
  /// assert_eq!(Protocol::v14.ring_len(), 11);
  /// assert_eq!(Protocol::v16.ring_len(), 16);
  /// ```
  pub fn ring_len(&self) -> usize {
    match self {
      Protocol::v14 => 11,
@@ -87,6 +132,13 @@ impl Protocol {
  /// Whether or not the specified version uses Bulletproofs or Bulletproofs+.
  ///
  /// This method will likely be reworked when versions not using Bulletproofs at all are added.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::*;
  /// assert_eq!(Protocol::v14.bp_plus(), false);
  /// assert_eq!(Protocol::v16.bp_plus(), true);
  /// ```
  pub fn bp_plus(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -95,6 +147,14 @@
    }
  }

  /// The optimal RingCT type for this version.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.optimal_rct_type(), RctType::Clsag);
  /// assert_eq!(Protocol::v16.optimal_rct_type(), RctType::BulletproofsPlus);
  /// ```
  // TODO: Make this an Option when we support pre-RCT protocols
  pub fn optimal_rct_type(&self) -> RctType {
    match self {
@@ -105,6 +165,13 @@ impl Protocol {
  }

  /// Whether or not the specified version uses view tags.
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.view_tags(), false);
  /// assert_eq!(Protocol::v16.view_tags(), true);
  /// ```
  pub fn view_tags(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -115,6 +182,13 @@ impl Protocol {

  /// Whether or not the specified version uses the fee algorithm from Monero
  /// hard fork version 16 (released in v18 binaries).
  ///
  /// # Example
  /// ```rust
  /// # use monero_serai::{*, ringct::*};
  /// assert_eq!(Protocol::v14.v16_fee(), false);
  /// assert_eq!(Protocol::v16.v16_fee(), true);
  /// ```
  pub fn v16_fee(&self) -> bool {
    match self {
      Protocol::v14 => false,
@@ -176,11 +250,15 @@
    }
  }

/// Transparent structure representing a Pedersen commitment's contents.
/// Transparent structure representing a [Pedersen commitment](https://web.getmonero.org/resources/moneropedia/pedersen-commitment.html)'s contents.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct Commitment {
  /// The value used to mask the `amount`.
  pub mask: Scalar,
  /// The value being masked.
  ///
  /// In Monero's case, this is the amount of XMR in atomic units.
  pub amount: u64,
}

@@ -196,6 +274,7 @@ impl Commitment {
    Commitment { mask: Scalar::ONE, amount: 0 }
  }

  /// Create a new [`Self`].
  pub fn new(mask: Scalar, amount: u64) -> Commitment {
    Commitment { mask, amount }
  }

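  // Added sketch (illustrative): the point form of a commitment is
  // `mask * G + amount * H`, computed by the crate's `Commitment::calculate`,
  // e.g. `Commitment::new(Scalar::ONE, 5).calculate()`.
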
@@ -11,6 +11,7 @@ use monero_generators::hash_to_point;

use crate::{serialize::*, hash_to_scalar};

/// A signature within a [`RingSignature`].
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Signature {
  c: Scalar,
@@ -18,23 +19,37 @@ pub struct Signature {
}

impl Signature {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_scalar(&self.c, w)?;
    write_scalar(&self.r, w)?;
    Ok(())
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(r: &mut R) -> io::Result<Signature> {
    Ok(Signature { c: read_scalar(r)?, r: read_scalar(r)? })
  }
}

/// A [ring signature](https://en.wikipedia.org/wiki/Ring_signature).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingSignature {
  sigs: Vec<Signature>,
}

impl RingSignature {
  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for sig in &self.sigs {
      sig.write(w)?;
@@ -42,6 +57,11 @@ impl RingSignature {
    Ok(())
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(members: usize, r: &mut R) -> io::Result<RingSignature> {
    Ok(RingSignature { sigs: read_raw_vec(Signature::read, members, r)? })
  }

@@ -7,20 +7,21 @@ use monero_generators::H_pow_2;

use crate::{hash_to_scalar, unreduced_scalar::UnreducedScalar, serialize::*};

/// 64 Borromean ring signatures.
/// 64 Borromean ring signatures, as needed for a 64-bit range proof.
///
/// s0 and s1 are stored as `UnreducedScalar`s due to Monero not requiring they be reduced.
/// `UnreducedScalar` preserves their original byte encoding and implements the custom reduction
/// algorithm which was in use.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BorromeanSignatures {
  pub s0: [UnreducedScalar; 64],
  pub s1: [UnreducedScalar; 64],
  pub ee: Scalar,
struct BorromeanSignatures {
  s0: [UnreducedScalar; 64],
  s1: [UnreducedScalar; 64],
  ee: Scalar,
}

impl BorromeanSignatures {
  pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
  /// Read a set of BorromeanSignatures from a reader.
  fn read<R: Read>(r: &mut R) -> io::Result<BorromeanSignatures> {
    Ok(BorromeanSignatures {
      s0: read_array(UnreducedScalar::read, r)?,
      s1: read_array(UnreducedScalar::read, r)?,
@@ -28,7 +29,8 @@ impl BorromeanSignatures {
    })
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
  /// Write the set of BorromeanSignatures to a writer.
  fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for s0 in &self.s0 {
      s0.write(w)?;
    }
@@ -64,22 +66,26 @@ impl BorromeanSignatures {
/// A range proof premised on Borromean ring signatures.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BorromeanRange {
  pub sigs: BorromeanSignatures,
  pub bit_commitments: [EdwardsPoint; 64],
  sigs: BorromeanSignatures,
  bit_commitments: [EdwardsPoint; 64],
}

impl BorromeanRange {
  /// Read a BorromeanRange proof from a reader.
  pub fn read<R: Read>(r: &mut R) -> io::Result<BorromeanRange> {
    Ok(BorromeanRange {
      sigs: BorromeanSignatures::read(r)?,
      bit_commitments: read_array(read_point, r)?,
    })
  }

  /// Write the BorromeanRange proof to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.sigs.write(w)?;
    write_raw_vec(write_point, &self.bit_commitments, w)
  }

  /// Verify the commitment contains a 64-bit value.
  pub fn verify(&self, commitment: &EdwardsPoint) -> bool {
    if &self.bit_commitments.iter().sum::<EdwardsPoint>() != commitment {
      return false;

@@ -26,15 +26,15 @@ use self::plus::*;

pub(crate) const MAX_OUTPUTS: usize = self::core::MAX_M;

/// Bulletproofs enum, supporting the original and plus formulations.
/// Bulletproof enum, encapsulating both Bulletproofs and Bulletproofs+.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproofs {
pub enum Bulletproof {
  Original(OriginalStruct),
  Plus(AggregateRangeProof),
}

impl Bulletproofs {
impl Bulletproof {
  fn bp_fields(plus: bool) -> usize {
    if plus {
      6
@@ -57,7 +57,7 @@ impl Bulletproofs {

    let mut bp_clawback = 0;
    if n_padded_outputs > 2 {
      let fields = Bulletproofs::bp_fields(plus);
      let fields = Bulletproof::bp_fields(plus);
      let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
      let size = (fields + (2 * LR_len)) * 32;
      bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
@@ -68,49 +68,51 @@ impl Bulletproofs {

  pub(crate) fn fee_weight(plus: bool, outputs: usize) -> usize {
    #[allow(non_snake_case)]
    let (bp_clawback, LR_len) = Bulletproofs::calculate_bp_clawback(plus, outputs);
    32 * (Bulletproofs::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
    let (bp_clawback, LR_len) = Bulletproof::calculate_bp_clawback(plus, outputs);
    32 * (Bulletproof::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
  }

  /// Prove the list of commitments are within [0 .. 2^64).
  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof.
  pub fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: &[Commitment],
    plus: bool,
  ) -> Result<Bulletproofs, TransactionError> {
  ) -> Result<Bulletproof, TransactionError> {
    if outputs.is_empty() {
      Err(TransactionError::NoOutputs)?;
    }
    if outputs.len() > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }
    Ok(if !plus {
      Bulletproofs::Original(OriginalStruct::prove(rng, outputs))
    } else {
      use dalek_ff_group::EdwardsPoint as DfgPoint;
      Bulletproofs::Plus(
        AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
          .unwrap()
          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
          .unwrap(),
      )
    })
    Ok(Bulletproof::Original(OriginalStruct::prove(rng, outputs)))
  }

  /// Verify the given Bulletproofs.
  /// Prove the list of commitments are within [0 .. 2^64) with an aggregate Bulletproof+.
  pub fn prove_plus<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: Vec<Commitment>,
  ) -> Result<Bulletproof, TransactionError> {
    if outputs.is_empty() {
      Err(TransactionError::NoOutputs)?;
    }
    if outputs.len() > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }
    Ok(Bulletproof::Plus(
      AggregateRangeStatement::new(outputs.iter().map(Commitment::calculate).collect())
        .unwrap()
        .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
        .unwrap(),
    ))
  }

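  // Usage sketch (illustrative, not from the diff):
  //   let proof = Bulletproof::prove_plus(&mut rng, outputs.clone())?;
  //   let commitments: Vec<_> = outputs.iter().map(Commitment::calculate).collect();
  //   assert!(proof.verify(&mut rng, &commitments));
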
  /// Verify the given Bulletproof(+).
  #[must_use]
  pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.verify(rng, commitments),
      Bulletproofs::Plus(bp) => {
      Bulletproof::Original(bp) => bp.verify(rng, commitments),
      Bulletproof::Plus(bp) => {
        let mut verifier = BatchVerifier::new(1);
        // If this commitment is torsioned (which is allowed), this won't be a well-formed
        // dfg::EdwardsPoint (expected to be of prime-order)
        // The actual BP+ impl will perform a torsion clear though, making this safe
        // TODO: Have AggregateRangeStatement take in dalek EdwardsPoint for clarity on this
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
        let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
          return false;
        };
        if !statement.verify(rng, &mut verifier, (), bp.clone()) {
@@ -121,9 +123,11 @@ impl Bulletproofs {
    }
  }

  /// Accumulate the verification for the given Bulletproofs into the specified BatchVerifier.
  /// Returns false if the Bulletproofs aren't sane, without mutating the BatchVerifier.
  /// Returns true if the Bulletproofs are sane, regardless of their validity.
  /// Accumulate the verification for the given Bulletproof into the specified BatchVerifier.
  ///
  /// Returns false if the Bulletproof isn't sane, leaving the BatchVerifier in an undefined
  /// state.
  /// Returns true if the Bulletproof is sane, regardless of its validity.
  #[must_use]
  pub fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
    &self,
@@ -133,11 +137,9 @@ impl Bulletproofs {
    commitments: &[EdwardsPoint],
  ) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
      Bulletproofs::Plus(bp) => {
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
      Bulletproof::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
      Bulletproof::Plus(bp) => {
        let Some(statement) = AggregateRangeStatement::new(commitments.to_vec()) else {
          return false;
        };
        statement.verify(rng, verifier, id, bp.clone())
@@ -151,7 +153,7 @@ impl Bulletproofs {
    specific_write_vec: F,
  ) -> io::Result<()> {
    match self {
      Bulletproofs::Original(bp) => {
      Bulletproof::Original(bp) => {
        write_point(&bp.A, w)?;
        write_point(&bp.S, w)?;
        write_point(&bp.T1, w)?;
@@ -165,7 +167,7 @@ impl Bulletproofs {
        write_scalar(&bp.t, w)
      }

      Bulletproofs::Plus(bp) => {
      Bulletproof::Plus(bp) => {
        write_point(&bp.A.0, w)?;
        write_point(&bp.wip.A.0, w)?;
        write_point(&bp.wip.B.0, w)?;
@@ -182,19 +184,21 @@ impl Bulletproofs {
    self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
  }

  /// Write the Bulletproof(+) to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_vec(write_point, points, w))
  }

  /// Serialize the Bulletproof(+) to a `Vec<u8>`.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Read Bulletproofs.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
    Ok(Bulletproofs::Original(OriginalStruct {
  /// Read a Bulletproof.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    Ok(Bulletproof::Original(OriginalStruct {
      A: read_point(r)?,
      S: read_point(r)?,
      T1: read_point(r)?,
@@ -209,11 +213,11 @@ impl Bulletproofs {
    }))
  }

  /// Read Bulletproofs+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
  /// Read a Bulletproof+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproof> {
    use dalek_ff_group::{Scalar as DfgScalar, EdwardsPoint as DfgPoint};

    Ok(Bulletproofs::Plus(AggregateRangeProof {
    Ok(Bulletproof::Plus(AggregateRangeProof {
      A: DfgPoint(read_point(r)?),
      wip: WipProof {
        A: DfgPoint(read_point(r)?),

@@ -33,6 +33,7 @@ pub(crate) fn hadamard_fold(
  res
}

/// Internal structure representing a Bulletproof.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
  pub(crate) A: DalekPoint,

@@ -9,6 +9,7 @@ use group::{
  ff::{Field, PrimeField},
  Group, GroupEncoding,
};
use curve25519_dalek::EdwardsPoint as DalekPoint;
use dalek_ff_group::{Scalar, EdwardsPoint};

use crate::{
@@ -24,11 +25,11 @@ use crate::{
  },
};

// Figure 3
// Figure 3 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement {
  generators: Generators,
  V: Vec<EdwardsPoint>,
  V: Vec<DalekPoint>,
}

impl Zeroize for AggregateRangeStatement {
@@ -38,27 +39,19 @@ impl Zeroize for AggregateRangeStatement {
}

#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct AggregateRangeWitness {
  values: Vec<u64>,
  gammas: Vec<Scalar>,
}
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);

impl AggregateRangeWitness {
  pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
    if commitments.is_empty() || (commitments.len() > MAX_M) {
      return None;
    }

    let mut values = Vec::with_capacity(commitments.len());
    let mut gammas = Vec::with_capacity(commitments.len());
    for commitment in commitments {
      values.push(commitment.amount);
      gammas.push(Scalar(commitment.mask));
    }
    Some(AggregateRangeWitness { values, gammas })
    Some(AggregateRangeWitness(commitments))
  }
}

/// Internal structure representing a Bulletproof+, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRangeProof {
  pub(crate) A: EdwardsPoint,
@@ -66,7 +59,7 @@ pub struct AggregateRangeProof {
}

impl AggregateRangeStatement {
  pub(crate) fn new(V: Vec<EdwardsPoint>) -> Option<Self> {
  pub(crate) fn new(V: Vec<DalekPoint>) -> Option<Self> {
    if V.is_empty() || (V.len() > MAX_M) {
      return None;
    }
@@ -107,11 +100,14 @@ impl AggregateRangeStatement {
    }
    let mn = V.len() * N;

    // 2, 4, 6, 8... powers of z, of length equivalent to the amount of commitments
    let mut z_pow = Vec::with_capacity(V.len());
    // z**2
    z_pow.push(z * z);

    let mut d = ScalarVector::new(mn);
    for j in 1 ..= V.len() {
      z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
      z_pow.push(*z_pow.last().unwrap() * z_pow[0]);
      d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
    }

@@ -162,13 +158,11 @@ impl AggregateRangeStatement {
    witness: &AggregateRangeWitness,
  ) -> Option<AggregateRangeProof> {
    // Check for consistency with the witness
    if self.V.len() != witness.values.len() {
    if self.V.len() != witness.0.len() {
      return None;
    }
    for (commitment, (value, gamma)) in
      self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
    {
      if Commitment::new(**gamma, *value).calculate() != **commitment {
    for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
      if witness.calculate() != *commitment {
        return None;
      }
    }
@@ -181,9 +175,9 @@ impl AggregateRangeStatement {
    // Commitments aren't transmitted INV_EIGHT though, so this multiplies by INV_EIGHT to enable
    // clearing its cofactor without mutating the value
    // For some reason, these values are transcripted * INV_EIGHT, not as transmitted
    let mut V = V.into_iter().map(|V| EdwardsPoint(V.0 * crate::INV_EIGHT())).collect::<Vec<_>>();
    let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
    let mut transcript = initial_transcript(V.iter());
    V.iter_mut().for_each(|V| *V = V.mul_by_cofactor());
    let mut V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();

    // Pad V
    while V.len() < padded_pow_of_2(V.len()) {
@@ -196,7 +190,13 @@ impl AggregateRangeStatement {
    let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
    for j in 1 ..= V.len() {
      d_js.push(Self::d_j(j, V.len()));
      a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
      #[allow(clippy::map_unwrap_or)]
      a_l.0.append(
        &mut u64_decompose(
          *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
        )
        .0,
      );
    }

    let a_r = a_l.clone() - Scalar::ONE;
@@ -223,8 +223,8 @@ impl AggregateRangeStatement {
    let a_l = a_l - z;
    let a_r = a_r + &d_descending_y_plus_z;
    let mut alpha = alpha;
    for j in 1 ..= witness.gammas.len() {
      alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
    for j in 1 ..= witness.0.len() {
      alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
    }

    Some(AggregateRangeProof {
@@ -244,9 +244,11 @@ impl AggregateRangeStatement {
  ) -> bool {
    let Self { generators, V } = self;

    let mut V = V.into_iter().map(|V| EdwardsPoint(V.0 * crate::INV_EIGHT())).collect::<Vec<_>>();
    let V = V.into_iter().map(|V| V * crate::INV_EIGHT()).collect::<Vec<_>>();
    let mut transcript = initial_transcript(V.iter());
    V.iter_mut().for_each(|V| *V = V.mul_by_cofactor());
    // With the torsion clear, wrap it into an EdwardsPoint from dalek-ff-group
    // (which is prime-order)
    let V = V.into_iter().map(|V| EdwardsPoint(V.mul_by_cofactor())).collect::<Vec<_>>();

    let generators = generators.reduce(V.len() * N);


@@ -1,6 +1,7 @@
use std_shims::{sync::OnceLock, vec::Vec};

use dalek_ff_group::{Scalar, EdwardsPoint};
use curve25519_dalek::EdwardsPoint;
use dalek_ff_group::Scalar;

use monero_generators::{hash_to_point as raw_hash_to_point};
use crate::{hash, hash_to_scalar as dalek_hash};

@@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{
  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
};

// Figure 1
// Figure 1 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct WipStatement {
  generators: Generators,

@@ -12,47 +12,50 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, ConditionallySelectable};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
  traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
};

use crate::{
  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  ringct::hash_to_point, serialize::*,
};

#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
#[cfg(feature = "multisig")]
pub(crate) use multisig::add_key_image_share;
pub(crate) use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};

/// Errors returned when CLSAG signing fails.
/// Errors when working with CLSAGs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum ClsagError {
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  /// The ring was invalid (such as being too small or too large).
  #[cfg_attr(feature = "std", error("invalid ring"))]
  InvalidRing,
  /// The specified ring member was invalid (index, ring size).
  #[cfg_attr(feature = "std", error("invalid ring member (member {0}, ring size {1})"))]
  InvalidRingMember(u8, u8),
  /// The commitment opening provided did not match the ring member's.
  #[cfg_attr(feature = "std", error("invalid commitment"))]
  InvalidCommitment,
  /// The key image was invalid (such as being identity or torsioned).
  #[cfg_attr(feature = "std", error("invalid key image"))]
  InvalidImage,
  /// The `D` component was invalid.
  #[cfg_attr(feature = "std", error("invalid D"))]
  InvalidD,
  /// The `s` vector was invalid.
  #[cfg_attr(feature = "std", error("invalid s"))]
  InvalidS,
  /// The `c1` variable was invalid.
  #[cfg_attr(feature = "std", error("invalid c1"))]
  InvalidC1,
}

/// Input being signed for.
/// Context on the ring member being signed for.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagInput {
  // The actual commitment for the true spend
@@ -65,7 +68,7 @@ impl ClsagInput {
  pub fn new(commitment: Commitment, decoys: Decoys) -> Result<ClsagInput, ClsagError> {
    let n = decoys.len();
    if n > u8::MAX.into() {
      Err(ClsagError::InternalError("max ring size in this library is u8 max"))?;
      Err(ClsagError::InvalidRing)?;
    }
    let n = u8::try_from(n).unwrap();
    if decoys.i >= n {
@@ -100,8 +103,11 @@ fn core(
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
  let n = ring.len();

  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
  let D = D * INV_EIGHT();
  let images_precomp = match A_c1 {
    Mode::Sign(..) => None,
    Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
  };
  let D_INV_EIGHT = D * INV_EIGHT();

  // Generate the transcript
  // Instead of generating multiple, a single transcript is created and then edited as needed
@@ -130,7 +136,7 @@ fn core(
  }

  to_hash.extend(I.compress().to_bytes());
  to_hash.extend(D.compress().to_bytes());
  to_hash.extend(D_INV_EIGHT.compress().to_bytes());
  to_hash.extend(pseudo_out.compress().to_bytes());
  // mu_P with agg_0
  let mu_P = hash_to_scalar(&to_hash);
@@ -174,10 +180,25 @@ fn core(
    let c_p = mu_P * c;
    let c_c = mu_C * c;

    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
    // (s_i * G) + (c_p * P_i) + (c_c * C_i)
    let L = match A_c1 {
      Mode::Sign(..) => {
        EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
      }
      Mode::Verify(..) => {
        BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
      }
    };

    let PH = hash_to_point(&P[i]);
    // Shouldn't be an issue as all of the variables in this vartime statement are public
    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

    // (c_p * I) + (c_c * D) + (s_i * PH)
    let R = match A_c1 {
      Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
      Mode::Verify(..) => {
        images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
      }
    };

    to_hash.truncate(((2 * n) + 3) * 32);
    to_hash.extend(L.compress().to_bytes());
@@ -191,15 +212,22 @@ fn core(
  }

  // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
  ((D, c * mu_P, c * mu_C), c1)
  ((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
}

/// CLSAG signature, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Clsag {
  pub D: EdwardsPoint,
  pub s: Vec<Scalar>,
  pub c1: Scalar,
  D: EdwardsPoint,
  pub(crate) s: Vec<Scalar>,
  pub(crate) c1: Scalar,
}

pub(crate) struct ClsagSignCore {
  incomplete_clsag: Clsag,
  pseudo_out: EdwardsPoint,
  key_challenge: Scalar,
  challenged_mask: Scalar,
}

impl Clsag {
@@ -213,28 +241,34 @@ impl Clsag {
    msg: &[u8; 32],
    A: EdwardsPoint,
    AH: EdwardsPoint,
  ) -> (Clsag, EdwardsPoint, Scalar, Scalar) {
  ) -> ClsagSignCore {
    let r: usize = input.decoys.i.into();

    let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
    let z = input.commitment.mask - mask;
    let mask_delta = input.commitment.mask - mask;

    let H = hash_to_point(&input.decoys.ring[r][0]);
    let D = H * z;
    let D = H * mask_delta;
    let mut s = Vec::with_capacity(input.decoys.ring.len());
    for _ in 0 .. input.decoys.ring.len() {
      s.push(random_scalar(rng));
    }
    let ((D, p, c), c1) =
    let ((D, c_p, c_c), c1) =
      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));

    (Clsag { D, s, c1 }, pseudo_out, p, c * z)
    ClsagSignCore {
      incomplete_clsag: Clsag { D, s, c1 },
      pseudo_out,
      key_challenge: c_p,
      challenged_mask: c_c * mask_delta,
    }
  }

  /// Generate CLSAG signatures for the given inputs.
  ///
  /// inputs is of the form (private key, key image, input).
  /// sum_outputs is for the sum of the outputs' commitment masks.
  pub fn sign<R: RngCore + CryptoRng>(
  pub(crate) fn sign<R: RngCore + CryptoRng>(
    rng: &mut R,
    mut inputs: Vec<(Zeroizing<Scalar>, EdwardsPoint, ClsagInput)>,
    sum_outputs: Scalar,
@@ -251,18 +285,25 @@ impl Clsag {
      }

      let mut nonce = Zeroizing::new(random_scalar(rng));
      let (mut clsag, pseudo_out, p, c) = Clsag::sign_core(
        rng,
        &inputs[i].1,
        &inputs[i].2,
        mask,
        &msg,
        nonce.deref() * ED25519_BASEPOINT_TABLE,
        nonce.deref() *
          hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
      );
      clsag.s[usize::from(inputs[i].2.decoys.i)] =
        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();
      let ClsagSignCore { mut incomplete_clsag, pseudo_out, key_challenge, challenged_mask } =
        Clsag::sign_core(
          rng,
          &inputs[i].1,
          &inputs[i].2,
          mask,
          &msg,
          nonce.deref() * ED25519_BASEPOINT_TABLE,
          nonce.deref() *
            hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
        );
      // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
      // member's commitment and our input commitment (which will only have a known discrete log
      // over G if the amounts cancel out)
      incomplete_clsag.s[usize::from(inputs[i].2.decoys.i)] =
        nonce.deref() - ((key_challenge * inputs[i].0.deref()) + challenged_mask);
      let clsag = incomplete_clsag;

      // Zeroize private keys and nonces.
      inputs[i].0.zeroize();
      nonce.zeroize();

@@ -292,7 +333,7 @@ impl Clsag {
    if ring.len() != self.s.len() {
      Err(ClsagError::InvalidS)?;
    }
    if I.is_identity() {
    if I.is_identity() || (!I.is_torsion_free()) {
      Err(ClsagError::InvalidImage)?;
    }

@@ -312,12 +353,14 @@ impl Clsag {
    (ring_len * 32) + 32 + 32
  }

  /// Write the CLSAG to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_raw_vec(write_scalar, &self.s, w)?;
    w.write_all(&self.c1.to_bytes())?;
    write_point(&self.D, w)
  }

  /// Read a CLSAG from a reader.
  pub fn read<R: Read>(decoys: usize, r: &mut R) -> io::Result<Clsag> {
    Ok(Clsag { s: read_raw_vec(read_scalar, decoys, r)?, c1: read_scalar(r)?, D: read_point(r)? })
  }

@@ -1,5 +1,8 @@
use core::{ops::Deref, fmt::Debug};
use std_shims::io::{self, Read, Write};
use std_shims::{
  io::{self, Read, Write},
  collections::HashMap,
};
use std::sync::{Arc, RwLock};

use rand_core::{RngCore, CryptoRng, SeedableRng};
@@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use group::{ff::Field, Group, GroupEncoding};
use group::{
  ff::{Field, PrimeField},
  Group, GroupEncoding,
};

use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;
use frost::{
  dkg::lagrange,
  curve::Ed25519,
@@ -26,10 +31,6 @@ use crate::ringct::{
  clsag::{ClsagInput, Clsag},
};

fn dleq_transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"monero_key_image_dleq")
}

impl ClsagInput {
  fn transcript<T: Transcript>(&self, transcript: &mut T) {
    // Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -43,6 +44,7 @@ impl ClsagInput {
      // They're just an unreliable reference to this data which will be included in the message
      // if in use
      transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
      // This also transcripts the key image generator since it's derived from this key
      transcript.append_message(b"key", pair[0].compress().to_bytes());
      transcript.append_message(b"commitment", pair[1].compress().to_bytes())
    }
@@ -55,13 +57,13 @@ impl ClsagInput {

/// CLSAG input and the mask to use for it.
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagDetails {
pub(crate) struct ClsagDetails {
  input: ClsagInput,
  mask: Scalar,
}

impl ClsagDetails {
  pub fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
  pub(crate) fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
    ClsagDetails { input, mask }
  }
}
@@ -70,13 +72,11 @@ impl ClsagDetails {
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
pub struct ClsagAddendum {
  pub(crate) key_image: dfg::EdwardsPoint,
  dleq: DLEqProof<dfg::EdwardsPoint>,
}

impl WriteAddendum for ClsagAddendum {
  fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
    self.dleq.write(writer)
    writer.write_all(self.key_image.compress().to_bytes().as_ref())
  }
}

@@ -93,13 +93,12 @@ struct Interim {
/// FROST algorithm for producing a CLSAG signature.
#[allow(non_snake_case)]
#[derive(Clone, Debug)]
pub struct ClsagMultisig {
pub(crate) struct ClsagMultisig {
  transcript: RecommendedTranscript,

  pub(crate) H: EdwardsPoint,
  // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
  // an extra round
  image: EdwardsPoint,
  key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
  image: Option<dfg::EdwardsPoint>,

  details: Arc<RwLock<Option<ClsagDetails>>>,

@@ -108,7 +107,7 @@ pub struct ClsagMultisig {
}

impl ClsagMultisig {
  pub fn new(
  pub(crate) fn new(
    transcript: RecommendedTranscript,
    output_key: EdwardsPoint,
    details: Arc<RwLock<Option<ClsagDetails>>>,
@@ -117,7 +116,8 @@ impl ClsagMultisig {
      transcript,

      H: hash_to_point(&output_key),
      image: EdwardsPoint::identity(),
      key_image_shares: HashMap::new(),
      image: None,

      details,

@@ -135,20 +135,6 @@ impl ClsagMultisig {
  }
}

pub(crate) fn add_key_image_share(
  image: &mut EdwardsPoint,
  generator: EdwardsPoint,
  offset: Scalar,
  included: &[Participant],
  participant: Participant,
  share: EdwardsPoint,
) {
  if image.is_identity().into() {
    *image = generator * offset;
  }
  *image += share * lagrange::<dfg::Scalar>(participant, included).0;
}

impl Algorithm<Ed25519> for ClsagMultisig {
  type Transcript = RecommendedTranscript;
  type Addendum = ClsagAddendum;
@@ -160,23 +146,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {

  fn preprocess_addendum<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    _rng: &mut R,
    keys: &ThresholdKeys<Ed25519>,
  ) -> ClsagAddendum {
    ClsagAddendum {
      key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
      dleq: DLEqProof::prove(
        rng,
        // Doesn't take in a larger transcript object due to the usage of this
        // Every prover would immediately write their own DLEq proof, when they can only do so in
        // the proper order if they want to reach consensus
        // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
        // try to merge later in some form, when it should instead just merge xH (as it does)
        &mut dleq_transcript(),
        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
        keys.secret_share(),
      ),
    }
    ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
  }

  fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -190,7 +163,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
      Err(io::Error::other("non-canonical key image"))?;
    }

    Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
    Ok(ClsagAddendum { key_image: xH })
  }

  fn process_addendum(
@@ -199,33 +172,29 @@ impl Algorithm<Ed25519> for ClsagMultisig {
    l: Participant,
    addendum: ClsagAddendum,
  ) -> Result<(), FrostError> {
    // TODO: This check is faulty if two shares are additive inverses of each other
    if self.image.is_identity().into() {
    if self.image.is_none() {
      self.transcript.domain_separate(b"CLSAG");
      // Transcript the ring
      self.input().transcript(&mut self.transcript);
      // Transcript the mask
      self.transcript.append_message(b"mask", self.mask().to_bytes());

      // Init the image to the offset
      self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
    }

    // Transcript this participant's contribution
    self.transcript.append_message(b"participant", l.to_bytes());

    addendum
      .dleq
      .verify(
        &mut dleq_transcript(),
        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
        &[view.original_verification_share(l), addendum.key_image],
      )
      .map_err(|_| FrostError::InvalidPreprocess(l))?;

    self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
    add_key_image_share(
      &mut self.image,
      self.H,
      view.offset().0,
      view.included(),
      l,
      addendum.key_image.0,
    );

    // Accumulate the interpolated share
    let interpolated_key_image_share =
      addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
    *self.image.as_mut().unwrap() += interpolated_key_image_share;

    self
      .key_image_shares
      .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);

    Ok(())
  }
@@ -250,19 +219,24 @@ impl Algorithm<Ed25519> for ClsagMultisig {

    self.msg = Some(msg.try_into().expect("CLSAG message should be 32-bytes"));

    #[allow(non_snake_case)]
    let (clsag, pseudo_out, p, c) = Clsag::sign_core(
    let sign_core = Clsag::sign_core(
      &mut rng,
      &self.image,
      &self.image.expect("verifying a share despite never processing any addendums").0,
      &self.input(),
      self.mask(),
      self.msg.as_ref().unwrap(),
      nonce_sums[0][0].0,
      nonce_sums[0][1].0,
    );
    self.interim = Some(Interim { p, c, clsag, pseudo_out });
    self.interim = Some(Interim {
      p: sign_core.key_challenge,
      c: sign_core.challenged_mask,
      clsag: sign_core.incomplete_clsag,
      pseudo_out: sign_core.pseudo_out,
    });

    (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
    // r - p x, where p is the challenge for the keys
    *nonces[0] - dfg::Scalar(sign_core.key_challenge) * view.secret_share().deref()
  }

  #[must_use]
@@ -274,11 +248,13 @@ impl Algorithm<Ed25519> for ClsagMultisig {
  ) -> Option<Self::Signature> {
    let interim = self.interim.as_ref().unwrap();
    let mut clsag = interim.clsag.clone();
    // We produced shares as `r - p x`, yet the signature is `r - p x - c x`
    // Subtract `c x` (saved as `c`) now
    clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
    if clsag
      .verify(
        &self.input().decoys.ring,
        &self.image,
        &self.image.expect("verifying a signature despite never processing any addendums").0,
        &interim.pseudo_out,
        self.msg.as_ref().unwrap(),
      )
@@ -296,10 +272,61 @@ impl Algorithm<Ed25519> for ClsagMultisig {
    share: dfg::Scalar,
  ) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
    let interim = self.interim.as_ref().unwrap();
    Ok(vec![

    // For a share `r - p x`, the following two equalities should hold:
    // - `(r - p x)G == R.0 - pV`, where `V = xG`
    // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
    //
    // This is effectively a discrete log equality proof for:
    // V, K over G, H
    // with nonces
    // R.0, R.1
    // and solution
    // s
    //
    // Which is a batch-verifiable rewrite of the traditional CP93 proof
    // (and also writable as Generalized Schnorr Protocol)
    //
    // That means that given a proper challenge, this alone can be certainly argued to prove the
    // key image share is well-formed and the provided signature so proves for that.

    // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of
    // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
    // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
    // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).
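    // Added sketch (illustrative): with a random weight `w`, the two equalities
    // fold into one multiscalar equation which must sum to the identity,
    // matching `part_one`/`part_two` below:
    //   (s G + p V - R.0) + w (s H + p K - R.1) == 0
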
    let key_image_share = self.key_image_shares[&verification_share.to_bytes()];

    // Hash every variable relevant here, using the hash output as the random weight
    let mut weight_transcript =
      RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
    weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
    weight_transcript.append_message(b"H", self.H.to_bytes());
    weight_transcript.append_message(b"xG", verification_share.to_bytes());
    weight_transcript.append_message(b"xH", key_image_share.to_bytes());
    weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
    weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
    weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
    weight_transcript.append_message(b"s", share.to_repr());
    let weight = weight_transcript.challenge(b"weight");
    let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));

    let part_one = vec![
      (share, dfg::EdwardsPoint::generator()),
      (dfg::Scalar(interim.p), verification_share),
      // -(R.0 - pV) == -R.0 + pV
      (-dfg::Scalar::ONE, nonces[0][0]),
    ])
      (dfg::Scalar(interim.p), verification_share),
    ];

    let mut part_two = vec![
      (weight * share, dfg::EdwardsPoint(self.H)),
      // -(R.1 - pK) == -R.1 + pK
      (-weight, nonces[0][1]),
      (weight * dfg::Scalar(interim.p), key_image_share),
    ];

    let mut all = part_one;
    all.append(&mut part_two);
    Ok(all)
  }
}

@@ -11,28 +11,36 @@ use monero_generators::H;

use crate::{hash_to_scalar, ringct::hash_to_point, serialize::*};

/// Errors when working with MLSAGs.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum MlsagError {
  /// Invalid ring (such as too small or too large).
  #[cfg_attr(feature = "std", error("invalid ring"))]
  InvalidRing,
  /// Invalid amount of key images.
  #[cfg_attr(feature = "std", error("invalid amount of key images"))]
  InvalidAmountOfKeyImages,
  /// Invalid ss matrix.
  #[cfg_attr(feature = "std", error("invalid ss"))]
  InvalidSs,
  #[cfg_attr(feature = "std", error("key image was identity"))]
  IdentityKeyImage,
  /// Invalid key image.
  #[cfg_attr(feature = "std", error("invalid key image"))]
  InvalidKeyImage,
  /// Invalid ci vector.
  #[cfg_attr(feature = "std", error("invalid ci"))]
  InvalidCi,
}

/// A vector of rings, forming a matrix, to verify the MLSAG with.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingMatrix {
  matrix: Vec<Vec<EdwardsPoint>>,
}

impl RingMatrix {
  pub fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
  /// Construct a ring matrix from an already formatted series of points.
  fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
    // Monero requires that there is more than one ring member for MLSAG signatures:
    // https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
    // src/ringct/rctSigs.cpp#L462
@@ -60,11 +68,12 @@ impl RingMatrix {
|
||||
RingMatrix::new(matrix)
|
||||
}
|
||||
|
||||
pub fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
|
||||
/// Iterate the members of the matrix.
|
||||
fn iter(&self) -> impl Iterator<Item = &[EdwardsPoint]> {
|
||||
self.matrix.iter().map(AsRef::as_ref)
|
||||
}
|
||||
|
||||
/// Return the amount of members in the ring.
|
||||
/// Returns the amount of members in the ring.
|
||||
pub fn members(&self) -> usize {
|
||||
self.matrix.len()
|
||||
}
|
||||
@@ -79,13 +88,15 @@ impl RingMatrix {
|
||||
}
|
||||
}
|
||||
|
||||
/// The MLSAG linkable ring signature, as used in Monero.
|
||||
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
|
||||
pub struct Mlsag {
|
||||
pub ss: Vec<Vec<Scalar>>,
|
||||
pub cc: Scalar,
|
||||
ss: Vec<Vec<Scalar>>,
|
||||
cc: Scalar,
|
||||
}
|
||||
|
||||
impl Mlsag {
|
||||
/// Write the MLSAG to a writer.
|
||||
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
|
||||
for ss in &self.ss {
|
||||
write_raw_vec(write_scalar, ss, w)?;
|
||||
@@ -93,6 +104,7 @@ impl Mlsag {
|
||||
write_scalar(&self.cc, w)
|
||||
}
|
||||
|
||||
/// Read the MLSAG from a reader.
|
||||
pub fn read<R: Read>(mixins: usize, ss_2_elements: usize, r: &mut R) -> io::Result<Mlsag> {
|
||||
Ok(Mlsag {
|
||||
ss: (0 .. mixins)
|
||||
@@ -102,6 +114,7 @@ impl Mlsag {
|
||||
})
|
||||
}
|
||||
|
||||
/// Verify the MLSAG.
|
||||
pub fn verify(
|
||||
&self,
|
||||
msg: &[u8; 32],
|
||||
@@ -142,8 +155,8 @@ impl Mlsag {
|
||||
// Not all dimensions need to be linkable, e.g. commitments, and only linkable layers need
|
||||
// to have key images.
|
||||
if let Some(ki) = ki {
|
||||
if ki.is_identity() {
|
||||
Err(MlsagError::IdentityKeyImage)?;
|
||||
if ki.is_identity() || (!ki.is_torsion_free()) {
|
||||
Err(MlsagError::InvalidKeyImage)?;
|
||||
}
|
||||
|
||||
#[allow(non_snake_case)]
|
||||
@@ -164,8 +177,9 @@ impl Mlsag {
|
||||
}
|
||||
}
|
||||
|
||||
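The stricter check above also rejects torsioned key images: adding a small-order point to a valid image changes its serialization without changing the ring-signature math, so only torsion-free, non-identity images keep linkability sound. A minimal sketch of the predicate using curve25519-dalek's API (editorial example, not from the commit):

```rust
use curve25519_dalek::{edwards::EdwardsPoint, traits::IsIdentity};

// Mirrors the new check: a key image must be neither the identity nor
// carry a small-order (torsion) component.
fn acceptable_key_image(ki: &EdwardsPoint) -> bool {
  !ki.is_identity() && ki.is_torsion_free()
}
```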
/// An aggregate ring matrix builder, usable to set up the ring matrix to prove/verify an aggregate
/// MLSAG signature.
/// Builder for a RingMatrix when using an aggregate signature.
///
/// This handles the formatting as necessary.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct AggregateRingMatrixBuilder {
  key_ring: Vec<Vec<EdwardsPoint>>,
@@ -206,7 +220,7 @@ impl AggregateRingMatrixBuilder {
    Ok(())
  }

  /// Build and return the [`RingMatrix`]
  /// Build and return the [`RingMatrix`].
  pub fn build(mut self) -> Result<RingMatrix, MlsagError> {
    for (i, amount_commitment) in self.amounts_ring.drain(..).enumerate() {
      self.key_ring[i].push(amount_commitment);

@@ -23,7 +23,7 @@ pub mod bulletproofs;
use crate::{
  Protocol,
  serialize::*,
  ringct::{mlsag::Mlsag, clsag::Clsag, borromean::BorromeanRange, bulletproofs::Bulletproofs},
  ringct::{mlsag::Mlsag, clsag::Clsag, borromean::BorromeanRange, bulletproofs::Bulletproof},
};

/// Generate a key image for a given key. Defined as `x * hash_to_point(xG)`.
@@ -31,6 +31,7 @@ pub fn generate_key_image(secret: &Zeroizing<Scalar>) -> EdwardsPoint {
  hash_to_point(&(ED25519_BASEPOINT_TABLE * secret.deref())) * secret.deref()
}

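A small usage sketch of `generate_key_image` (paths assumed from the imports in this diff): the key image is a deterministic function of the spend key, independent of any ring, which is what makes key reuse detectable.

```rust
use rand_core::OsRng;
use zeroize::Zeroizing;
use monero_serai::{random_scalar, ringct::generate_key_image};

fn key_images_are_deterministic() {
  // The same secret always maps to the same key image.
  let secret = Zeroizing::new(random_scalar(&mut OsRng));
  assert_eq!(generate_key_image(&secret), generate_key_image(&secret));
}
```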
/// An encrypted amount.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum EncryptedAmount {
  Original { mask: [u8; 32], amount: [u8; 32] },
@@ -38,6 +39,7 @@ pub enum EncryptedAmount {
}

impl EncryptedAmount {
  /// Read an EncryptedAmount from a reader.
  pub fn read<R: Read>(compact: bool, r: &mut R) -> io::Result<EncryptedAmount> {
    Ok(if !compact {
      EncryptedAmount::Original { mask: read_bytes(r)?, amount: read_bytes(r)? }
@@ -46,6 +48,7 @@ impl EncryptedAmount {
    })
  }

  /// Write the EncryptedAmount to a writer.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      EncryptedAmount::Original { mask, amount } => {
@@ -57,6 +60,7 @@ impl EncryptedAmount {
    }
  }

/// The type of the RingCT data.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum RctType {
  /// No RCT proofs.
@@ -77,6 +81,18 @@ pub enum RctType {
}

impl RctType {
  /// Convert [`self`] to its byte representation.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::Null.to_byte(), 0);
  /// assert_eq!(RctType::MlsagAggregate.to_byte(), 1);
  /// assert_eq!(RctType::MlsagIndividual.to_byte(), 2);
  /// assert_eq!(RctType::Bulletproofs.to_byte(), 3);
  /// assert_eq!(RctType::BulletproofsCompactAmount.to_byte(), 4);
  /// assert_eq!(RctType::Clsag.to_byte(), 5);
  /// assert_eq!(RctType::BulletproofsPlus.to_byte(), 6);
  /// ```
  pub fn to_byte(self) -> u8 {
    match self {
      RctType::Null => 0,
@@ -89,6 +105,25 @@ impl RctType {
    }
  }

  /// Create [`Self`] from a byte representation.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::from_byte(0).unwrap(), RctType::Null);
  /// assert_eq!(RctType::from_byte(1).unwrap(), RctType::MlsagAggregate);
  /// assert_eq!(RctType::from_byte(2).unwrap(), RctType::MlsagIndividual);
  /// assert_eq!(RctType::from_byte(3).unwrap(), RctType::Bulletproofs);
  /// assert_eq!(RctType::from_byte(4).unwrap(), RctType::BulletproofsCompactAmount);
  /// assert_eq!(RctType::from_byte(5).unwrap(), RctType::Clsag);
  /// assert_eq!(RctType::from_byte(6).unwrap(), RctType::BulletproofsPlus);
  /// ```
  ///
  /// # Errors
  /// This function returns [`None`] if the byte representation is invalid.
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::from_byte(7), None);
  /// ```
  pub fn from_byte(byte: u8) -> Option<Self> {
    Some(match byte {
      0 => RctType::Null,
@@ -102,22 +137,45 @@ impl RctType {
    })
  }

  /// Returns true if this RctType uses compact encrypted amounts, false otherwise.
  ///
  /// ```rust
  /// # use monero_serai::ringct::*;
  /// assert_eq!(RctType::Null.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::MlsagAggregate.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::MlsagIndividual.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::Bulletproofs.compact_encrypted_amounts(), false);
  /// assert_eq!(RctType::BulletproofsCompactAmount.compact_encrypted_amounts(), true);
  /// assert_eq!(RctType::Clsag.compact_encrypted_amounts(), true);
  /// assert_eq!(RctType::BulletproofsPlus.compact_encrypted_amounts(), true);
  /// ```
  pub fn compact_encrypted_amounts(&self) -> bool {
    match self {
      RctType::Null |
      RctType::MlsagAggregate |
      RctType::MlsagIndividual |
      RctType::Bulletproofs => false,
      RctType::Null
        | RctType::MlsagAggregate
        | RctType::MlsagIndividual
        | RctType::Bulletproofs => false,
      RctType::BulletproofsCompactAmount | RctType::Clsag | RctType::BulletproofsPlus => true,
    }
  }
}

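Condensing the doc tests above into a single round-trip check (an editorial sketch):

```rust
use monero_serai::ringct::RctType;

fn rct_type_round_trips() {
  // Every defined RctType survives to_byte/from_byte; unknown bytes fail.
  for byte in 0u8 ..= 6 {
    assert_eq!(RctType::from_byte(byte).unwrap().to_byte(), byte);
  }
  assert_eq!(RctType::from_byte(7), None);
}
```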
/// The base of the RingCT data.
///
/// This excludes all proofs (which once initially verified do not need to be kept around) and
/// solely keeps data which either impacts the effects of the transactions or is needed to scan it.
///
/// The one exception for this is `pseudo_outs`, which was originally present here yet moved to
/// RctPrunable in a later hard fork (causing it to be present in both).
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctBase {
  /// The fee used by this transaction.
  pub fee: u64,
  /// The re-randomized amount commitments used within inputs.
  pub pseudo_outs: Vec<EdwardsPoint>,
  /// The encrypted amounts for the recipient to decrypt.
  pub encrypted_amounts: Vec<EncryptedAmount>,
  /// The output commitments.
  pub commitments: Vec<EdwardsPoint>,
}

@@ -127,6 +185,7 @@ impl RctBase {
    1 + (outputs * (8 + 32)) + varint_len(fee)
  }

  /// Write the RctBase to a writer.
  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    w.write_all(&[rct_type.to_byte()])?;
    match rct_type {
@@ -144,16 +203,17 @@ impl RctBase {
    }
  }

  /// Read a RctBase from a reader.
  pub fn read<R: Read>(inputs: usize, outputs: usize, r: &mut R) -> io::Result<(RctBase, RctType)> {
    let rct_type =
      RctType::from_byte(read_byte(r)?).ok_or_else(|| io::Error::other("invalid RCT type"))?;

    match rct_type {
      RctType::Null | RctType::MlsagAggregate | RctType::MlsagIndividual => {}
      RctType::Bulletproofs |
      RctType::BulletproofsCompactAmount |
      RctType::Clsag |
      RctType::BulletproofsPlus => {
      RctType::Bulletproofs
        | RctType::BulletproofsCompactAmount
        | RctType::Clsag
        | RctType::BulletproofsPlus => {
        if outputs == 0 {
          // Because the Bulletproofs(+) layout must be canonical, there must be 1 Bulletproof if
          // Bulletproofs are in use
@@ -171,12 +231,14 @@ impl RctBase {
    } else {
      RctBase {
        fee: read_varint(r)?,
        // Only read pseudo_outs if they have yet to be moved to RctPrunable
        // TODO: Shouldn't this be any Mlsag*?
        pseudo_outs: if rct_type == RctType::MlsagIndividual {
          read_raw_vec(read_point, inputs, r)?
        } else {
          vec![]
        },
        encrypted_amounts: (0 .. outputs)
        encrypted_amounts: (0..outputs)
          .map(|_| EncryptedAmount::read(rct_type.compact_encrypted_amounts(), r))
          .collect::<Result<_, _>>()?,
        commitments: read_raw_vec(read_point, outputs, r)?,
@@ -187,6 +249,7 @@ impl RctBase {
  }
}

/// The prunable portion of the RingCT data.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RctPrunable {
  Null,
@@ -199,12 +262,12 @@ pub enum RctPrunable {
    mlsags: Vec<Mlsag>,
  },
  MlsagBulletproofs {
    bulletproofs: Bulletproofs,
    bulletproofs: Bulletproof,
    mlsags: Vec<Mlsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
  Clsag {
    bulletproofs: Bulletproofs,
    bulletproofs: Bulletproof,
    clsags: Vec<Clsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
@@ -213,10 +276,14 @@ pub enum RctPrunable {
impl RctPrunable {
  pub(crate) fn fee_weight(protocol: Protocol, inputs: usize, outputs: usize) -> usize {
    // 1 byte for number of BPs (technically a VarInt, yet there's always just zero or one)
    1 + Bulletproofs::fee_weight(protocol.bp_plus(), outputs) +
      (inputs * (Clsag::fee_weight(protocol.ring_len()) + 32))
    1 + Bulletproof::fee_weight(protocol.bp_plus(), outputs)
      + (inputs * (Clsag::fee_weight(protocol.ring_len()) + 32))
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    match self {
      RctPrunable::Null => Ok(()),
@@ -249,12 +316,18 @@ impl RctPrunable {
    }
  }

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self, rct_type: RctType) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized, rct_type).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r`.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(
    rct_type: RctType,
    ring_length: usize,
@@ -281,7 +354,7 @@ impl RctPrunable {
      },
      RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
        borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
        mlsags: (0 .. inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
        mlsags: (0..inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
      },
      RctType::Bulletproofs | RctType::BulletproofsCompactAmount => {
        RctPrunable::MlsagBulletproofs {
@@ -294,11 +367,9 @@ impl RctPrunable {
          {
            Err(io::Error::other("n bulletproofs instead of one"))?;
          }
          Bulletproofs::read(r)?
          Bulletproof::read(r)?
        },
        mlsags: (0 .. inputs)
          .map(|_| Mlsag::read(ring_length, 2, r))
          .collect::<Result<_, _>>()?,
        mlsags: (0..inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
        pseudo_outs: read_raw_vec(read_point, inputs, r)?,
      }
    }
@@ -307,11 +378,9 @@ impl RctPrunable {
          if read_varint::<_, u64>(r)? != 1 {
            Err(io::Error::other("n bulletproofs instead of one"))?;
          }
          (if rct_type == RctType::Clsag { Bulletproofs::read } else { Bulletproofs::read_plus })(
            r,
          )?
          (if rct_type == RctType::Clsag { Bulletproof::read } else { Bulletproof::read_plus })(r)?
        },
        clsags: (0 .. inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
        clsags: (0..inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
        pseudo_outs: read_raw_vec(read_point, inputs, r)?,
      },
    })
@@ -320,19 +389,22 @@ impl RctPrunable {
  pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      RctPrunable::Null => panic!("Serializing RctPrunable::Null for a signature"),
      RctPrunable::AggregateMlsagBorromean { borromean, .. } |
      RctPrunable::MlsagBorromean { borromean, .. } => {
      RctPrunable::AggregateMlsagBorromean { borromean, .. }
        | RctPrunable::MlsagBorromean { borromean, .. } => {
        borromean.iter().try_for_each(|rs| rs.write(w))
      }
      RctPrunable::MlsagBulletproofs { bulletproofs, .. } |
      RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
      RctPrunable::MlsagBulletproofs { bulletproofs, .. }
        | RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
    }
  }
}

/// RingCT signature data.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctSignatures {
  /// The base of the RingCT data.
  pub base: RctBase,
  /// The prunable portion of the RingCT data.
  pub prunable: RctPrunable,
}

@@ -360,7 +432,7 @@ impl RctSignatures {
        }
      }
      RctPrunable::Clsag { bulletproofs, .. } => {
        if matches!(bulletproofs, Bulletproofs::Original { .. }) {
        if matches!(bulletproofs, Bulletproof::Original { .. }) {
          RctType::Clsag
        } else {
          RctType::BulletproofsPlus
@@ -373,18 +445,28 @@ impl RctSignatures {
    RctBase::fee_weight(outputs, fee) + RctPrunable::fee_weight(protocol, inputs, outputs)
  }

  /// Serialize [`Self`] into the writer `w`.
  ///
  /// # Errors
  /// This function returns any errors from the writer itself.
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    let rct_type = self.rct_type();
    self.base.write(w, rct_type)?;
    self.prunable.write(w, rct_type)
  }

  /// Serialize [`Self`] into a new byte buffer.
  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Create [`Self`] from the reader `r` and other data.
  ///
  /// # Errors
  /// This function returns an error if either the reader failed,
  /// or if the data could not be deserialized into a [`Self`].
  pub fn read<R: Read>(
    ring_length: usize,
    inputs: usize,

@@ -34,8 +34,10 @@ pub use http::*;
// src/wallet/wallet2.cpp#L121
const GRACE_BLOCKS_FOR_FEE_ESTIMATE: u64 = 10;

/// An empty marker struct representing an empty response.
#[derive(Deserialize, Debug)]
pub struct EmptyResponse {}
/// A generic JSON-RPC response.
#[derive(Deserialize, Debug)]
pub struct JsonRpcResponse<T> {
  result: T,
@@ -54,6 +56,7 @@ struct TransactionsResponse {
  txs: Vec<TransactionResponse>,
}

/// The response data from an [`Rpc::get_outs`] call.
#[derive(Deserialize, Debug)]
pub struct OutputResponse {
  pub height: usize,
@@ -63,27 +66,41 @@ pub struct OutputResponse {
  txid: String,
}

/// Possible errors that can occur from an RPC call.
///
/// This represents errors on the client side, as well
/// as valid error responses from the server.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum RpcError {
  /// There was an internal error.
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  /// There was a connection error.
  #[cfg_attr(feature = "std", error("connection error ({0})"))]
  ConnectionError(String),
  /// The data response received from the node was invalid.
  #[cfg_attr(feature = "std", error("invalid node ({0})"))]
  InvalidNode(String),
  /// The Monero [`Protocol`] version was invalid.
  #[cfg_attr(feature = "std", error("unsupported protocol version ({0})"))]
  UnsupportedProtocol(usize),
  /// Requested transaction hashes were not found.
  #[cfg_attr(feature = "std", error("transactions not found"))]
  TransactionsNotFound(Vec<[u8; 32]>),
  /// A curve point received from the node was invalid.
  #[cfg_attr(feature = "std", error("invalid point ({0})"))]
  InvalidPoint(String),
  /// The transaction(s) requested were pruned from the node.
  #[cfg_attr(feature = "std", error("pruned transaction"))]
  PrunedTransaction,
  /// An invalid transaction was either sent to or received from the node.
  #[cfg_attr(feature = "std", error("invalid transaction ({0:?})"))]
  InvalidTransaction([u8; 32]),
  /// The node failed to return a fee.
  #[cfg_attr(feature = "std", error("unexpected fee response"))]
  InvalidFee,
  /// The transaction priority level given was invalid.
  #[cfg_attr(feature = "std", error("invalid priority"))]
  InvalidPriority,
}
@@ -114,12 +131,15 @@ fn read_epee_vi<R: io::Read>(reader: &mut R) -> io::Result<u64> {
    _ => unreachable!(),
  };
  let mut vi = u64::from(vi_start >> 2);
  for i in 1 .. len {
  for i in 1..len {
    vi |= u64::from(read_byte(reader)?) << (((i - 1) * 8) + 6);
  }
  Ok(vi)
}
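For context on `read_epee_vi`: epee varints pack a length tag into the low two bits of the first byte (0, 1, 2, 3 meaning 1, 2, 4, or 8 bytes total) and store the value shifted left by two. A sketch of the inverse, not part of this diff and illustrative only:

```rust
// Inverse of read_epee_vi for values below 2^62 (larger values don't fit).
fn write_epee_vi(value: u64) -> Vec<u8> {
  let (tag, len) = match value {
    v if v < (1 << 6) => (0u64, 1),
    v if v < (1 << 14) => (1, 2),
    v if v < (1 << 30) => (2, 4),
    _ => (3, 8),
  };
  ((value << 2) | tag).to_le_bytes()[.. len].to_vec()
}
```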

/// A trait representing an RPC connection.
///
/// Note that [`HttpRpc`] already implements this trait.
#[async_trait]
pub trait RpcConnection: Clone + Debug {
  /// Perform a POST request to the specified route with the specified body.
@@ -128,6 +148,7 @@ pub trait RpcConnection: Clone + Debug {
  async fn post(&self, route: &str, body: Vec<u8>) -> Result<Vec<u8>, RpcError>;
}

/// A generic RPC client.
// TODO: Make this provided methods for RpcConnection?
#[derive(Clone, Debug)]
pub struct Rpc<R: RpcConnection>(R);
@@ -202,6 +223,7 @@ impl<R: RpcConnection> Rpc<R> {
    )
  }

  /// Get the node's current block height.
  pub async fn get_height(&self) -> Result<usize, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeightResponse {
@@ -210,6 +232,7 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(self.rpc_call::<Option<()>, HeightResponse>("get_height", None).await?.height)
  }

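A hypothetical usage sketch composing the methods documented in this hunk (the method set is taken from this diff; the composition itself is illustrative only):

```rust
async fn tip_block<R: RpcConnection>(rpc: &Rpc<R>) -> Result<Block, RpcError> {
  // get_height returns the chain height (tip index + 1), so subtract one.
  let height = rpc.get_height().await?;
  rpc.get_block_by_number(height.saturating_sub(1)).await
}
```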
  /// Get [`Transaction`]s by their `hashes`.
  pub async fn get_transactions(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    if hashes.is_empty() {
      return Ok(vec![]);
@@ -274,6 +297,7 @@ impl<R: RpcConnection> Rpc<R> {
      .collect()
  }

  /// Get a single [`Transaction`] by its hash.
  pub async fn get_transaction(&self, tx: [u8; 32]) -> Result<Transaction, RpcError> {
    self.get_transactions(&[tx]).await.map(|mut txs| txs.swap_remove(0))
  }
@@ -314,6 +338,7 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(block)
  }

  /// Get a [`Block`] by its height number.
  pub async fn get_block_by_number(&self, number: usize) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
@@ -341,6 +366,7 @@ impl<R: RpcConnection> Rpc<R> {
    }
  }

  /// Get all the [`Transaction`]s belonging to the corresponding block `hash`.
  pub async fn get_block_transactions(&self, hash: [u8; 32]) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block(hash).await?;
    let mut res = vec![block.miner_tx];
@@ -405,7 +431,7 @@ impl<R: RpcConnection> Rpc<R> {
    let read_object = |reader: &mut &[u8]| -> io::Result<Vec<u64>> {
      let fields = read_byte(reader)? >> 2;

      for _ in 0 .. fields {
      for _ in 0..fields {
        let name_len = read_byte(reader)?;
        let name = read_raw_vec(read_byte, name_len.into(), reader)?;

@@ -458,7 +484,7 @@ impl<R: RpcConnection> Rpc<R> {
      };

      let mut bytes_res = vec![];
      for _ in 0 .. iters {
      for _ in 0..iters {
        bytes_res.push(f(reader)?);
      }

@@ -478,8 +504,8 @@ impl<R: RpcConnection> Rpc<R> {
    if bytes_res
      .first()
      .ok_or_else(|| io::Error::other("status wasn't a string"))?
      .as_slice() !=
      b"OK"
      .as_slice()
      != b"OK"
    {
      // TODO: Better handle non-OK responses
      Err(io::Error::other("response wasn't OK"))?;
@@ -623,6 +649,7 @@ impl<R: RpcConnection> Rpc<R> {
      .collect()
  }

  /// Get a [`Fee`] based on [`Protocol::v14`] rules.
  async fn get_fee_v14(&self, priority: FeePriority) -> Result<Fee, RpcError> {
    #[derive(Deserialize, Debug)]
    struct FeeResponseV14 {
@@ -702,6 +729,7 @@ impl<R: RpcConnection> Rpc<R> {
    }
  }

  /// Broadcast a [`Transaction`] to the network.
  pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
@@ -730,6 +758,13 @@ impl<R: RpcConnection> Rpc<R> {
    Ok(())
  }

  /// Generate blocks.
  ///
  /// - `address` is the address that will receive the coinbase reward
  /// - `block_count` is the number of blocks that will be generated
  ///
  /// Note this is only for testing with nodes started with `--regtest`, see:
  /// <https://www.getmonero.org/resources/developer-guides/daemon-rpc.html#generateblocks>.
  // TODO: Take &Address, not &str?
  pub async fn generate_blocks(
    &self,

@@ -7,7 +7,8 @@ use multiexp::BatchVerifier;

use crate::{
  Commitment, random_scalar,
  ringct::bulletproofs::{Bulletproofs, original::OriginalStruct},
  ringct::bulletproofs::{Bulletproof, original::OriginalStruct},
  wallet::TransactionError,
};

mod plus;
@@ -18,7 +19,7 @@ fn bulletproofs_vector() {
  let point = |point| decompress_point(point).unwrap();

  // Generated from Monero
  assert!(Bulletproofs::Original(OriginalStruct {
  assert!(Bulletproof::Original(OriginalStruct {
    A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
    S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
    T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
@@ -70,7 +71,11 @@ macro_rules! bulletproofs_tests {
      .map(|i| Commitment::new(random_scalar(&mut OsRng), u64::try_from(i).unwrap()))
      .collect::<Vec<_>>();

    let bp = Bulletproofs::prove(&mut OsRng, &commitments, $plus).unwrap();
    let bp = if $plus {
      Bulletproof::prove_plus(&mut OsRng, commitments.clone()).unwrap()
    } else {
      Bulletproof::prove(&mut OsRng, &commitments).unwrap()
    };

    let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
    assert!(bp.verify(&mut OsRng, &commitments));
@@ -86,7 +91,15 @@ macro_rules! bulletproofs_tests {
    for _ in 0 .. 17 {
      commitments.push(Commitment::new(Scalar::ZERO, 0));
    }
    assert!(Bulletproofs::prove(&mut OsRng, &commitments, $plus).is_err());
    assert_eq!(
      (if $plus {
        Bulletproof::prove_plus(&mut OsRng, commitments)
      } else {
        Bulletproof::prove(&mut OsRng, &commitments)
      })
      .unwrap_err(),
      TransactionError::TooManyOutputs,
    );
  }
};
}

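The `$plus` flag no longer selects a branch inside a single `prove`; callers now pick the constructor themselves. A sketch of a helper unifying the two, with signatures assumed from the call sites above (editorial, not part of the commit):

```rust
use rand_core::{RngCore, CryptoRng};

fn prove_either<R: RngCore + CryptoRng>(
  rng: &mut R,
  commitments: Vec<Commitment>,
  plus: bool,
) -> Result<Bulletproof, TransactionError> {
  // prove_plus takes ownership of the commitments; prove only borrows them.
  if plus {
    Bulletproof::prove_plus(rng, commitments)
  } else {
    Bulletproof::prove(rng, &commitments)
  }
}
```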
@@ -2,7 +2,7 @@ use rand_core::{RngCore, OsRng};

use multiexp::BatchVerifier;
use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};
use dalek_ff_group::Scalar;

use crate::{
  Commitment,
@@ -19,9 +19,9 @@ fn test_aggregate_range_proof() {
  for _ in 0 .. m {
    commitments.push(Commitment::new(*Scalar::random(&mut OsRng), OsRng.next_u64()));
  }
  let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
  let commitment_points = commitments.iter().map(Commitment::calculate).collect();
  let statement = AggregateRangeStatement::new(commitment_points).unwrap();
  let witness = AggregateRangeWitness::new(&commitments).unwrap();
  let witness = AggregateRangeWitness::new(commitments).unwrap();

  let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
  statement.verify(&mut OsRng, &mut verifier, (), proof);

@@ -12,7 +12,7 @@ use crate::{
  Protocol, hash,
  serialize::*,
  ring_signatures::RingSignature,
  ringct::{bulletproofs::Bulletproofs, RctType, RctBase, RctPrunable, RctSignatures},
  ringct::{bulletproofs::Bulletproof, RctType, RctBase, RctPrunable, RctSignatures},
};

#[derive(Clone, PartialEq, Eq, Debug)]
@@ -426,7 +426,7 @@ impl Transaction {
    if !(bp || bp_plus) {
      blob_size
    } else {
      blob_size + Bulletproofs::calculate_bp_clawback(bp_plus, self.prefix.outputs.len()).0
      blob_size + Bulletproof::calculate_bp_clawback(bp_plus, self.prefix.outputs.len()).0
    }
  }
}

@@ -34,10 +34,6 @@ impl UnreducedScalar {
    Ok(UnreducedScalar(read_bytes(r)?))
  }

  pub fn as_bytes(&self) -> &[u8; 32] {
    &self.0
  }

  fn as_bits(&self) -> [u8; 256] {
    let mut bits = [0; 256];
    for (i, bit) in bits.iter_mut().enumerate() {

@@ -1,13 +1,5 @@
use std_shims::{vec::Vec, collections::HashSet};

#[cfg(feature = "cache-distribution")]
use std_shims::sync::OnceLock;

#[cfg(all(feature = "cache-distribution", not(feature = "std")))]
use std_shims::sync::Mutex;
#[cfg(all(feature = "cache-distribution", feature = "std"))]
use async_lock::Mutex;

use zeroize::{Zeroize, ZeroizeOnDrop};

use rand_core::{RngCore, CryptoRng};
@@ -29,16 +21,6 @@ const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
#[allow(clippy::cast_precision_loss)]
const TIP_APPLICATION: f64 = (DEFAULT_LOCK_WINDOW * BLOCK_TIME) as f64;

// TODO: Resolve safety of this in case a reorg occurs/the network changes
// TODO: Update this when scanning a block, as possible
#[cfg(feature = "cache-distribution")]
static DISTRIBUTION_CELL: OnceLock<Mutex<Vec<u64>>> = OnceLock::new();
#[cfg(feature = "cache-distribution")]
#[allow(non_snake_case)]
fn DISTRIBUTION() -> &'static Mutex<Vec<u64>> {
  DISTRIBUTION_CELL.get_or_init(|| Mutex::new(Vec::with_capacity(3000000)))
}

#[allow(clippy::too_many_arguments)]
async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
  rng: &mut R,
@@ -158,14 +140,6 @@ async fn select_decoys<R: RngCore + CryptoRng, RPC: RpcConnection>(
  inputs: &[SpendableOutput],
  fingerprintable_canonical: bool,
) -> Result<Vec<Decoys>, RpcError> {
  #[cfg(feature = "cache-distribution")]
  #[cfg(not(feature = "std"))]
  let mut distribution = DISTRIBUTION().lock();
  #[cfg(feature = "cache-distribution")]
  #[cfg(feature = "std")]
  let mut distribution = DISTRIBUTION().lock().await;

  #[cfg(not(feature = "cache-distribution"))]
  let mut distribution = vec![];

  let decoy_count = ring_len - 1;

@@ -10,7 +10,7 @@ use curve25519_dalek::{
};

use crate::{
  hash, hash_to_scalar, serialize::write_varint, ringct::EncryptedAmount, transaction::Input,
  hash, hash_to_scalar, serialize::write_varint, Commitment, ringct::EncryptedAmount, transaction::Input,
};

pub mod extra;
@@ -94,18 +94,21 @@ pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar {
  hash_to_scalar(&mask)
}

pub(crate) fn amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
pub(crate) fn compact_amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
  let mut amount_mask = b"amount".to_vec();
  amount_mask.extend(key.to_bytes());
  (amount ^ u64::from_le_bytes(hash(&amount_mask)[.. 8].try_into().unwrap())).to_le_bytes()
}

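Since `compact_amount_encryption` just XORs the amount against a hash of the shared key, it is its own inverse; that property is what lets `decrypt` below reuse it. A sketch (crate-internal names from this diff):

```rust
// Applying the keyed XOR twice returns the original amount.
fn compact_encryption_is_involutive(amount: u64, key: Scalar) -> bool {
  let once = compact_amount_encryption(amount, key);
  let twice = compact_amount_encryption(u64::from_le_bytes(once), key);
  u64::from_le_bytes(twice) == amount
}
```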
// TODO: Move this under EncryptedAmount?
fn amount_decryption(amount: &EncryptedAmount, key: Scalar) -> (Scalar, u64) {
  match amount {
    EncryptedAmount::Original { mask, amount } => {
      #[cfg(feature = "experimental")]
      {
impl EncryptedAmount {
  /// Decrypt an EncryptedAmount into the Commitment it encrypts.
  ///
  /// The caller must verify the decrypted Commitment matches with the actual Commitment used
  /// within the Monero protocol.
  pub fn decrypt(&self, key: Scalar) -> Commitment {
    match self {
      // TODO: Add a test vector for this
      EncryptedAmount::Original { mask, amount } => {
        let mask_shared_sec = hash(key.as_bytes());
        let mask =
          Scalar::from_bytes_mod_order(*mask) - Scalar::from_bytes_mod_order(mask_shared_sec);
@@ -116,20 +119,13 @@ fn amount_decryption(amount: &EncryptedAmount, key: Scalar) -> (Scalar, u64) {
        // d2b from rctTypes.cpp
        let amount = u64::from_le_bytes(amount_scalar.to_bytes()[0 .. 8].try_into().unwrap());

        (mask, amount)
      }

      #[cfg(not(feature = "experimental"))]
      {
        let _ = mask;
        let _ = amount;
        todo!("decrypting a legacy monero transaction's amount")
        Commitment::new(mask, amount)
      }
      EncryptedAmount::Compact { amount } => Commitment::new(
        commitment_mask(key),
        u64::from_le_bytes(compact_amount_encryption(u64::from_le_bytes(*amount), key)),
      ),
    }
    EncryptedAmount::Compact { amount } => (
      commitment_mask(key),
      u64::from_le_bytes(amount_encryption(u64::from_le_bytes(*amount), key)),
    ),
  }
}

@@ -17,9 +17,7 @@ use crate::{
  transaction::{Input, Timelock, Transaction},
  block::Block,
  rpc::{RpcError, RpcConnection, Rpc},
  wallet::{
    PaymentId, Extra, address::SubaddressIndex, Scanner, uniqueness, shared_key, amount_decryption,
  },
  wallet::{PaymentId, Extra, address::SubaddressIndex, Scanner, uniqueness, shared_key},
};

/// An absolute output ID, defined as its transaction hash and output index.
@@ -427,15 +425,13 @@ impl Scanner {
            commitment.amount = amount;
          // Regular transaction
          } else {
            let (mask, amount) = match tx.rct_signatures.base.encrypted_amounts.get(o) {
              Some(amount) => amount_decryption(amount, shared_key),
            commitment = match tx.rct_signatures.base.encrypted_amounts.get(o) {
              Some(amount) => amount.decrypt(shared_key),
              // This should never happen, yet it may be possible with miner transactions?
              // Using get just decreases the possibility of a panic and lets us move on in that case
              None => break,
            };

            // Rebuild the commitment to verify it
            commitment = Commitment::new(mask, amount);
            // If this is a malicious commitment, move to the next output
            // Any other R value will calculate to a different spend key and are therefore ignorable
            if Some(&commitment.calculate()) != tx.rct_signatures.base.commitments.get(o) {

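A sketch of the invariant the scanner enforces here (editorial; types from this diff): a decrypted commitment is only accepted if it re-derives the commitment actually on chain.

```rust
fn verified_commitment(
  encrypted: &EncryptedAmount,
  shared_key: Scalar,
  on_chain: &EdwardsPoint,
) -> Option<Commitment> {
  let commitment = encrypted.decrypt(shared_key);
  // A malicious commitment decrypts to something else and is ignored.
  (&commitment.calculate() == on_chain).then_some(commitment)
}
```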
@@ -31,7 +31,7 @@ use crate::{
  ringct::{
    generate_key_image,
    clsag::{ClsagError, ClsagInput, Clsag},
    bulletproofs::{MAX_OUTPUTS, Bulletproofs},
    bulletproofs::{MAX_OUTPUTS, Bulletproof},
    RctBase, RctPrunable, RctSignatures,
  },
  transaction::{Input, Output, Timelock, TransactionPrefix, Transaction},
@@ -39,7 +39,7 @@ use crate::{
  wallet::{
    address::{Network, AddressSpec, MoneroAddress},
    ViewPair, SpendableOutput, Decoys, PaymentId, ExtraField, Extra, key_image_sort, uniqueness,
    shared_key, commitment_mask, amount_encryption,
    shared_key, commitment_mask, compact_amount_encryption,
    extra::{ARBITRARY_DATA_MARKER, MAX_ARBITRARY_DATA_SIZE},
  },
};
@@ -92,7 +92,7 @@ impl SendOutput {
        view_tag,
        dest: ((&shared_key * ED25519_BASEPOINT_TABLE) + output.0.spend),
        commitment: Commitment::new(commitment_mask(shared_key), output.1),
        amount: amount_encryption(output.1, shared_key),
        amount: compact_amount_encryption(output.1, shared_key),
      },
      payment_id,
    )
@@ -783,7 +783,11 @@ impl SignableTransaction {
    let sum = commitments.iter().map(|commitment| commitment.mask).sum();

    // Safe due to the constructor checking MAX_OUTPUTS
    let bp = Bulletproofs::prove(rng, &commitments, self.protocol.bp_plus()).unwrap();
    let bp = if self.protocol.bp_plus() {
      Bulletproof::prove_plus(rng, commitments.clone()).unwrap()
    } else {
      Bulletproof::prove(rng, &commitments).unwrap()
    };

    // Create the TX extra
    let extra = Self::extra(
@@ -932,7 +936,7 @@ impl Eventuality {
      return false;
    }

    // TODO: Remove this when the following for loop is updated
    // TODO: Remove this if/when the following for loop is updated to support older TXs
    assert!(
      rct_type.compact_encrypted_amounts(),
      "created an Eventuality for a very old RctType we don't support proving for"

@@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript};
use frost::{
  curve::Ed25519,
  Participant, FrostError, ThresholdKeys,
  dkg::lagrange,
  sign::{
    Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
    SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -27,7 +28,7 @@ use frost::{
use crate::{
  random_scalar,
  ringct::{
    clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
    clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
    RctPrunable,
  },
  transaction::{Input, Transaction},
@@ -261,8 +262,13 @@ impl SignMachine<Transaction> for TransactionSignMachine {
    included.push(self.i);
    included.sort_unstable();

    // Convert the unified commitments to a Vec of the individual commitments
    // Start calculating the key images, as needed on the TX level
    let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
    for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
      *image = generator * offset;
    }

    // Convert the serialized nonces commitments to a parallelized Vec
    let mut commitments = (0 .. self.clsags.len())
      .map(|c| {
        included
@@ -291,14 +297,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
        // provides the easiest API overall, as this is where the TX is (which needs the key
        // images in its message), along with where the outputs are determined (where our
        // outputs may need these in order to guarantee uniqueness)
        add_key_image_share(
          &mut images[c],
          self.key_images[c].0,
          self.key_images[c].1,
          &included,
          *l,
          preprocess.addendum.key_image.0,
        );
        images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;

        Ok((*l, preprocess))
      })

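The replaced helper is equivalent to direct Lagrange interpolation of the key image shares at zero: each included participant l contributes I_l = x_l · H_p(xG), and with S the set of included participants (an editorial restatement of the code above):

```latex
\[
  I \;=\; \sum_{l \in S} \lambda_l\, I_l,
  \qquad
  \lambda_l \;=\; \prod_{\substack{m \in S \\ m \neq l}} \frac{m}{m - l}
\]
```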
@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }

[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }

@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg;

use crate::{
  p2p::{CosignedBlock, GossipMessageKind, P2p},
  p2p::{CosignedBlock, P2pMessageKind, P2p},
  substrate::LatestCosignedBlock,
};

@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
      for cosign in cosigns {
        let mut buf = vec![];
        cosign.serialize(&mut buf).unwrap();
        P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
        P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
      }
      sleep(Duration::from_secs(60)).await;
    }

@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
      cosign_channel.send(cosigned_block).unwrap();
      let mut buf = vec![];
      cosigned_block.serialize(&mut buf).unwrap();
      P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
      P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
      None
    }
    // This causes an action on Substrate yet not on any Tributary
@@ -1292,6 +1292,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
    p2p.clone(),
    cosign_channel.clone(),
    tributary_event_listener_4,
    <Ristretto as Ciphersuite>::generator() * key.deref(),
  ));

  // Handle all messages from processors

@@ -1,7 +1,7 @@
use core::{time::Duration, fmt};
use std::{
  sync::Arc,
  io::{self, Read},
  io::Read,
  collections::{HashSet, HashMap},
  time::{SystemTime, Instant},
};
@@ -9,13 +9,15 @@ use std::{
use async_trait::async_trait;
use rand_core::{RngCore, OsRng};

use ciphersuite::{Ciphersuite, Ristretto};

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

use serai_db::Db;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt};
use futures_util::StreamExt;
use tokio::{
  sync::{Mutex, RwLock, mpsc, broadcast},
  time::sleep,
@@ -27,16 +29,12 @@ use libp2p::{
  PeerId,
  tcp::Config as TcpConfig,
  noise, yamux,
  request_response::{
    Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig,
    Behaviour as RrBehavior,
  },
  gossipsub::{
    IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,
    IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError,
    Behaviour as GsBehavior,
  },
  swarm::{NetworkBehaviour, SwarmEvent},
  swarm::{NetworkBehaviour, SwarmEvent, Swarm},
  SwarmBuilder,
};

@@ -44,8 +42,6 @@ pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p};

use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent};

// Block size limit + 1 KB of space for signatures/metadata
const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
const LIBP2P_TOPIC: &str = "serai-coordinator";

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
@@ -57,112 +53,71 @@ pub struct CosignedBlock {
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum ReqResMessageKind {
pub enum P2pMessageKind {
  KeepAlive,
  Tributary([u8; 32]),
  Heartbeat([u8; 32]),
  Block([u8; 32]),
}

impl ReqResMessageKind {
  pub fn read<R: Read>(reader: &mut R) -> Option<ReqResMessageKind> {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind).ok()?;
    match kind[0] {
      0 => Some(ReqResMessageKind::KeepAlive),
      1 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        ReqResMessageKind::Heartbeat(genesis)
      }),
      2 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        ReqResMessageKind::Block(genesis)
      }),
      _ => None,
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    match self {
      ReqResMessageKind::KeepAlive => vec![0],
      ReqResMessageKind::Heartbeat(genesis) => {
        let mut res = vec![1];
        res.extend(genesis);
        res
      }
      ReqResMessageKind::Block(genesis) => {
        let mut res = vec![2];
        res.extend(genesis);
        res
      }
    }
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum GossipMessageKind {
  Tributary([u8; 32]),
  CosignedBlock,
}

impl GossipMessageKind {
  pub fn read<R: Read>(reader: &mut R) -> Option<GossipMessageKind> {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind).ok()?;
    match kind[0] {
      0 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        GossipMessageKind::Tributary(genesis)
      }),
      1 => Some(GossipMessageKind::CosignedBlock),
      _ => None,
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    match self {
      GossipMessageKind::Tributary(genesis) => {
        let mut res = vec![0];
        res.extend(genesis);
        res
      }
      GossipMessageKind::CosignedBlock => {
        vec![1]
      }
    }
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum P2pMessageKind {
  ReqRes(ReqResMessageKind),
  Gossip(GossipMessageKind),
}

impl P2pMessageKind {
  fn genesis(&self) -> Option<[u8; 32]> {
    match self {
      P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) |
      P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None,
      P2pMessageKind::ReqRes(
        ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis),
      ) |
      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis),
      P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None,
      P2pMessageKind::Tributary(genesis) |
      P2pMessageKind::Heartbeat(genesis) |
      P2pMessageKind::Block(genesis) => Some(*genesis),
    }
  }
}

impl From<ReqResMessageKind> for P2pMessageKind {
  fn from(kind: ReqResMessageKind) -> P2pMessageKind {
    P2pMessageKind::ReqRes(kind)
  fn serialize(&self) -> Vec<u8> {
    match self {
      P2pMessageKind::KeepAlive => vec![0],
      P2pMessageKind::Tributary(genesis) => {
        let mut res = vec![1];
        res.extend(genesis);
        res
      }
      P2pMessageKind::Heartbeat(genesis) => {
        let mut res = vec![2];
        res.extend(genesis);
        res
      }
      P2pMessageKind::Block(genesis) => {
        let mut res = vec![3];
        res.extend(genesis);
        res
      }
      P2pMessageKind::CosignedBlock => {
        vec![4]
      }
    }
  }
}

impl From<GossipMessageKind> for P2pMessageKind {
  fn from(kind: GossipMessageKind) -> P2pMessageKind {
    P2pMessageKind::Gossip(kind)
  fn read<R: Read>(reader: &mut R) -> Option<P2pMessageKind> {
    let mut kind = [0; 1];
    reader.read_exact(&mut kind).ok()?;
    match kind[0] {
      0 => Some(P2pMessageKind::KeepAlive),
      1 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        P2pMessageKind::Tributary(genesis)
      }),
      2 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        P2pMessageKind::Heartbeat(genesis)
      }),
      3 => Some({
        let mut genesis = [0; 32];
        reader.read_exact(&mut genesis).ok()?;
        P2pMessageKind::Block(genesis)
      }),
      4 => Some(P2pMessageKind::CosignedBlock),
      _ => None,
    }
  }
}

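A round-trip sketch for the flattened kind above (crate-internal, per the definitions in this hunk):

```rust
#[test]
fn p2p_message_kind_round_trips() {
  let kind = P2pMessageKind::Heartbeat([1; 32]);
  let bytes = kind.serialize();
  // read consumes the tag byte plus the 32-byte genesis for this kind.
  assert_eq!(P2pMessageKind::read(&mut bytes.as_slice()), Some(kind));
  assert_eq!(kind.genesis(), Some([1; 32]));
}
```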
@@ -180,21 +135,17 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
|
||||
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
|
||||
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
|
||||
|
||||
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>);
|
||||
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>);
|
||||
async fn receive(&self) -> Message<Self>;
|
||||
async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec<u8>);
|
||||
async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec<u8>);
|
||||
async fn receive_raw(&self) -> (Self::Id, Vec<u8>);
|
||||
|
||||
async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec<u8>) {
|
||||
async fn send(&self, to: Self::Id, kind: P2pMessageKind, msg: Vec<u8>) {
|
||||
let mut actual_msg = kind.serialize();
|
||||
actual_msg.extend(msg);
|
||||
self.send_raw(to, actual_msg).await;
|
||||
self.send_raw(to, kind.genesis(), actual_msg).await;
|
||||
}
|
||||
async fn broadcast(&self, kind: impl Send + Into<P2pMessageKind>, msg: Vec<u8>) {
|
||||
let kind = kind.into();
|
||||
let mut actual_msg = match kind {
|
||||
P2pMessageKind::ReqRes(kind) => kind.serialize(),
|
||||
P2pMessageKind::Gossip(kind) => kind.serialize(),
|
||||
};
|
||||
async fn broadcast(&self, kind: P2pMessageKind, msg: Vec<u8>) {
|
||||
let mut actual_msg = kind.serialize();
|
||||
actual_msg.extend(msg);
|
||||
/*
|
||||
log::trace!(
|
||||
@@ -208,70 +159,41 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
|
||||
}
|
||||
);
|
||||
*/
|
||||
self.broadcast_raw(kind, actual_msg).await;
|
||||
self.broadcast_raw(kind.genesis(), actual_msg).await;
|
||||
}
|
||||
}
|
||||
async fn receive(&self) -> Message<Self> {
|
||||
let (sender, kind, msg) = loop {
|
||||
let (sender, msg) = self.receive_raw().await;
|
||||
if msg.is_empty() {
|
||||
log::error!("empty p2p message from {sender:?}");
|
||||
continue;
|
||||
}
|
||||
|
||||
#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)]
|
||||
struct RrCodec;
|
||||
#[async_trait]
|
||||
impl RrCodecTrait for RrCodec {
|
||||
type Protocol = &'static str;
|
||||
type Request = Vec<u8>;
|
||||
type Response = Vec<u8>;
|
||||
|
||||
async fn read_request<R: Send + Unpin + AsyncRead>(
|
||||
&mut self,
|
||||
_: &Self::Protocol,
|
||||
io: &mut R,
|
||||
) -> io::Result<Vec<u8>> {
|
||||
let mut len = [0; 4];
|
||||
io.read_exact(&mut len).await?;
|
||||
let len = usize::try_from(u32::from_le_bytes(len)).expect("not a 32-bit platform?");
|
||||
if len > MAX_LIBP2P_MESSAGE_SIZE {
|
||||
Err(io::Error::other("request length exceeded MAX_LIBP2P_MESSAGE_SIZE"))?;
|
||||
}
|
||||
// This may be a non-trivial allocation easily causable
|
||||
// While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
|
||||
// the max message size should be sufficiently sane
|
||||
let mut buf = vec![0; len];
|
||||
io.read_exact(&mut buf).await?;
|
||||
Ok(buf)
|
||||
}
|
||||
async fn read_response<R: Send + Unpin + AsyncRead>(
|
||||
&mut self,
|
||||
proto: &Self::Protocol,
|
||||
io: &mut R,
|
||||
) -> io::Result<Vec<u8>> {
|
||||
self.read_request(proto, io).await
|
||||
}
|
||||
async fn write_request<W: Send + Unpin + AsyncWrite>(
|
||||
&mut self,
|
||||
_: &Self::Protocol,
|
||||
io: &mut W,
|
||||
req: Vec<u8>,
|
||||
) -> io::Result<()> {
|
||||
io.write_all(
|
||||
&u32::try_from(req.len())
|
||||
.map_err(|_| io::Error::other("request length exceeded 2**32"))?
|
||||
.to_le_bytes(),
|
||||
)
|
||||
.await?;
|
||||
io.write_all(&req).await
|
||||
}
|
||||
async fn write_response<W: Send + Unpin + AsyncWrite>(
|
||||
&mut self,
|
||||
proto: &Self::Protocol,
|
||||
io: &mut W,
|
||||
res: Vec<u8>,
|
||||
) -> io::Result<()> {
|
||||
self.write_request(proto, io, res).await
|
||||
let mut msg_ref = msg.as_ref();
|
||||
let Some(kind) = P2pMessageKind::read::<&[u8]>(&mut msg_ref) else {
|
||||
log::error!("invalid p2p message kind from {sender:?}");
|
||||
continue;
|
||||
};
|
||||
break (sender, kind, msg_ref.to_vec());
|
||||
};
|
||||
/*
|
||||
log::trace!(
|
||||
"received p2p message (kind {})",
|
||||
match kind {
|
||||
P2pMessageKind::KeepAlive => "KeepAlive".to_string(),
|
||||
P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)),
|
||||
P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)),
|
||||
P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)),
|
||||
P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(),
|
||||
}
|
||||
);
|
||||
*/
|
||||
Message { sender, kind, msg }
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(NetworkBehaviour)]
|
||||
struct Behavior {
|
||||
reqres: RrBehavior<RrCodec>,
|
||||
gossipsub: GsBehavior,
|
||||
}
|
||||
|
||||
@@ -279,9 +201,8 @@ struct Behavior {
|
||||
#[derive(Clone)]
|
||||
pub struct LibP2p {
|
||||
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
|
||||
send: Arc<Mutex<mpsc::UnboundedSender<(PeerId, Vec<u8>)>>>,
|
||||
broadcast: Arc<Mutex<mpsc::UnboundedSender<(P2pMessageKind, Vec<u8>)>>>,
|
||||
receive: Arc<Mutex<mpsc::UnboundedReceiver<Message<Self>>>>,
|
||||
broadcast: Arc<Mutex<mpsc::UnboundedSender<(Option<[u8; 32]>, Vec<u8>)>>>,
|
||||
receive: Arc<Mutex<mpsc::UnboundedReceiver<(PeerId, Vec<u8>)>>>,
|
||||
}
|
||||
impl fmt::Debug for LibP2p {
|
||||
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
@@ -292,12 +213,14 @@ impl fmt::Debug for LibP2p {
|
||||
impl LibP2p {
|
||||
#[allow(clippy::new_without_default)]
|
||||
pub fn new(serai: Arc<Serai>) -> Self {
|
||||
// Block size limit + 1 KB of space for signatures/metadata
|
||||
const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
|
||||
|
||||
log::info!("creating a libp2p instance");
|
||||
|
||||
let throwaway_key_pair = Keypair::generate_ed25519();
|
||||
|
||||
let behavior = Behavior {
|
||||
reqres: { RrBehavior::new([], RrConfig::default()) },
|
||||
gossipsub: {
|
||||
let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2;
|
||||
let heartbeats_per_block =
|
||||
@@ -358,10 +281,9 @@ impl LibP2p {
|
||||
.with_behaviour(|_| behavior)
|
||||
.unwrap()
|
||||
.build();
|
||||
const PORT: u16 = 30564; // 5132 ^ (('c' << 8) | 'o')
|
||||
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
|
||||
swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();

let (send_send, mut send_recv) = mpsc::unbounded_channel();
let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel();
let (receive_send, receive_recv) = mpsc::unbounded_channel();
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();
@@ -370,9 +292,6 @@ impl LibP2p {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}

// TODO: If a network has fewer than TARGET_PEERS, this will cause retries ad infinitum
const TARGET_PEERS: usize = 5;

// The addrs we're currently dialing, and the networks associated with them
let dialing_peers = Arc::new(RwLock::new(HashMap::new()));
// The peers we're currently connected to, and the networks associated with them
@@ -451,7 +370,7 @@ impl LibP2p {
}
}
// If we do not, start connecting to this network again
if remaining_peers < TARGET_PEERS {
if remaining_peers < 3 {
connect_to_network_send.send(net).expect(
"couldn't send net to connect to due to disconnects (receiver dropped?)",
);
@@ -470,31 +389,28 @@ impl LibP2p {
// TODO: We should also connect to random peers from random nets as needed for
// cosigning

// Drain the channel, de-duplicating any networks in it
let mut connect_to_network_networks = HashSet::new();
while let Ok(network) = connect_to_network_recv.try_recv() {
connect_to_network_networks.insert(network);
}
for network in connect_to_network_networks {
// Define a buffer, `to_retry`, so we can exhaust this channel before sending more down
// it
let mut to_retry = vec![];
while let Some(network) = connect_to_network_recv.recv().await {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient number of nodes known, connect to all yet add it
// back and break
if nodes.len() < TARGET_PEERS {
if nodes.len() < 3 {
log::warn!(
"insufficient amount of P2P nodes known for {:?}: {}",
network,
nodes.len()
);
// Retry this later
connect_to_network_send.send(network).unwrap();
to_retry.push(network);
for node in nodes {
connect(network, node).await;
}
continue;
}

// Randomly select up to 150% of the TARGET_PEERS
for _ in 0 .. ((3 * TARGET_PEERS) / 2) {
// Randomly select up to 5
for _ in 0 .. 5 {
if !nodes.is_empty() {
let to_connect = nodes.swap_remove(
usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
@@ -505,6 +421,9 @@ impl LibP2p {
}
}
}
for to_retry in to_retry {
connect_to_network_send.send(to_retry).unwrap();
}
// Sleep 60 seconds before moving to the next iteration
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
@@ -515,6 +434,32 @@ impl LibP2p {
tokio::spawn({
let mut time_of_last_p2p_message = Instant::now();

#[allow(clippy::needless_pass_by_ref_mut)] // False positive
fn broadcast_raw(
p2p: &mut Swarm<Behavior>,
time_of_last_p2p_message: &mut Instant,
set: Option<ValidatorSet>,
msg: Vec<u8>,
) {
// Update the time of last message
*time_of_last_p2p_message = Instant::now();

let topic =
if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) };

match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) {
Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"),
Err(PublishError::InsufficientPeers) => {
log::warn!("failed to send p2p message due to insufficient peers")
}
Err(PublishError::MessageTooLarge) => {
panic!("tried to send a too large message: {}", hex::encode(msg))
}
Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"),
Err(PublishError::Duplicate) | Ok(_) => {}
}
}

async move {
let connected_peers = connected_peers.clone();

@@ -541,50 +486,17 @@ impl LibP2p {
}
}

msg = send_recv.recv() => {
let (peer, msg): (PeerId, Vec<u8>) =
msg.expect("send_recv closed. are we shutting down?");
swarm.behaviour_mut().reqres.send_request(&peer, msg);
},

// Handle any queued outbound messages
msg = broadcast_recv.recv() => {
// Update the time of last message
time_of_last_p2p_message = Instant::now();

let (kind, msg): (P2pMessageKind, Vec<u8>) =
let (genesis, msg): (Option<[u8; 32]>, Vec<u8>) =
msg.expect("broadcast_recv closed. are we shutting down?");

if matches!(kind, P2pMessageKind::ReqRes(_)) {
// Use request/response, yet send to all connected peers
for peer_id in swarm.connected_peers().copied().collect::<Vec<_>>() {
swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone());
}
} else {
// Use gossipsub

let set =
kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied());
let topic = if let Some(set) = set {
topic_for_set(set)
} else {
IdentTopic::new(LIBP2P_TOPIC)
};

match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) {
Err(PublishError::SigningError(e)) => {
panic!("signing error when broadcasting: {e}")
},
Err(PublishError::InsufficientPeers) => {
log::warn!("failed to send p2p message due to insufficient peers")
}
Err(PublishError::MessageTooLarge) => {
panic!("tried to send a too large message: {}", hex::encode(msg))
}
Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"),
Err(PublishError::Duplicate) | Ok(_) => {}
}
}
let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied());
broadcast_raw(
&mut swarm,
&mut time_of_last_p2p_message,
set,
msg,
);
}

// Handle new incoming messages
@@ -646,7 +558,7 @@ impl LibP2p {
}
}
// If we do not, start connecting to this network again
if remaining_peers < TARGET_PEERS {
if remaining_peers < 3 {
connect_to_network_send
.send(net)
.expect(
@@ -660,34 +572,12 @@ impl LibP2p {
connected_peers.len(),
);
}
Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres(
RrEvent::Message { peer, message },
))) => {
let message = match message {
RrMessage::Request { request, .. } => request,
RrMessage::Response { response, .. } => response,
};

let mut msg_ref = message.as_slice();
let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue };
let message = Message {
sender: peer,
kind: P2pMessageKind::ReqRes(kind),
msg: msg_ref.to_vec(),
};
receive_send.send(message).expect("receive_send closed. are we shutting down?");
}
Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
GsEvent::Message { propagation_source, message, .. },
))) => {
let mut msg_ref = message.data.as_slice();
let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue };
let message = Message {
sender: propagation_source,
kind: P2pMessageKind::Gossip(kind),
msg: msg_ref.to_vec(),
};
receive_send.send(message).expect("receive_send closed. are we shutting down?");
receive_send
.send((propagation_source, message.data))
.expect("receive_send closed. are we shutting down?");
}
_ => {}
}
@@ -719,13 +609,12 @@ impl LibP2p {
// (where a finalized block only occurs due to network activity), meaning this won't be
// run
() = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => {
time_of_last_p2p_message = Instant::now();
for peer_id in swarm.connected_peers().copied().collect::<Vec<_>>() {
swarm
.behaviour_mut()
.reqres
.send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize());
}
broadcast_raw(
&mut swarm,
&mut time_of_last_p2p_message,
None,
P2pMessageKind::KeepAlive.serialize()
);
}
}
}
@@ -734,7 +623,6 @@ impl LibP2p {

LibP2p {
subscribe: Arc::new(Mutex::new(subscribe_send)),
send: Arc::new(Mutex::new(send_send)),
broadcast: Arc::new(Mutex::new(broadcast_send)),
receive: Arc::new(Mutex::new(receive_recv)),
}
@@ -763,22 +651,22 @@ impl P2p for LibP2p {
.expect("subscribe_send closed. are we shutting down?");
}

async fn send_raw(&self, peer: Self::Id, msg: Vec<u8>) {
self.send.lock().await.send((peer, msg)).expect("send_send closed. are we shutting down?");
async fn send_raw(&self, _: Self::Id, genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.broadcast_raw(genesis, msg).await;
}

async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self
.broadcast
.lock()
.await
.send((kind, msg))
.send((genesis, msg))
.expect("broadcast_send closed. are we shutting down?");
}

// TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant
// lock acquisition?
async fn receive(&self) -> Message<Self> {
async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
self.receive.lock().await.recv().await.expect("receive_recv closed. are we shutting down?")
}
}
@@ -786,10 +674,21 @@ impl P2p for LibP2p {
#[async_trait]
impl TributaryP2p for LibP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await
<Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
}
}

fn heartbeat_time_unit<D: Db, P: P2p>() -> u64 {
// Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating
let timestamp = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("system clock is wrong")
.as_secs();
// Divide by the block time so if multiple parties send a Heartbeat, they're more likely to
// overlap
timestamp / u64::from(Tributary::<D, Transaction, P>::block_time())
}
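// A minimal sketch of the bucketing this provides (illustrative numbers, not
// from the diff): with a 120-second block time, timestamps 1_700_000_050 and
// 1_700_000_110 both divide down to unit 14_166_667, so two validators
// heartbeating within the same block window produce byte-identical messages,
// letting gossipsub's deduplication collapse them.
fn same_time_unit(a_secs: u64, b_secs: u64, block_time_secs: u64) -> bool {
(a_secs / block_time_secs) == (b_secs / block_time_secs)
}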

pub async fn heartbeat_tributaries_task<D: Db, P: P2p>(
p2p: P,
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
@@ -824,12 +723,9 @@ pub async fn heartbeat_tributaries_task<D: Db, P: P2p>(
if SystemTime::now() > (block_time + Duration::from_secs(60)) {
log::warn!("last known tributary block was over a minute ago");
let mut msg = tip.to_vec();
let time: u64 = SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("system clock is wrong")
.as_secs();
msg.extend(time.to_le_bytes());
P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await;
let time_unit = heartbeat_time_unit::<D, P>();
msg.extend(time_unit.to_le_bytes());
P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await;
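// For reference, the heartbeat wire format this builds (as read back by the
// length-40 check in the handler below): msg[.. 32] is the tributary tip,
// msg[32 .. 40] is the little-endian time unit.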
}
}

@@ -842,6 +738,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
p2p: P,
cosign_channel: mpsc::UnboundedSender<CosignedBlock>,
mut tributary_event: broadcast::Receiver<TributaryEvent<D, P>>,
our_key: <Ristretto as Ciphersuite>::G,
) {
let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender<Message<P>>>::new()));
tokio::spawn({
@@ -867,45 +764,93 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
tokio::spawn({
let p2p = p2p.clone();
async move {
let mut last_replied_to_heartbeat = 0;
loop {
let Some(mut msg) = recv.recv().await else {
// Channel closure happens when the tributary retires
break;
};
match msg.kind {
P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {}
P2pMessageKind::KeepAlive => {}

// TODO: Slash on Heartbeat which justifies a response, since the node
P2pMessageKind::Tributary(msg_genesis) => {
assert_eq!(msg_genesis, genesis);
log::trace!("handling message for tributary {:?}", spec_set);
if tributary.tributary.handle_message(&msg.msg).await {
P2p::broadcast(&p2p, msg.kind, msg.msg).await;
}
}

// TODO2: Rate limit this per timestamp
// And/or slash on Heartbeat which justifies a response, since the node
// obviously was offline and we must now use our bandwidth to compensate for
// them?
P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => {
P2pMessageKind::Heartbeat(msg_genesis) => {
assert_eq!(msg_genesis, genesis);

let current_time_unit = heartbeat_time_unit::<D, P>();
if current_time_unit.saturating_sub(last_replied_to_heartbeat) < 10 {
continue;
}

if msg.msg.len() != 40 {
log::error!("validator sent invalid heartbeat");
continue;
}
// Only respond to recent heartbeats
let msg_time = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect(
let msg_time_unit = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect(
"length-checked heartbeat message didn't have 8 bytes for the u64",
));
if SystemTime::now()
.duration_since(SystemTime::UNIX_EPOCH)
.expect("system clock is wrong")
.as_secs()
.saturating_sub(msg_time) >
10
{
if current_time_unit.saturating_sub(msg_time_unit) > 1 {
continue;
}

log::debug!("received heartbeat with a recent timestamp");
// This is the network's last replied to, not ours specifically
last_replied_to_heartbeat = current_time_unit;

let reader = tributary.tributary.reader();

// Have sqrt(n) nodes reply with the blocks
#[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)]
let mut responders = f32::from(tributary.spec.n(&[])).sqrt().floor() as u64;
// Try to have at least 3 responders
if responders < 3 {
responders = tributary.spec.n(&[]).min(3).into();
}

// Decide which nodes will respond by using the latest block's hash as a
// mutually agreed upon entropy source
// This isn't a secure source of entropy, yet it's fine for this
let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap());
// If n = 10, responders = 3, we want `start` to be 0 ..= 7
// (so the highest is 7, 8, 9)
// entropy % ((10 + 1) - 3) = entropy % 8 = 0 ..= 7
let start = usize::try_from(
entropy % (u64::from(tributary.spec.n(&[]) + 1) - responders),
)
.unwrap();
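// A standalone sketch of the window arithmetic above (hypothetical helper,
// not in the diff): for n = 10 and responders = 3, start = entropy % 8 lies
// in 0 ..= 7, so the window start .. start + 3 stays within indices 0 ..= 9.
fn responder_window(n: u64, responders: u64, entropy: u64) -> core::ops::Range<u64> {
let start = entropy % ((n + 1) - responders);
start .. (start + responders)
}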
let mut selected = false;
for validator in &tributary.spec.validators()
[start .. (start + usize::try_from(responders).unwrap())]
{
if our_key == validator.0 {
selected = true;
continue;
}
}
if !selected {
log::debug!("received heartbeat and not selected to respond");
continue;
}

log::debug!("received heartbeat and selected to respond");

let p2p = p2p.clone();
// Spawn a dedicated task as this may require loading large amounts of data
// from disk and take a notable amount of time
tokio::spawn(async move {
// Have the selected nodes respond
// TODO: Spawn a dedicated topic for this heartbeat response?
let mut latest = msg.msg[.. 32].try_into().unwrap();
let mut to_send = vec![];
while let Some(next) = reader.block_after(&latest) {
@@ -918,13 +863,13 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
res.extend(reader.commit(&next).unwrap());
// Also include the timestamp used within the Heartbeat
res.extend(&msg.msg[32 .. 40]);
p2p.send(msg.sender, ReqResMessageKind::Block(genesis), res).await;
p2p.send(msg.sender, P2pMessageKind::Block(genesis), res).await;
}
}
});
}

P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => {
P2pMessageKind::Block(msg_genesis) => {
assert_eq!(msg_genesis, genesis);
let mut msg_ref: &[u8] = msg.msg.as_ref();
let Ok(block) = Block::<Transaction>::read(&mut msg_ref) else {
@@ -943,15 +888,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
);
}

P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => {
assert_eq!(msg_genesis, genesis);
log::trace!("handling message for tributary {:?}", spec_set);
if tributary.tributary.handle_message(&msg.msg).await {
P2p::broadcast(&p2p, msg.kind, msg.msg).await;
}
}

P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(),
P2pMessageKind::CosignedBlock => unreachable!(),
}
}
}
@@ -971,16 +908,15 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
loop {
let msg = p2p.receive().await;
match msg.kind {
P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {}
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) |
P2pMessageKind::ReqRes(
ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis),
) => {
P2pMessageKind::KeepAlive => {}
P2pMessageKind::Tributary(genesis) |
P2pMessageKind::Heartbeat(genesis) |
P2pMessageKind::Block(genesis) => {
if let Some(channel) = channels.read().await.get(&genesis) {
channel.send(msg).unwrap();
}
}
P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => {
P2pMessageKind::CosignedBlock => {
let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else {
log::error!("received CosignedBlock message with invalidly serialized contents");
continue;

@@ -14,7 +14,7 @@ use tokio::sync::RwLock;

use crate::{
processors::{Message, Processors},
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
TributaryP2p, P2pMessageKind, P2p,
};

pub mod tributary;
@@ -45,10 +45,7 @@ impl Processors for MemProcessors {

#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(
usize,
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);
pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
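// i.e. each validator's inbound queue now carries plain (sender index, raw
// message bytes); the message kind is parsed by the receiver rather than
// being stored alongside the queue entry.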

impl LocalP2p {
pub fn new(validators: usize) -> Vec<LocalP2p> {
@@ -68,13 +65,11 @@ impl P2p for LocalP2p {
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.1.write().await.1[to].push_back((self.0, msg));
}

async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
// Content-based deduplication
let mut lock = self.1.write().await;
{
@@ -86,26 +81,19 @@ impl P2p for LocalP2p {
}
let queues = &mut lock.1;

let kind_len = (match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
})
.len();
let msg = msg[kind_len ..].to_vec();

for (i, msg_queue) in queues.iter_mut().enumerate() {
if i == self.0 {
continue;
}
msg_queue.push_back((self.0, kind, msg.clone()));
msg_queue.push_back((self.0, msg.clone()));
}
}

async fn receive(&self) -> P2pMessage<Self> {
async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
// This is a cursed way to implement an async read from a Vec
loop {
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
return P2pMessage { sender, kind, msg };
if let Some(res) = self.1.write().await.1[self.0].pop_front() {
return res;
}
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
@@ -115,11 +103,6 @@ impl P2p for LocalP2p {
#[async_trait]
impl TributaryP2p for LocalP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast(
self,
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
msg,
)
.await
<Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
}
}

@@ -26,7 +26,7 @@ use serai_db::MemDb;
use tributary::Tributary;

use crate::{
GossipMessageKind, P2pMessageKind, P2p,
P2pMessageKind, P2p,
tributary::{Transaction, TributarySpec},
tests::LocalP2p,
};
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis());
if tributary.handle_message(&msg.msg).await {
p2p.broadcast(msg.kind, msg.msg).await;
@@ -173,7 +173,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}
@@ -199,7 +199,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}

@@ -3,6 +3,8 @@ use std::sync::Arc;

use rand_core::OsRng;

use ciphersuite::{Ciphersuite, Ristretto};

use tokio::{
sync::{mpsc, broadcast},
time::sleep,
@@ -35,12 +37,17 @@ async fn handle_p2p_test() {

let mut tributary_senders = vec![];
let mut tributary_arcs = vec![];
for (p2p, tributary) in tributaries.drain(..) {
for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() {
let tributary = Arc::new(tributary);
tributary_arcs.push(tributary.clone());
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
tokio::spawn(handle_p2p_task(
p2p,
cosign_send,
new_tributary_recv,
<Ristretto as Ciphersuite>::generator() * *keys[i],
));
new_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
.map_err(|_| "failed to send ActiveTributary")

@@ -45,12 +45,17 @@ async fn sync_test() {
let mut tributary_senders = vec![];
let mut tributary_arcs = vec![];
let mut p2p_threads = vec![];
for (p2p, tributary) in tributaries.drain(..) {
for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() {
let tributary = Arc::new(tributary);
tributary_arcs.push(tributary.clone());
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
let thread = tokio::spawn(handle_p2p_task(
p2p,
cosign_send,
new_tributary_recv,
<Ristretto as Ciphersuite>::generator() * *keys[i],
));
new_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
.map_err(|_| "failed to send ActiveTributary")
@@ -86,7 +91,7 @@ async fn sync_test() {
let syncer_tributary = Arc::new(syncer_tributary);
let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5);
let (cosign_send, _) = mpsc::unbounded_channel();
tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv));
tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv, syncer_key));
syncer_tributary_send
.send(TributaryEvent::NewTributary(ActiveTributary {
spec: spec.clone(),

@@ -74,7 +74,7 @@ impl TributarySpec {

pub fn genesis(&self) -> [u8; 32] {
// Calculate the genesis for this Tributary
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis Testnet 2.1");
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
// This locks it to a specific Serai chain
genesis.append_message(b"serai_block", self.serai_block);
genesis.append_message(b"session", self.set.session.0.to_le_bytes());
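// Since RecommendedTranscript binds its domain-separation label into every
// challenge, dropping the "Testnet 2.1" suffix means the same TributarySpec
// now derives a different genesis hash than it did under the old label,
// which effectively forks new Tributaries away from testnet-2 state.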

@@ -139,10 +139,8 @@ impl<N: Network> BlockData<N> {
// 27, 33, 41, 46, 60, 64
self.round_mut().step = data.step();

// Only return a message if we're actually a current validator and haven't prior posted a
// message
// Only return a message if we're actually a current validator
let round_number = self.round().number;
let step = data.step();
let res = self.validator_id.map(|validator_id| Message {
sender: validator_id,
block: self.number,
@@ -150,21 +148,59 @@ impl<N: Network> BlockData<N> {
data,
});

if res.is_some() {
if let Some(res) = res.as_ref() {
const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";

let genesis = self.genesis;
let key = |prefix: &[u8]| [prefix, &genesis].concat();

let mut txn = self.db.txn();
let key = [
b"tendermint-machine_already_sent_message".as_ref(),
&self.genesis,
&self.number.0.to_le_bytes(),
&round_number.0.to_le_bytes(),
&step.encode(),
]
.concat();
// If we've already sent a message, return
if txn.get(&key).is_some() {

// Ensure we haven't prior sent a message for a future block/round
let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
let key = key(prefix);
let latest =
u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
if latest > current {
None?;
}
if current > latest {
txn.put(&key, current.to_le_bytes());
return Some(true);
}
Some(false)
};
let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
if new_block {
// Delete the latest round key
txn.del(&key(LATEST_ROUND_KEY));
}
let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
if new_block || new_round {
// Delete the messages for the old round
txn.del(&key(PROPOSE_KEY));
txn.del(&key(PEVOTE_KEY));
txn.del(&key(PRECOMMIT_KEY));
}

// Check we haven't sent this message within this round
let msg_key = key(match res.data.step() {
Step::Propose => PROPOSE_KEY,
Step::Prevote => PEVOTE_KEY,
Step::Precommit => PRECOMMIT_KEY,
});
if txn.get(&msg_key).is_some() {
assert!(!new_block);
assert!(!new_round);
None?;
}
txn.put(&key, []);
// Put this message to the DB
txn.put(&msg_key, res.encode());

txn.commit();
}
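// Net effect of the keys above (a summary): LATEST_BLOCK_KEY/LATEST_ROUND_KEY
// act as per-genesis watermarks, refusing (via `None?`) any message for an
// already-passed block or round; advancing to a new block or round clears the
// stale per-step entries; and a second message for the same
// (block, round, step) is refused outright, preventing this validator from
// double-signing a step.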

@@ -514,7 +514,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
match step {
Step::Propose => {
// Slash the validator for not proposing when they should've
log::debug!(target: "tendermint", "Validator didn't propose when they should have");
log::debug!(target: "tendermint", "validator didn't propose when they should have");
// this slash will be voted on.
self.slash(
self.weights.proposer(self.block.number, self.block.round().number),
@@ -724,7 +724,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
if !self.block.log.log(signed.clone())? {
return Err(TendermintError::AlreadyHandled);
}
log::debug!(
log::trace!(
target: "tendermint",
"received new tendermint message (block: {}, round: {}, step: {:?})",
msg.block.0,

@@ -57,7 +57,6 @@ impl<N: Network> RoundData<N> {

// Poll all set timeouts, returning the Step whose timeout has just expired
pub(crate) async fn timeout_future(&self) -> Step {
/*
let now = Instant::now();
log::trace!(
target: "tendermint",
@@ -65,7 +64,6 @@ impl<N: Network> RoundData<N> {
self.step,
self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()
);
*/

let timeout_future = |step| {
let timeout = self.timeouts.get(&step).copied();

@@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features =
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }

schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }

dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }

@@ -39,6 +39,13 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {

/// Obtain the list of nonces to generate, as specified by the generators to create commitments
/// against per-nonce.
///
/// The Algorithm is responsible for all transcripting of these nonce specifications/generators.
///
/// The prover will be passed the commitments, and the commitments will be sent to all other
/// participants. No guarantees the commitments are internally consistent (have the same discrete
/// logarithm across generators) are made. Any Algorithm which specifies multiple generators for
/// a single nonce must handle that itself.
fn nonces(&self) -> Vec<Vec<C::G>>;

/// Generate an addendum to FROST's preprocessing stage.

@@ -1,13 +1,9 @@
// FROST defines its nonce as sum(Di, Ei * bi)
// Monero needs not just the nonce over G however, yet also over H
// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
//
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
// of nonces, each against an arbitrary list of generators
// In order for this library to be robust, it supports generating an arbitrary amount of nonces,
// each against an arbitrary list of generators
//
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
// When representations across multiple generators are provided, a DLEq proof is also provided to
// confirm their integrity
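// A minimal sketch of the binomial nonce named above, expressed over the ff
// Field trait (illustrative, not this file's API): the pair (d, e) only
// becomes the effective nonce r once the binding factor b is known.
fn binomial_nonce<F: ciphersuite::group::ff::Field>(d: F, e: F, b: F) -> F {
d + (e * b)
}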

use core::ops::Deref;
use std::{
@@ -24,32 +20,8 @@ use transcript::Transcript;
use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
use multiexp::multiexp_vartime;

use dleq::MultiDLEqProof;

use crate::{curve::Curve, Participant};

// Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
let mut transcript = T::new(b"FROST DLEq Aggregation v0.5");
transcript.append_message(b"context", context);
transcript
}

// Every participant proves for their commitments at the start of the protocol
// These proofs are verified sequentially, requiring independent transcripts
// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is
// challenged in order to create a commitment to it, carried in each independent transcript
// (effectively forking the original transcript)
//
// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be
// constructed). For higher level protocols, the transcript may have contextual info these proofs
// will then be bound to
fn dleq_transcript<T: Transcript>(context: &[u8]) -> T {
let mut transcript = T::new(b"FROST Commitments DLEq v0.5");
transcript.append_message(b"context", context);
transcript
}

// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
// This is considered a single nonce as r = d + be
#[derive(Clone, Zeroize)]
@@ -69,7 +41,7 @@ impl<C: Curve> GeneratorCommitments<C> {
}
}

// A single nonce's commitments and relevant proofs
// A single nonce's commitments
#[derive(Clone, PartialEq, Eq)]
pub(crate) struct NonceCommitments<C: Curve> {
// Called generators as these commitments are indexed by generator later on
@@ -121,12 +93,6 @@ impl<C: Curve> NonceCommitments<C> {
t.append_message(b"commitment_E", commitments.0[1].to_bytes());
}
}

fn aggregation_factor<T: Transcript>(&self, context: &[u8]) -> C::F {
let mut transcript = aggregation_transcript::<T>(context);
self.transcript(&mut transcript);
<C as Curve>::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref())
}
}

/// Commitments for all the nonces across all their generators.
@@ -135,51 +101,26 @@ pub(crate) struct Commitments<C: Curve> {
// Called nonces as these commitments are indexed by nonce
// So to get the commitments for the first nonce, it'd be commitments.nonces[0]
pub(crate) nonces: Vec<NonceCommitments<C>>,
// DLEq Proof proving that each set of commitments were generated using a single pair of discrete
// logarithms
pub(crate) dleq: Option<MultiDLEqProof<C::G>>,
}

impl<C: Curve> Commitments<C> {
pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
pub(crate) fn new<R: RngCore + CryptoRng>(
rng: &mut R,
secret_share: &Zeroizing<C::F>,
planned_nonces: &[Vec<C::G>],
context: &[u8],
) -> (Vec<Nonce<C>>, Commitments<C>) {
let mut nonces = vec![];
let mut commitments = vec![];

let mut dleq_generators = vec![];
let mut dleq_nonces = vec![];
for generators in planned_nonces {
let (nonce, these_commitments): (Nonce<C>, _) =
NonceCommitments::new(&mut *rng, secret_share, generators);

if generators.len() > 1 {
dleq_generators.push(generators.clone());
dleq_nonces.push(Zeroizing::new(
(these_commitments.aggregation_factor::<T>(context) * nonce.0[1].deref()) +
nonce.0[0].deref(),
));
}

nonces.push(nonce);
commitments.push(these_commitments);
}

let dleq = if !dleq_generators.is_empty() {
Some(MultiDLEqProof::prove(
rng,
&mut dleq_transcript::<T>(context),
&dleq_generators,
&dleq_nonces,
))
} else {
None
};

(nonces, Commitments { nonces: commitments, dleq })
(nonces, Commitments { nonces: commitments })
}

pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
@@ -187,58 +128,20 @@ impl<C: Curve> Commitments<C> {
for nonce in &self.nonces {
nonce.transcript(t);
}

// Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in
// an exact order
// This means it shouldn't be possible for variadic generators to cause conflicts
if let Some(dleq) = &self.dleq {
t.append_message(b"dleq", dleq.serialize());
}
}

pub(crate) fn read<R: Read, T: Transcript>(
reader: &mut R,
generators: &[Vec<C::G>],
context: &[u8],
) -> io::Result<Self> {
pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {
let nonces = (0 .. generators.len())
.map(|i| NonceCommitments::read(reader, &generators[i]))
.collect::<Result<Vec<NonceCommitments<C>>, _>>()?;

let mut dleq_generators = vec![];
let mut dleq_nonces = vec![];
for (generators, nonce) in generators.iter().cloned().zip(&nonces) {
if generators.len() > 1 {
let binding = nonce.aggregation_factor::<T>(context);
let mut aggregated = vec![];
for commitments in &nonce.generators {
aggregated.push(commitments.0[0] + (commitments.0[1] * binding));
}
dleq_generators.push(generators);
dleq_nonces.push(aggregated);
}
}

let dleq = if !dleq_generators.is_empty() {
let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?;
dleq
.verify(&mut dleq_transcript::<T>(context), &dleq_generators, &dleq_nonces)
.map_err(|_| io::Error::other("invalid DLEq proof"))?;
Some(dleq)
} else {
None
};

Ok(Commitments { nonces, dleq })
Ok(Commitments { nonces })
}

pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
for nonce in &self.nonces {
nonce.write(writer)?;
}
if let Some(dleq) = &self.dleq {
dleq.write(writer)?;
}
Ok(())
}
}

@@ -125,14 +125,8 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
let mut params = self.params;

let mut rng = ChaCha20Rng::from_seed(*seed.0);
// Get a challenge to the existing transcript for use when proving for the commitments
let commitments_challenge = params.algorithm.transcript().challenge(b"commitments");
let (nonces, commitments) = Commitments::new::<_, A::Transcript>(
&mut rng,
params.keys.secret_share(),
&params.algorithm.nonces(),
commitments_challenge.as_ref(),
);
let (nonces, commitments) =
Commitments::new::<_>(&mut rng, params.keys.secret_share(), &params.algorithm.nonces());
let addendum = params.algorithm.preprocess_addendum(&mut rng, &params.keys);

let preprocess = Preprocess { commitments, addendum };
@@ -141,27 +135,18 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
let mut blame_entropy = [0; 32];
rng.fill_bytes(&mut blame_entropy);
(
AlgorithmSignMachine {
params,
seed,
commitments_challenge,
nonces,
preprocess: preprocess.clone(),
blame_entropy,
},
AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy },
preprocess,
)
}

#[cfg(any(test, feature = "tests"))]
pub(crate) fn unsafe_override_preprocess(
mut self,
self,
nonces: Vec<Nonce<C>>,
preprocess: Preprocess<C, A::Addendum>,
) -> AlgorithmSignMachine<C, A> {
AlgorithmSignMachine {
commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"),

params: self.params,
seed: CachedPreprocess(Zeroizing::new([0; 32])),

@@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
seed: CachedPreprocess,

#[zeroize(skip)]
commitments_challenge: <A::Transcript as Transcript>::Challenge,
pub(crate) nonces: Vec<Nonce<C>>,
// Skips the preprocess due to being too large a bound to feasibly enforce on users
#[zeroize(skip)]
@@ -285,11 +268,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi

fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
Ok(Preprocess {
commitments: Commitments::read::<_, A::Transcript>(
reader,
&self.params.algorithm.nonces(),
self.commitments_challenge.as_ref(),
)?,
commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,
addendum: self.params.algorithm.read_addendum(reader)?,
})
}

@@ -12,7 +12,7 @@ use crate::{

/// Tests for the nonce handling code.
pub mod nonces;
use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof};
use nonces::test_multi_nonce;

/// Vectorized test suite to ensure consistency.
pub mod vectors;
@@ -267,6 +267,4 @@ pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut
test_schnorr_blame::<R, C, H>(rng);

test_multi_nonce::<R, C>(rng);
test_invalid_commitment::<R, C>(rng);
test_invalid_dleq_proof::<R, C>(rng);
}

@@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript};

use ciphersuite::group::{ff::Field, Group, GroupEncoding};

use dleq::MultiDLEqProof;
pub use dkg::tests::{key_gen, recover_key};

use crate::{
Curve, Participant, ThresholdView, ThresholdKeys, FrostError,
algorithm::Algorithm,
sign::{Writable, SignMachine},
tests::{algorithm_machines, preprocess, sign},
tests::{algorithm_machines, sign},
};

#[derive(Clone)]
@@ -157,75 +155,3 @@ pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
sign(&mut *rng, &MultiNonce::<C>::new(), keys.clone(), machines, &[]);
}

/// Test malleating a commitment for a nonce across generators causes the preprocess to error.
pub fn test_invalid_commitment<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<R, C>(&mut *rng);
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});

// Select a random participant to give an invalid commitment
let participants = preprocesses.keys().collect::<Vec<_>>();
let faulty = *participants
[usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];

// Grab their preprocess
let mut preprocess = preprocesses.remove(&faulty).unwrap();

// Mutate one of the commitments
let nonce =
preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap();
let generators_len = nonce.generators.len();
nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0
[usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng);

// The commitments are validated at time of deserialization (read_preprocess)
// Accordingly, serialize it and read it again to make sure it errors
assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());
}

/// Test malleating the DLEq proof for a preprocess causes it to error.
pub fn test_invalid_dleq_proof<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<R, C>(&mut *rng);
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});

// Select a random participant to give an invalid DLEq proof
let participants = preprocesses.keys().collect::<Vec<_>>();
let faulty = *participants
[usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];

// Invalidate it by replacing it with a completely different proof
let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))];
let mut preprocess = preprocesses.remove(&faulty).unwrap();
preprocess.commitments.dleq = Some(MultiDLEqProof::prove(
&mut *rng,
&mut RecommendedTranscript::new(b"Invalid DLEq Proof"),
&nonces::<C>(),
&dlogs,
));

assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());

// Also test None for a proof will cause an error
preprocess.commitments.dleq = None;
assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());
}

@@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding};
use crate::{
curve::Curve,
Participant, ThresholdCore, ThresholdKeys,
algorithm::{IetfTranscript, Hram, IetfSchnorr},
algorithm::{Hram, IetfSchnorr},
sign::{
Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,
PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
@@ -191,7 +191,6 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
nonces: vec![NonceCommitments {
generators: vec![GeneratorCommitments(these_commitments)],
}],
dleq: None,
},
addendum: (),
};
@@ -301,12 +300,8 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
}

// Also test it at the Commitments level
let (generated_nonces, commitments) = Commitments::<C>::new::<_, IetfTranscript>(
&mut TransparentRng(randomness),
&share,
&[vec![C::generator()]],
&[],
);
let (generated_nonces, commitments) =
Commitments::<C>::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]);

assert_eq!(generated_nonces.len(), 1);
assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);

@@ -99,6 +99,7 @@ allow-git = [
"https://github.com/rust-lang-nursery/lazy-static.rs",
"https://github.com/serai-dex/substrate-bip39",
"https://github.com/serai-dex/substrate",
"https://github.com/alloy-rs/alloy",
"https://github.com/monero-rs/base58-monero",
"https://github.com/kayabaNerve/dockertest-rs",
]

@@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) {
const DOWNLOAD_BITCOIN: &str = r#"
FROM alpine:latest as bitcoin

ENV BITCOIN_VERSION=26.0
ENV BITCOIN_VERSION=27.0

RUN apk --no-cache add git gnupg

@@ -511,7 +511,7 @@ fn start(network: Network, services: HashSet<String>) {
command
} else {
// Publish the port
command.arg("-p").arg("30564:30564")
command.arg("-p").arg("30563:30563")
}
}
"serai" => {

@@ -28,6 +28,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std", "get
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

# Encoders
const-hex = { version = "1", default-features = false }
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
@@ -40,11 +41,16 @@ transcript = { package = "flexible-transcript", path = "../crypto/transcript", d
frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] }
frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false }

# Bitcoin/Ethereum
k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true }

# Bitcoin
secp256k1 = { version = "0.28", default-features = false, features = ["std", "global-context", "rand-std"], optional = true }
k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true }
bitcoin-serai = { path = "../coins/bitcoin", default-features = false, features = ["std"], optional = true }

# Ethereum
ethereum-serai = { path = "../coins/ethereum", default-features = false, optional = true }

# Monero
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true }
monero-serai = { path = "../coins/monero", default-features = false, features = ["std", "http-rpc", "multisig"], optional = true }
@@ -55,12 +61,12 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db", optional = true }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env", optional = true }
# TODO: Replace with direct usage of primitives
serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }

messages = { package = "serai-processor-messages", path = "./messages", optional = true }
messages = { package = "serai-processor-messages", path = "./messages" }

message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true }

@@ -69,6 +75,8 @@ frost = { package = "modular-frost", path = "../crypto/frost", features = ["test

sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }

ethereum-serai = { path = "../coins/ethereum", default-features = false, features = ["tests"] }

dockertest = "0.4"
serai-docker-tests = { path = "../tests/docker" }

@@ -76,9 +84,11 @@ serai-docker-tests = { path = "../tests/docker" }
secp256k1 = ["k256", "frost/secp256k1"]
bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"]

ethereum = ["secp256k1", "ethereum-serai"]

ed25519 = ["dalek-ff-group", "frost/ed25519"]
monero = ["ed25519", "monero-serai", "serai-client/monero"]

binaries = ["env_logger", "serai-env", "messages", "message-queue"]
binaries = ["env_logger", "serai-env", "message-queue"]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]

@@ -1,7 +1,15 @@
#![allow(dead_code)]

mod plan;
pub use plan::*;

mod db;
pub(crate) use db::*;

mod key_gen;

pub mod networks;
pub(crate) mod multisigs;

mod additional_key;
pub use additional_key::additional_key;

@@ -31,6 +31,8 @@ mod networks;
use networks::{Block, Network};
#[cfg(feature = "bitcoin")]
use networks::Bitcoin;
#[cfg(feature = "ethereum")]
use networks::Ethereum;
#[cfg(feature = "monero")]
use networks::Monero;

@@ -735,6 +737,7 @@ async fn main() {
};
let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() {
"bitcoin" => NetworkId::Bitcoin,
"ethereum" => NetworkId::Ethereum,
"monero" => NetworkId::Monero,
_ => panic!("unrecognized network"),
};
@@ -744,6 +747,8 @@ async fn main() {
match network_id {
#[cfg(feature = "bitcoin")]
NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await,
#[cfg(feature = "ethereum")]
NetworkId::Ethereum => run(db.clone(), Ethereum::new(db, url).await, coordinator).await,
#[cfg(feature = "monero")]
NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await,
_ => panic!("spawning a processor for an unsupported network"),

@@ -1,3 +1,5 @@
use std::io;

use ciphersuite::Ciphersuite;
pub use serai_db::*;

@@ -6,9 +8,59 @@ use serai_client::{primitives::Balance, in_instructions::primitives::InInstructi

use crate::{
Get, Plan,
networks::{Transaction, Network},
networks::{Output, Transaction, Network},
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum PlanFromScanning<N: Network> {
Refund(N::Output, N::Address),
Forward(N::Output),
}

impl<N: Network> PlanFromScanning<N> {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let output = N::Output::read(reader)?;

let mut address_vec_len = [0; 4];
reader.read_exact(&mut address_vec_len)?;
let mut address_vec =
vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()];
reader.read_exact(&mut address_vec)?;
let address =
N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap();

Ok(PlanFromScanning::Refund(output, address))
}
1 => {
let output = N::Output::read(reader)?;
Ok(PlanFromScanning::Forward(output))
}
_ => panic!("reading unrecognized PlanFromScanning"),
}
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
PlanFromScanning::Refund(output, address) => {
writer.write_all(&[0])?;
output.write(writer)?;

let address_vec: Vec<u8> =
address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap();
writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?;
writer.write_all(&address_vec)
}
PlanFromScanning::Forward(output) => {
writer.write_all(&[1])?;
output.write(writer)
}
}
}
}
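// Wire layout implied by the read/write pair above:
//   Refund:  [0u8] [output] [u32 LE address length] [address bytes]
//   Forward: [1u8] [output]
// An unrecognized kind byte panics on read, as these values only ever come
// from the local database.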
|

create_db!(
  MultisigsDb {
    NextBatchDb: () -> u32,
@@ -80,7 +132,11 @@ impl PlanDb {
  ) -> bool {
    let plan = Plan::<N>::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap();
    assert_eq!(plan.id(), id);
    (key == plan.key) && (Some(N::change_address(plan.key)) == plan.change)
    if let Some(change) = N::change_address(plan.key) {
      (key == plan.key) && (Some(change) == plan.change)
    } else {
      false
    }
  }
}

@@ -130,7 +186,7 @@ impl PlansFromScanningDb {
  pub fn set_plans_from_scanning<N: Network>(
    txn: &mut impl DbTxn,
    block_number: usize,
    plans: Vec<Plan<N>>,
    plans: Vec<PlanFromScanning<N>>,
  ) {
    let mut buf = vec![];
    for plan in plans {
@@ -142,13 +198,13 @@ impl PlansFromScanningDb {
  pub fn take_plans_from_scanning<N: Network>(
    txn: &mut impl DbTxn,
    block_number: usize,
  ) -> Option<Vec<Plan<N>>> {
  ) -> Option<Vec<PlanFromScanning<N>>> {
    let block_number = u64::try_from(block_number).unwrap();
    let res = Self::get(txn, block_number).map(|plans| {
      let mut plans_ref = plans.as_slice();
      let mut res = vec![];
      while !plans_ref.is_empty() {
        res.push(Plan::<N>::read(&mut plans_ref).unwrap());
        res.push(PlanFromScanning::<N>::read(&mut plans_ref).unwrap());
      }
      res
    });

@@ -7,7 +7,7 @@ use scale::{Encode, Decode};
use messages::SubstrateContext;

use serai_client::{
  primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data},
  primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data},
  in_instructions::primitives::{
    InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE,
  },
@@ -28,15 +28,12 @@ use scanner::{ScannerEvent, ScannerHandle, Scanner};
mod db;
use db::*;

#[cfg(not(test))]
mod scheduler;
#[cfg(test)]
pub mod scheduler;
pub(crate) mod scheduler;
use scheduler::Scheduler;

use crate::{
  Get, Db, Payment, Plan,
  networks::{OutputType, Output, Transaction, SignableTransaction, Block, PreparedSend, Network},
  networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network},
};

// InInstructionWithBalance from an external output
@@ -95,6 +92,8 @@ enum RotationStep {
  ClosingExisting,
}

// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee
// estimates
async fn prepare_send<N: Network>(
  network: &N,
  block_number: usize,
@@ -122,7 +121,7 @@ async fn prepare_send<N: Network>(
pub struct MultisigViewer<N: Network> {
  activation_block: usize,
  key: <N::Curve as Ciphersuite>::G,
  scheduler: Scheduler<N>,
  scheduler: N::Scheduler,
}

#[allow(clippy::type_complexity)]
@@ -131,7 +130,7 @@ pub enum MultisigEvent<N: Network> {
  // Batches to publish
  Batches(Option<(<N::Curve as Ciphersuite>::G, <N::Curve as Ciphersuite>::G)>, Vec<Batch>),
  // Eventuality completion found on-chain
  Completed(Vec<u8>, [u8; 32], N::Transaction),
  Completed(Vec<u8>, [u8; 32], <N::Eventuality as Eventuality>::Completion),
}

pub struct MultisigManager<D: Db, N: Network> {
@@ -157,20 +156,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
    assert!(current_keys.len() <= 2);
    let mut actively_signing = vec![];
    for (_, key) in &current_keys {
      schedulers.push(
        Scheduler::from_db(
          raw_db,
          *key,
          match N::NETWORK {
            NetworkId::Serai => panic!("adding a key for Serai"),
            NetworkId::Bitcoin => Coin::Bitcoin,
            // TODO: This is incomplete to DAI
            NetworkId::Ethereum => Coin::Ether,
            NetworkId::Monero => Coin::Monero,
          },
        )
        .unwrap(),
      );
      schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap());

      // Load any TXs being actively signed
      let key = key.to_bytes();
@@ -245,17 +231,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
    let viewer = Some(MultisigViewer {
      activation_block,
      key: external_key,
      scheduler: Scheduler::<N>::new::<D>(
        txn,
        external_key,
        match N::NETWORK {
          NetworkId::Serai => panic!("adding a key for Serai"),
          NetworkId::Bitcoin => Coin::Bitcoin,
          // TODO: This is incomplete to DAI
          NetworkId::Ethereum => Coin::Ether,
          NetworkId::Monero => Coin::Monero,
        },
      ),
      scheduler: N::Scheduler::new::<D>(txn, external_key, N::NETWORK),
    });

    if self.existing.is_none() {
@@ -352,48 +328,30 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
    (existing_outputs, new_outputs)
  }

  fn refund_plan(output: N::Output, refund_to: N::Address) -> Plan<N> {
  fn refund_plan(
    scheduler: &mut N::Scheduler,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N> {
    log::info!("creating refund plan for {}", hex::encode(output.id()));
    assert_eq!(output.kind(), OutputType::External);
    Plan {
      key: output.key(),
      // Uses a payment as this will still be successfully sent due to fee amortization,
      // and because change is currently always a Serai key
      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],
      inputs: vec![output],
      change: None,
    }
    scheduler.refund_plan::<D>(txn, output, refund_to)
  }

  fn forward_plan(&self, output: N::Output) -> Plan<N> {
  // Returns the plan for forwarding if one is needed.
  // Returns None if one is not needed to forward this output.
  fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option<Plan<N>> {
    log::info!("creating forwarding plan for {}", hex::encode(output.id()));

    /*
      Sending a Plan, with arbitrary data proxying the InInstruction, would require adding
      a flow for networks which drop their data to still embed arbitrary data. It'd also have
      edge cases causing failures (we'd need to manually provide the origin if it was implied,
      which may exceed the encoding limit).

      Instead, we save the InInstruction as we scan this output. Then, when the output is
      successfully forwarded, we simply read it from the local database. This also saves the
      costs of embedding arbitrary data.

      Since we can't rely on the Eventuality system to detect if it's a forwarded transaction,
      due to the asynchronicity of the Eventuality system, we instead interpret a Forwarded
      output which has an amount associated with an InInstruction which was forwarded as having
      been forwarded.
    */

    Plan {
      key: self.existing.as_ref().unwrap().key,
      payments: vec![Payment {
        address: N::forward_address(self.new.as_ref().unwrap().key),
        data: None,
        balance: output.balance(),
      }],
      inputs: vec![output],
      change: None,
    let res = self.existing.as_mut().unwrap().scheduler.forward_plan::<D>(
      txn,
      output.clone(),
      self.new.as_ref().expect("forwarding plan yet no new multisig").key,
    );
    if res.is_none() {
      log::info!("no forwarding plan was necessary for {}", hex::encode(output.id()));
    }
    res
  }

  // Filter newly received outputs due to the step being RotationStep::ClosingExisting.
@@ -605,7 +563,31 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
        block_number
      {
        // Load plans created when we scanned the block
        plans = PlansFromScanningDb::take_plans_from_scanning::<N>(txn, block_number).unwrap();
        let scanning_plans =
          PlansFromScanningDb::take_plans_from_scanning::<N>(txn, block_number).unwrap();
        // Expand into actual plans
        plans = scanning_plans
          .into_iter()
          .map(|plan| match plan {
            PlanFromScanning::Refund(output, refund_to) => {
              let existing = self.existing.as_mut().unwrap();
              if output.key() == existing.key {
                Self::refund_plan(&mut existing.scheduler, txn, output, refund_to)
              } else {
                let new = self
                  .new
                  .as_mut()
                  .expect("new multisig didn't exist yet output wasn't for existing multisig");
                assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig");
                Self::refund_plan(&mut new.scheduler, txn, output, refund_to)
              }
            }
            PlanFromScanning::Forward(output) => self
              .forward_plan(txn, &output)
              .expect("supposed to forward an output yet no forwarding plan"),
          })
          .collect();

        for plan in &plans {
          plans_from_scanning.insert(plan.id());
        }
@@ -665,13 +647,23 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
    });

    for plan in &plans {
      if plan.change == Some(N::change_address(plan.key)) {
        // Assert these are only created during the expected step
        match *step {
          RotationStep::UseExisting => {}
          RotationStep::NewAsChange |
          RotationStep::ForwardFromExisting |
          RotationStep::ClosingExisting => panic!("change was set to self despite rotating"),
      // This first equality should 'never meaningfully' be false
      // All created plans so far are by the existing multisig EXCEPT:
      // A) If we created a refund plan from the new multisig (yet that wouldn't have change)
      // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC
      //    scheduler, yet that doesn't have change)
      // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust
      if plan.key == self.existing.as_ref().unwrap().key {
        if let Some(change) = N::change_address(plan.key) {
          if plan.change == Some(change) {
            // Assert these (self-change) are only created during the expected step
            match *step {
              RotationStep::UseExisting => {}
              RotationStep::NewAsChange |
              RotationStep::ForwardFromExisting |
              RotationStep::ClosingExisting => panic!("change was set to self despite rotating"),
            }
          }
        }
      }
    }
@@ -853,15 +845,20 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
      let plans_at_start = plans.len();
      let (refund_to, instruction) = instruction_from_output::<N>(output);
      if let Some(mut instruction) = instruction {
        // Build a dedicated Plan forwarding this
        let forward_plan = self.forward_plan(output.clone());
        plans.push(forward_plan.clone());
        let Some(shimmed_plan) = N::Scheduler::shim_forward_plan(
          output.clone(),
          self.new.as_ref().expect("forwarding from existing yet no new multisig").key,
        ) else {
          // If this network doesn't need forwarding, report the output now
          return true;
        };
        plans.push(PlanFromScanning::<N>::Forward(output.clone()));

        // Set the instruction for this output to be returned
        // We need to set it under the amount it's forwarded with, so prepare its forwarding
        // TX to determine the fees involved
        let PreparedSend { tx, post_fee_branches: _, operating_costs } =
          prepare_send(network, block_number, forward_plan, 0).await;
          prepare_send(network, block_number, shimmed_plan, 0).await;
        // operating_costs should not increase in a forwarding TX
        assert_eq!(operating_costs, 0);

@@ -872,12 +869,28 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
        // letting it die out
        if let Some(tx) = &tx {
          instruction.balance.amount.0 -= tx.0.fee();

          /*
            Sending a Plan, with arbitrary data proxying the InInstruction, would require
            adding a flow for networks which drop their data to still embed arbitrary data.
            It'd also have edge cases causing failures (we'd need to manually provide the
            origin if it was implied, which may exceed the encoding limit).

            Instead, we save the InInstruction as we scan this output. Then, when the
            output is successfully forwarded, we simply read it from the local database.
            This also saves the costs of embedding arbitrary data.

            Since we can't rely on the Eventuality system to detect if it's a forwarded
            transaction, due to the asynchronicity of the Eventuality system, we instead
            interpret a Forwarded output which has an amount associated with an
            InInstruction which was forwarded as having been forwarded.
          */
          ForwardedOutputDb::save_forwarded_output(txn, &instruction);
        }
      } else if let Some(refund_to) = refund_to {
        if let Ok(refund_to) = refund_to.consume().try_into() {
          // Build a dedicated Plan refunding this
          plans.push(Self::refund_plan(output.clone(), refund_to));
          plans.push(PlanFromScanning::Refund(output.clone(), refund_to));
        }
      }
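Worked example of the amortization above (illustrative amounts, in 8-decimal units): a 1.00000000 external output whose forwarding transaction costs 0.00010000 in fees is saved as an InInstruction for 0.99990000, so the amount credited on Serai matches what actually arrives at the new multisig.

// 100_000_000 (output) - 10_000 (forwarding fee) = 99_990_000 (forwarded InInstruction)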

@@ -909,7 +922,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
      let Some(instruction) = instruction else {
        if let Some(refund_to) = refund_to {
          if let Ok(refund_to) = refund_to.consume().try_into() {
            plans.push(Self::refund_plan(output.clone(), refund_to));
            plans.push(PlanFromScanning::Refund(output.clone(), refund_to));
          }
        }
        continue;
@@ -999,9 +1012,9 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
      // This must be emitted before ScannerEvent::Block for all completions of known Eventualities
      // within the block. Unknown Eventualities may have their Completed events emitted after
      // ScannerEvent::Block however.
      ScannerEvent::Completed(key, block_number, id, tx) => {
        ResolvedDb::resolve_plan::<N>(txn, &key, id, &tx.id());
        (block_number, MultisigEvent::Completed(key, id, tx))
      ScannerEvent::Completed(key, block_number, id, tx_id, completion) => {
        ResolvedDb::resolve_plan::<N>(txn, &key, id, &tx_id);
        (block_number, MultisigEvent::Completed(key, id, completion))
      }
    };

@@ -17,15 +17,25 @@ use tokio::{

use crate::{
  Get, DbTxn, Db,
  networks::{Output, Transaction, EventualitiesTracker, Block, Network},
  networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network},
};

#[derive(Clone, Debug)]
pub enum ScannerEvent<N: Network> {
  // Block scanned
  Block { is_retirement_block: bool, block: <N::Block as Block<N>>::Id, outputs: Vec<N::Output> },
  Block {
    is_retirement_block: bool,
    block: <N::Block as Block<N>>::Id,
    outputs: Vec<N::Output>,
  },
  // Eventuality completion found on-chain
  Completed(Vec<u8>, usize, [u8; 32], N::Transaction),
  Completed(
    Vec<u8>,
    usize,
    [u8; 32],
    <N::Transaction as Transaction<N>>::Id,
    <N::Eventuality as Eventuality>::Completion,
  ),
}

pub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;
@@ -555,19 +565,25 @@ impl<N: Network, D: Db> Scanner<N, D> {
          }
        }

        for (id, (block_number, tx)) in network
        for (id, (block_number, tx, completion)) in network
          .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block)
          .await
        {
          info!(
            "eventuality {} resolved by {}, as found on chain",
            hex::encode(id),
            hex::encode(&tx.id())
            hex::encode(tx.as_ref())
          );

          completion_block_numbers.push(block_number);
          // This must be before the emission of ScannerEvent::Block, per commentary in mod.rs
          if !scanner.emit(ScannerEvent::Completed(
            key_vec.clone(),
            block_number,
            id,
            tx,
            completion,
          )) {
            return;
          }
        }

95
processor/src/multisigs/scheduler/mod.rs
Normal file
@@ -0,0 +1,95 @@
use core::fmt::Debug;
use std::io;

use ciphersuite::Ciphersuite;

use serai_client::primitives::{NetworkId, Balance};

use crate::{networks::Network, Db, Payment, Plan};

pub(crate) mod utxo;
pub(crate) mod smart_contract;

pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
}

impl SchedulerAddendum for () {
  fn read<R: io::Read>(_: &mut R) -> io::Result<Self> {
    Ok(())
  }
  fn write<W: io::Write>(&self, _: &mut W) -> io::Result<()> {
    Ok(())
  }
}

pub trait Scheduler<N: Network>: Sized + Clone + PartialEq + Debug {
  type Addendum: SchedulerAddendum;

  /// Check if this Scheduler is empty.
  fn empty(&self) -> bool;

  /// Create a new Scheduler.
  fn new<D: Db>(
    txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> Self;

  /// Load a Scheduler from the DB.
  fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> io::Result<Self>;

  /// Check if a branch is usable.
  fn can_use_branch(&self, balance: Balance) -> bool;

  /// Schedule a series of outputs/payments.
  fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>>;

  /// Consume all payments still pending within this Scheduler, without scheduling them.
  fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>>;

  /// Note a branch output as having been created, with the amount it was actually created with,
  /// or not having been created due to being too small.
  fn created_output<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    expected: u64,
    actual: Option<u64>,
  );

  /// Refund a specific output.
  fn refund_plan<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N>;

  /// Shim the forwarding Plan as necessary to obtain a fee estimate.
  ///
  /// If this Scheduler is for a Network which requires forwarding, this must return Some with a
  /// plan with identical fee behavior. If forwarding isn't necessary, returns None.
  fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>>;

  /// Forward a specific output to the new multisig.
  ///
  /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary.
  fn forward_plan<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    to: <N::Curve as Ciphersuite>::G,
  ) -> Option<Plan<N>>;
}
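To make the shim_forward_plan / forward_plan split concrete, here is a minimal sketch of the expected calling pattern (the names are from this file; the flow mirrors the scanning code earlier in this diff, and is an illustration rather than the processor's code):

fn forwarding_flow<N: Network, D: Db>(
  scheduler: &mut N::Scheduler,
  txn: &mut D::Transaction<'_>,
  output: N::Output,
  new_key: <N::Curve as Ciphersuite>::G,
) -> Option<Plan<N>> {
  // Stateless shim: callable while scanning, purely to price the forwarding TX
  let _fee_estimate_plan = N::Scheduler::shim_forward_plan(output.clone(), new_key)?;
  // Stateful version: must agree with the shim on whether forwarding is needed
  scheduler.forward_plan::<D>(txn, output, new_key)
}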

208
processor/src/multisigs/scheduler/smart_contract.rs
Normal file
@@ -0,0 +1,208 @@
use std::{io, collections::HashSet};

use ciphersuite::{group::GroupEncoding, Ciphersuite};

use serai_client::primitives::{NetworkId, Coin, Balance};

use crate::{
  Get, DbTxn, Db, Payment, Plan, create_db,
  networks::{Output, Network},
  multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait},
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Scheduler<N: Network> {
  key: <N::Curve as Ciphersuite>::G,
  coins: HashSet<Coin>,
  rotated: bool,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Addendum<N: Network> {
  Nonce(u64),
  RotateTo { nonce: u64, new_key: <N::Curve as Ciphersuite>::G },
}

impl<N: Network> SchedulerAddendum for Addendum<N> {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    match kind[0] {
      0 => {
        let mut nonce = [0; 8];
        reader.read_exact(&mut nonce)?;
        Ok(Addendum::Nonce(u64::from_le_bytes(nonce)))
      }
      1 => {
        let mut nonce = [0; 8];
        reader.read_exact(&mut nonce)?;
        let nonce = u64::from_le_bytes(nonce);

        let new_key = N::Curve::read_G(reader)?;
        Ok(Addendum::RotateTo { nonce, new_key })
      }
      _ => Err(io::Error::other("reading unknown Addendum type"))?,
    }
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Addendum::Nonce(nonce) => {
        writer.write_all(&[0])?;
        writer.write_all(&nonce.to_le_bytes())
      }
      Addendum::RotateTo { nonce, new_key } => {
        writer.write_all(&[1])?;
        writer.write_all(&nonce.to_le_bytes())?;
        writer.write_all(new_key.to_bytes().as_ref())
      }
    }
  }
}
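A hedged round-trip check of the encoding above (a hypothetical test, not present in the source; N stands for any type implementing Network):

fn addendum_round_trip<N: Network>() {
  // Nonce(5) encodes as the tag byte 0 followed by the u64 in little endian
  let mut buf = vec![];
  Addendum::<N>::Nonce(5).write(&mut buf).unwrap();
  assert_eq!(buf, [0, 5, 0, 0, 0, 0, 0, 0, 0]);
  // Reading it back and re-serializing reproduces the same bytes
  let read = Addendum::<N>::read(&mut buf.as_slice()).unwrap();
  let mut reencoded = vec![];
  read.write(&mut reencoded).unwrap();
  assert_eq!(reencoded, buf);
}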

create_db! {
  SchedulerDb {
    LastNonce: () -> u64,
    RotatedTo: (key: &[u8]) -> Vec<u8>,
  }
}

impl<N: Network<Scheduler = Self>> SchedulerTrait<N> for Scheduler<N> {
  type Addendum = Addendum<N>;

  /// Check if this Scheduler is empty.
  fn empty(&self) -> bool {
    self.rotated
  }

  /// Create a new Scheduler.
  fn new<D: Db>(
    _txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> Self {
    assert!(N::branch_address(key).is_none());
    assert!(N::change_address(key).is_none());
    assert!(N::forward_address(key).is_none());

    Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false }
  }

  /// Load a Scheduler from the DB.
  fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> io::Result<Self> {
    Ok(Scheduler {
      key,
      coins: network.coins().iter().copied().collect(),
      rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(),
    })
  }

  fn can_use_branch(&self, _balance: Balance) -> bool {
    false
  }

  fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>> {
    for utxo in utxos {
      assert!(self.coins.contains(&utxo.balance().coin));
    }

    let mut nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1);
    let mut plans = vec![];
    for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) {
      // Once we rotate, all further payments should be scheduled via the new multisig
      assert!(!self.rotated);
      plans.push(Plan {
        key: self.key,
        inputs: vec![],
        payments: chunk.to_vec(),
        change: None,
        scheduler_addendum: Addendum::Nonce(nonce),
      });
      nonce += 1;
    }

    // If we're supposed to rotate to the new key, create an empty Plan which will signify the key
    // update
    if force_spend && (!self.rotated) {
      plans.push(Plan {
        key: self.key,
        inputs: vec![],
        payments: vec![],
        change: None,
        scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change },
      });
      nonce += 1;
      self.rotated = true;
      RotatedTo::set(
        txn,
        self.key.to_bytes().as_ref(),
        &key_for_any_change.to_bytes().as_ref().to_vec(),
      );
    }

    LastNonce::set(txn, &nonce);

    plans
  }
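Illustration of the nonce assignment above (hypothetical numbers): with MAX_OUTPUTS = 2, five pending payments, and force_spend set, schedule yields ceil(5 / 2) = 3 payment Plans plus one rotation Plan, with strictly increasing nonces:

// Addenda produced, in order, when the stored nonce maps to n:
//   Nonce(n), Nonce(n + 1), Nonce(n + 2)   // the three payment plans
//   RotateTo { nonce: n + 3, new_key }     // the empty key-update plan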

  fn consume_payments<D: Db>(&mut self, _txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {
    vec![]
  }

  fn created_output<D: Db>(
    &mut self,
    _txn: &mut D::Transaction<'_>,
    _expected: u64,
    _actual: Option<u64>,
  ) {
    panic!("Smart Contract Scheduler created a Branch output")
  }

  /// Refund a specific output.
  fn refund_plan<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N> {
    let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref())
      .and_then(|key_bytes| <N::Curve as Ciphersuite>::read_G(&mut key_bytes.as_slice()).ok())
      .unwrap_or(self.key);

    let nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1);
    LastNonce::set(txn, &(nonce + 1));
    Plan {
      key: current_key,
      inputs: vec![],
      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],
      change: None,
      scheduler_addendum: Addendum::Nonce(nonce),
    }
  }

  fn shim_forward_plan(_output: N::Output, _to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {
    None
  }

  /// Forward a specific output to the new multisig.
  ///
  /// Returns None if no forwarding is necessary.
  fn forward_plan<D: Db>(
    &mut self,
    _txn: &mut D::Transaction<'_>,
    _output: N::Output,
    _to: <N::Curve as Ciphersuite>::G,
  ) -> Option<Plan<N>> {
    None
  }
}
@@ -5,16 +5,17 @@ use std::{

use ciphersuite::{group::GroupEncoding, Ciphersuite};

use serai_client::primitives::{Coin, Amount, Balance};
use serai_client::primitives::{NetworkId, Coin, Amount, Balance};

use crate::{
  networks::{OutputType, Output, Network},
  DbTxn, Db, Payment, Plan,
  networks::{OutputType, Output, Network, UtxoNetwork},
  multisigs::scheduler::Scheduler as SchedulerTrait,
};

/// Stateless, deterministic output/payment manager.
#[derive(PartialEq, Eq, Debug)]
pub struct Scheduler<N: Network> {
/// Deterministic output/payment manager.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Scheduler<N: UtxoNetwork> {
  key: <N::Curve as Ciphersuite>::G,
  coin: Coin,

@@ -46,7 +47,7 @@ fn scheduler_key<D: Db, G: GroupEncoding>(key: &G) -> Vec<u8> {
  D::key(b"SCHEDULER", b"scheduler", key.to_bytes())
}

impl<N: Network> Scheduler<N> {
impl<N: UtxoNetwork<Scheduler = Self>> Scheduler<N> {
  pub fn empty(&self) -> bool {
    self.queued_plans.is_empty() &&
      self.plans.is_empty() &&
@@ -144,8 +145,18 @@ impl<N: Network> Scheduler<N> {
  pub fn new<D: Db>(
    txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    coin: Coin,
    network: NetworkId,
  ) -> Self {
    assert!(N::branch_address(key).is_some());
    assert!(N::change_address(key).is_some());
    assert!(N::forward_address(key).is_some());

    let coin = {
      let coins = network.coins();
      assert_eq!(coins.len(), 1);
      coins[0]
    };

    let res = Scheduler {
      key,
      coin,
@@ -159,7 +170,17 @@ impl<N: Network> Scheduler<N> {
    res
  }

  pub fn from_db<D: Db>(db: &D, key: <N::Curve as Ciphersuite>::G, coin: Coin) -> io::Result<Self> {
  pub fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> io::Result<Self> {
    let coin = {
      let coins = network.coins();
      assert_eq!(coins.len(), 1);
      coins[0]
    };

    let scheduler = db.get(scheduler_key::<D, _>(&key)).unwrap_or_else(|| {
      panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes()))
    });
@@ -201,7 +222,7 @@ impl<N: Network> Scheduler<N> {
      amount
    };

    let branch_address = N::branch_address(self.key);
    let branch_address = N::branch_address(self.key).unwrap();

    // If we have more payments than we can handle in a single TX, create plans for them
    // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create:
@@ -237,7 +258,8 @@ impl<N: Network> Scheduler<N> {
      key: self.key,
      inputs,
      payments,
      change: Some(N::change_address(key_for_any_change)).filter(|_| change),
      change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change),
      scheduler_addendum: (),
    }
  }

@@ -305,7 +327,7 @@ impl<N: Network> Scheduler<N> {
      its *own* branch address, since created_output is called on the signer's Scheduler.
    */
    {
      let branch_address = N::branch_address(self.key);
      let branch_address = N::branch_address(self.key).unwrap();
      payments =
        payments.drain(..).filter(|payment| payment.address != branch_address).collect::<Vec<_>>();
    }
@@ -357,7 +379,8 @@ impl<N: Network> Scheduler<N> {
        key: self.key,
        inputs: chunk,
        payments: vec![],
        change: Some(N::change_address(key_for_any_change)),
        change: Some(N::change_address(key_for_any_change).unwrap()),
        scheduler_addendum: (),
      })
    }

@@ -403,7 +426,8 @@ impl<N: Network> Scheduler<N> {
      key: self.key,
      inputs: self.utxos.drain(..).collect::<Vec<_>>(),
      payments: vec![],
      change: Some(N::change_address(key_for_any_change)),
      change: Some(N::change_address(key_for_any_change).unwrap()),
      scheduler_addendum: (),
    });
  }

@@ -435,9 +459,6 @@ impl<N: Network> Scheduler<N> {

  // Note a branch output as having been created, with the amount it was actually created with,
  // or not having been created due to being too small
  // This can be called whenever, so long as it's properly ordered
  // (it's independent to Serai/the chain we're scheduling over, yet still expects outputs to be
  // created in the same order Plans are returned in)
  pub fn created_output<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
@@ -501,3 +522,106 @@ impl<N: Network> Scheduler<N> {
    txn.put(scheduler_key::<D, _>(&self.key), self.serialize());
  }
}

impl<N: UtxoNetwork<Scheduler = Self>> SchedulerTrait<N> for Scheduler<N> {
  type Addendum = ();

  /// Check if this Scheduler is empty.
  fn empty(&self) -> bool {
    Scheduler::empty(self)
  }

  /// Create a new Scheduler.
  fn new<D: Db>(
    txn: &mut D::Transaction<'_>,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> Self {
    Scheduler::new::<D>(txn, key, network)
  }

  /// Load a Scheduler from the DB.
  fn from_db<D: Db>(
    db: &D,
    key: <N::Curve as Ciphersuite>::G,
    network: NetworkId,
  ) -> io::Result<Self> {
    Scheduler::from_db::<D>(db, key, network)
  }

  /// Check if a branch is usable.
  fn can_use_branch(&self, balance: Balance) -> bool {
    Scheduler::can_use_branch(self, balance)
  }

  /// Schedule a series of outputs/payments.
  fn schedule<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    utxos: Vec<N::Output>,
    payments: Vec<Payment<N>>,
    key_for_any_change: <N::Curve as Ciphersuite>::G,
    force_spend: bool,
  ) -> Vec<Plan<N>> {
    Scheduler::schedule::<D>(self, txn, utxos, payments, key_for_any_change, force_spend)
  }

  /// Consume all payments still pending within this Scheduler, without scheduling them.
  fn consume_payments<D: Db>(&mut self, txn: &mut D::Transaction<'_>) -> Vec<Payment<N>> {
    Scheduler::consume_payments::<D>(self, txn)
  }

  /// Note a branch output as having been created, with the amount it was actually created with,
  /// or not having been created due to being too small.
  // TODO: Move this to Balance.
  fn created_output<D: Db>(
    &mut self,
    txn: &mut D::Transaction<'_>,
    expected: u64,
    actual: Option<u64>,
  ) {
    Scheduler::created_output::<D>(self, txn, expected, actual)
  }

  fn refund_plan<D: Db>(
    &mut self,
    _: &mut D::Transaction<'_>,
    output: N::Output,
    refund_to: N::Address,
  ) -> Plan<N> {
    Plan {
      key: output.key(),
      // Uses a payment as this will still be successfully sent due to fee amortization,
      // and because change is currently always a Serai key
      payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }],
      inputs: vec![output],
      change: None,
      scheduler_addendum: (),
    }
  }

  fn shim_forward_plan(output: N::Output, to: <N::Curve as Ciphersuite>::G) -> Option<Plan<N>> {
    Some(Plan {
      key: output.key(),
      payments: vec![Payment {
        address: N::forward_address(to).unwrap(),
        data: None,
        balance: output.balance(),
      }],
      inputs: vec![output],
      change: None,
      scheduler_addendum: (),
    })
  }

  fn forward_plan<D: Db>(
    &mut self,
    _: &mut D::Transaction<'_>,
    output: N::Output,
    to: <N::Curve as Ciphersuite>::G,
  ) -> Option<Plan<N>> {
    assert_eq!(self.key, output.key());
    // Call shim as shim returns the actual
    Self::shim_forward_plan(output, to)
  }
}
@@ -52,9 +52,10 @@ use crate::{
  networks::{
    NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait,
    Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait,
    Eventuality as EventualityTrait, EventualitiesTracker, Network,
    Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork,
  },
  Payment,
  multisigs::scheduler::utxo::Scheduler,
};

#[derive(Clone, PartialEq, Eq, Debug)]
@@ -178,14 +179,6 @@ impl TransactionTrait<Bitcoin> for Transaction {
    hash.reverse();
    hash
  }
  fn serialize(&self) -> Vec<u8> {
    let mut buf = vec![];
    self.consensus_encode(&mut buf).unwrap();
    buf
  }
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}")))
  }

  #[cfg(test)]
  async fn fee(&self, network: &Bitcoin) -> u64 {
@@ -209,7 +202,23 @@ impl TransactionTrait<Bitcoin> for Transaction {
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Eventuality([u8; 32]);

#[derive(Clone, PartialEq, Eq, Default, Debug)]
pub struct EmptyClaim;
impl AsRef<[u8]> for EmptyClaim {
  fn as_ref(&self) -> &[u8] {
    &[]
  }
}
impl AsMut<[u8]> for EmptyClaim {
  fn as_mut(&mut self) -> &mut [u8] {
    &mut []
  }
}

impl EventualityTrait for Eventuality {
  type Claim = EmptyClaim;
  type Completion = Transaction;

  fn lookup(&self) -> Vec<u8> {
    self.0.to_vec()
  }
@@ -224,6 +233,18 @@ impl EventualityTrait for Eventuality {
  fn serialize(&self) -> Vec<u8> {
    self.0.to_vec()
  }

  fn claim(_: &Transaction) -> EmptyClaim {
    EmptyClaim
  }
  fn serialize_completion(completion: &Transaction) -> Vec<u8> {
    let mut buf = vec![];
    completion.consensus_encode(&mut buf).unwrap();
    buf
  }
  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Transaction> {
    Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}")))
  }
}

#[derive(Clone, Debug)]
@@ -374,8 +395,12 @@ impl Bitcoin {
    for input in &tx.input {
      let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array();
      input_tx.reverse();
      in_value += self.get_transaction(&input_tx).await?.output
        [usize::try_from(input.previous_output.vout).unwrap()]
      in_value += self
        .rpc
        .get_transaction(&input_tx)
        .await
        .map_err(|_| NetworkError::ConnectionError)?
        .output[usize::try_from(input.previous_output.vout).unwrap()]
        .value
        .to_sat();
    }
@@ -537,6 +562,25 @@ impl Bitcoin {
  }
}

// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT)
// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes
// While our inputs are entirely SegWit, such fine tuning is not necessary and could create
// issues in the future (if the size decreases or we misevaluate it)
// It also offers a minimal amount of benefit when we are able to logarithmically accumulate
// inputs
// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and
// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192
// bytes
// 100,000 / 192 = 520
// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself
const MAX_INPUTS: usize = 520;
const MAX_OUTPUTS: usize = 520;
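A compile-time check of the arithmetic in the comment above (a sketch, not present in the source):

const _: () = assert!(520 * 192 == 99_840);
const _: () = assert!(100_000 - (520 * 192) == 160); // overhead left for the TX structure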

fn address_from_key(key: ProjectivePoint) -> Address {
  Address::new(BAddress::<NetworkChecked>::new(BNetwork::Bitcoin, address_payload(key).unwrap()))
    .unwrap()
}

#[async_trait]
impl Network for Bitcoin {
  type Curve = Secp256k1;
@@ -549,6 +593,8 @@ impl Network for Bitcoin {
  type Eventuality = Eventuality;
  type TransactionMachine = TransactionMachine;

  type Scheduler = Scheduler<Bitcoin>;

  type Address = Address;

  const NETWORK: NetworkId = NetworkId::Bitcoin;
@@ -598,19 +644,7 @@ impl Network for Bitcoin {
  // aggregation TX
  const COST_TO_AGGREGATE: u64 = 800;

  // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT)
  // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes
  // While our inputs are entirely SegWit, such fine tuning is not necessary and could create
  // issues in the future (if the size decreases or we misevaluate it)
  // It also offers a minimal amount of benefit when we are able to logarithmically accumulate
  // inputs
  // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and
  // 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192
  // bytes
  // 100,000 / 192 = 520
  // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself
  const MAX_INPUTS: usize = 520;
  const MAX_OUTPUTS: usize = 520;
  const MAX_OUTPUTS: usize = MAX_OUTPUTS;

  fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {
    *keys = tweak_keys(keys);
@@ -618,24 +652,24 @@ impl Network for Bitcoin {
    scanner(keys.group_key());
  }

  fn external_address(key: ProjectivePoint) -> Address {
    Address::new(BAddress::<NetworkChecked>::new(BNetwork::Bitcoin, address_payload(key).unwrap()))
      .unwrap()
  #[cfg(test)]
  async fn external_address(&self, key: ProjectivePoint) -> Address {
    address_from_key(key)
  }

  fn branch_address(key: ProjectivePoint) -> Address {
  fn branch_address(key: ProjectivePoint) -> Option<Address> {
    let (_, offsets, _) = scanner(key);
    Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))
    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])))
  }

  fn change_address(key: ProjectivePoint) -> Address {
  fn change_address(key: ProjectivePoint) -> Option<Address> {
    let (_, offsets, _) = scanner(key);
    Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change]))
    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])))
  }

  fn forward_address(key: ProjectivePoint) -> Address {
  fn forward_address(key: ProjectivePoint) -> Option<Address> {
    let (_, offsets, _) = scanner(key);
    Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))
    Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded])))
  }

  async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
@@ -682,7 +716,7 @@ impl Network for Bitcoin {
      spent_tx.reverse();
      let mut tx;
      while {
        tx = self.get_transaction(&spent_tx).await;
        tx = self.rpc.get_transaction(&spent_tx).await;
        tx.is_err()
      } {
        log::error!("couldn't get transaction from bitcoin node: {tx:?}");
@@ -710,7 +744,7 @@ impl Network for Bitcoin {
    &self,
    eventualities: &mut EventualitiesTracker<Eventuality>,
    block: &Self::Block,
  ) -> HashMap<[u8; 32], (usize, Transaction)> {
  ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> {
    let mut res = HashMap::new();
    if eventualities.map.is_empty() {
      return res;
@@ -719,11 +753,11 @@ impl Network for Bitcoin {
    fn check_block(
      eventualities: &mut EventualitiesTracker<Eventuality>,
      block: &Block,
      res: &mut HashMap<[u8; 32], (usize, Transaction)>,
      res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>,
    ) {
      for tx in &block.txdata[1 ..] {
        if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) {
          res.insert(plan, (eventualities.block_number, tx.clone()));
          res.insert(plan, (eventualities.block_number, tx.id(), tx.clone()));
        }
      }

@@ -770,7 +804,6 @@ impl Network for Bitcoin {
  async fn needed_fee(
    &self,
    block_number: usize,
    _: &[u8; 32],
    inputs: &[Output],
    payments: &[Payment<Self>],
    change: &Option<Address>,
@@ -787,9 +820,11 @@ impl Network for Bitcoin {
    &self,
    block_number: usize,
    plan_id: &[u8; 32],
    _key: ProjectivePoint,
    inputs: &[Output],
    payments: &[Payment<Self>],
    change: &Option<Address>,
    (): &(),
  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError> {
    Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map(
      |signable| {
@@ -803,7 +838,7 @@ impl Network for Bitcoin {
    ))
  }

  async fn attempt_send(
  async fn attempt_sign(
    &self,
    keys: ThresholdKeys<Self::Curve>,
    transaction: Self::SignableTransaction,
@@ -817,7 +852,7 @@ impl Network for Bitcoin {
    )
  }

  async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> {
  async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> {
    match self.rpc.send_raw_transaction(tx).await {
      Ok(_) => (),
      Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?,
@@ -828,12 +863,14 @@ impl Network for Bitcoin {
    Ok(())
  }

  async fn get_transaction(&self, id: &[u8; 32]) -> Result<Transaction, NetworkError> {
    self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError)
  }

  fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Transaction) -> bool {
    eventuality.0 == tx.id()
  async fn confirm_completion(
    &self,
    eventuality: &Self::Eventuality,
    _: &EmptyClaim,
  ) -> Result<Option<Transaction>, NetworkError> {
    Ok(Some(
      self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?,
    ))
  }

  #[cfg(test)]
@@ -841,6 +878,20 @@ impl Network for Bitcoin {
    self.rpc.get_block_number(id).await.unwrap()
  }

  #[cfg(test)]
  async fn check_eventuality_by_claim(
    &self,
    eventuality: &Self::Eventuality,
    _: &EmptyClaim,
  ) -> bool {
    self.rpc.get_transaction(&eventuality.0).await.is_ok()
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction {
    self.rpc.get_transaction(&id.0).await.unwrap()
  }

  #[cfg(test)]
  async fn mine_block(&self) {
    self
@@ -892,3 +943,7 @@ impl Network for Bitcoin {
    self.get_block(block).await.unwrap()
  }
}

impl UtxoNetwork for Bitcoin {
  const MAX_INPUTS: usize = MAX_INPUTS;
}

827
processor/src/networks/ethereum.rs
Normal file
@@ -0,0 +1,827 @@
use core::{fmt::Debug, time::Duration};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
  io,
};

use async_trait::async_trait;

use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1};
use frost::ThresholdKeys;

use ethereum_serai::{
  alloy_core::primitives::U256,
  alloy_rpc_types::{BlockNumberOrTag, Transaction},
  alloy_simple_request_transport::SimpleRequest,
  alloy_rpc_client::ClientBuilder,
  alloy_provider::{Provider, RootProvider},
  crypto::{PublicKey, Signature},
  deployer::Deployer,
  router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction},
  machine::*,
};
#[cfg(test)]
use ethereum_serai::alloy_core::primitives::B256;

use tokio::{
  time::sleep,
  sync::{RwLock, RwLockReadGuard},
};

use serai_client::{
  primitives::{Coin, Amount, Balance, NetworkId},
  validator_sets::primitives::Session,
};

use crate::{
  Db, Payment,
  networks::{
    OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block,
    Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network,
  },
  key_gen::NetworkKeyDb,
  multisigs::scheduler::{
    Scheduler as SchedulerTrait,
    smart_contract::{Addendum, Scheduler},
  },
};

#[cfg(not(test))]
const DAI: [u8; 20] =
  match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") {
    Ok(res) => res,
    Err(_) => panic!("invalid non-test DAI hex address"),
  };
#[cfg(test)] // TODO
const DAI: [u8; 20] =
  match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") {
    Ok(res) => res,
    Err(_) => panic!("invalid test DAI hex address"),
  };

fn coin_to_serai_coin(coin: &EthereumCoin) -> Option<Coin> {
  match coin {
    EthereumCoin::Ether => Some(Coin::Ether),
    EthereumCoin::Erc20(token) => {
      if *token == DAI {
        return Some(Coin::Dai);
      }
      None
    }
  }
}

fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount {
  assert_eq!(coin.network(), NetworkId::Ethereum);
  assert_eq!(coin.decimals(), 8);
  // Remove 10 decimals so we go from 18 decimals to 8 decimals
  let divisor = U256::from(10_000_000_000u64);
  // This is valid up to 184b, which is assumed for the coins allowed
  Amount(u64::try_from(amount / divisor).unwrap())
}

fn balance_to_ethereum_amount(balance: Balance) -> U256 {
  assert_eq!(balance.coin.network(), NetworkId::Ethereum);
  assert_eq!(balance.coin.decimals(), 8);
  // Restore 10 decimals so we go from 8 decimals to 18 decimals
  let factor = U256::from(10_000_000_000u64);
  U256::from(balance.amount.0) * factor
}
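Worked example of the rescaling above (illustrative): 1 ETH is 10^18 wei, and removing 10 decimals yields 10^8 units at Serai's 8 decimals. Note the integer division in amount_to_serai_amount silently truncates any value below 10^-8 ETH.

fn rescale_sketch() {
  let one_ether = U256::from(1_000_000_000_000_000_000u64); // 10^18 wei
  let divisor = U256::from(10_000_000_000u64); // 10^10
  assert_eq!(u64::try_from(one_ether / divisor).unwrap(), 100_000_000); // 10^8
}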

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Address(pub [u8; 20]);
impl TryFrom<Vec<u8>> for Address {
  type Error = ();
  fn try_from(bytes: Vec<u8>) -> Result<Address, ()> {
    if bytes.len() != 20 {
      Err(())?;
    }
    let mut res = [0; 20];
    res.copy_from_slice(&bytes);
    Ok(Address(res))
  }
}
impl TryInto<Vec<u8>> for Address {
  type Error = ();
  fn try_into(self) -> Result<Vec<u8>, ()> {
    Ok(self.0.to_vec())
  }
}
impl ToString for Address {
  fn to_string(&self) -> String {
    ethereum_serai::alloy_core::primitives::Address::from(self.0).to_string()
  }
}

impl SignableTransaction for RouterCommand {
  fn fee(&self) -> u64 {
    // Return a fee of 0 as we'll handle amortization on our end
    0
  }
}

#[async_trait]
impl<D: Debug + Db> TransactionTrait<Ethereum<D>> for Transaction {
  type Id = [u8; 32];
  fn id(&self) -> Self::Id {
    self.hash.0
  }

  #[cfg(test)]
  async fn fee(&self, _network: &Ethereum<D>) -> u64 {
    // Return a fee of 0 as we'll handle amortization on our end
    0
  }
}

// We use 32-block Epochs to represent blocks.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Epoch {
  // The hash of the block which ended the prior Epoch.
  prior_end_hash: [u8; 32],
  // The first block number within this Epoch.
  start: u64,
  // The hash of the last block within this Epoch.
  end_hash: [u8; 32],
  // The monotonic time for this Epoch.
  time: u64,
}

impl Epoch {
  fn end(&self) -> u64 {
    self.start + 31
  }
}
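Epoch layout implied above (illustrative): epoch n covers blocks [32n, 32n + 31], hence end() is start + 31.

// epoch 0 -> blocks 0..=31, epoch 1 -> blocks 32..=63, ...
fn epoch_bounds_sketch(n: u64) -> (u64, u64) {
  (n * 32, (n * 32) + 31)
}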

#[async_trait]
impl<D: Debug + Db> Block<Ethereum<D>> for Epoch {
  type Id = [u8; 32];
  fn id(&self) -> [u8; 32] {
    self.end_hash
  }
  fn parent(&self) -> [u8; 32] {
    self.prior_end_hash
  }
  async fn time(&self, _: &Ethereum<D>) -> u64 {
    self.time
  }
}

impl<D: Debug + Db> Output<Ethereum<D>> for EthereumInInstruction {
  type Id = [u8; 32];

  fn kind(&self) -> OutputType {
    OutputType::External
  }

  fn id(&self) -> Self::Id {
    let mut id = [0; 40];
    id[.. 32].copy_from_slice(&self.id.0);
    id[32 ..].copy_from_slice(&self.id.1.to_le_bytes());
    *ethereum_serai::alloy_core::primitives::keccak256(id)
  }
  fn tx_id(&self) -> [u8; 32] {
    self.id.0
  }
  fn key(&self) -> <Secp256k1 as Ciphersuite>::G {
    self.key_at_end_of_block
  }

  fn presumed_origin(&self) -> Option<Address> {
    Some(Address(self.from))
  }

  fn balance(&self) -> Balance {
    let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| {
      panic!(
        "requesting coin for an EthereumInInstruction with a coin {}",
        "we don't handle. this never should have been yielded"
      )
    });
    Balance { coin, amount: amount_to_serai_amount(coin, self.amount) }
  }
  fn data(&self) -> &[u8] {
    &self.data
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    EthereumInInstruction::write(self, writer)
  }
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    EthereumInInstruction::read(reader)
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Claim {
  signature: [u8; 64],
}
impl AsRef<[u8]> for Claim {
  fn as_ref(&self) -> &[u8] {
    &self.signature
  }
}
impl AsMut<[u8]> for Claim {
  fn as_mut(&mut self) -> &mut [u8] {
    &mut self.signature
  }
}
impl Default for Claim {
  fn default() -> Self {
    Self { signature: [0; 64] }
  }
}
impl From<&Signature> for Claim {
  fn from(sig: &Signature) -> Self {
    Self { signature: sig.to_bytes() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Eventuality(PublicKey, RouterCommand);
impl EventualityTrait for Eventuality {
  type Claim = Claim;
  type Completion = SignedRouterCommand;

  fn lookup(&self) -> Vec<u8> {
    match self.1 {
      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
        nonce.as_le_bytes().to_vec()
      }
    }
  }

  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let point = Secp256k1::read_G(reader)?;
    let command = RouterCommand::read(reader)?;
    Ok(Eventuality(
      PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?,
      command,
    ))
  }
  fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    res.extend(self.0.point().to_bytes().as_slice());
    self.1.write(&mut res).unwrap();
    res
  }

  fn claim(completion: &Self::Completion) -> Self::Claim {
    Claim::from(completion.signature())
  }
  fn serialize_completion(completion: &Self::Completion) -> Vec<u8> {
    let mut res = vec![];
    completion.write(&mut res).unwrap();
    res
  }
  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Completion> {
    SignedRouterCommand::read(reader)
  }
}

#[derive(Clone, Debug)]
pub struct Ethereum<D: Debug + Db> {
  // This DB is solely used to access the first key generated, as needed to determine the Router's
  // address. Accordingly, all methods present are consistent to a Serai chain with a finalized
  // first key (regardless of local state), and this is safe.
  db: D,
  provider: Arc<RootProvider<SimpleRequest>>,
  deployer: Deployer,
  router: Arc<RwLock<Option<Router>>>,
}
impl<D: Debug + Db> PartialEq for Ethereum<D> {
  fn eq(&self, _other: &Ethereum<D>) -> bool {
    true
  }
}
impl<D: Debug + Db> Ethereum<D> {
  pub async fn new(db: D, url: String) -> Self {
    let provider = Arc::new(RootProvider::new(
      ClientBuilder::default().transport(SimpleRequest::new(url), true),
    ));

    #[cfg(test)] // TODO: Move to test code
    provider.raw_request::<_, ()>("evm_setAutomine".into(), false).await.unwrap();

    let mut deployer = Deployer::new(provider.clone()).await;
    while !matches!(deployer, Ok(Some(_))) {
      log::error!("Deployer wasn't deployed yet or networking error");
      sleep(Duration::from_secs(5)).await;
      deployer = Deployer::new(provider.clone()).await;
    }
    let deployer = deployer.unwrap().unwrap();

    Ethereum { db, provider, deployer, router: Arc::new(RwLock::new(None)) }
  }

  // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been.
  // This is guaranteed to return Some.
  pub async fn router(&self) -> RwLockReadGuard<'_, Option<Router>> {
    // If we've already instantiated the Router, return a read reference
    {
      let router = self.router.read().await;
      if router.is_some() {
        return router;
      }
    }

    // Instantiate it
    let mut router = self.router.write().await;
    // If another attempt beat us to it, return
    if router.is_some() {
      drop(router);
      return self.router.read().await;
    }

    // Get the first key from the DB
    let first_key =
      NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key");
    let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap();
    let public_key = PublicKey::new(key).unwrap();

    // Find the router
    let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await;
    while !matches!(found, Ok(Some(_))) {
      log::error!("Router wasn't deployed yet or networking error");
      sleep(Duration::from_secs(5)).await;
      found = self.deployer.find_router(self.provider.clone(), &public_key).await;
    }

    // Set it
    *router = Some(found.unwrap().unwrap());

    // Downgrade to a read lock
    // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no
    // longer necessary
    drop(router);
    self.router.read().await
  }
}
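router() above is an instance of double-checked lazy initialization over a tokio RwLock. A reduced skeleton of the pattern (a sketch; init stands in for the Deployer lookup):

async fn get_or_init<T>(
  lock: &RwLock<Option<T>>,
  init: impl FnOnce() -> T,
) -> RwLockReadGuard<'_, Option<T>> {
  {
    // Fast path: already initialized
    let guard = lock.read().await;
    if guard.is_some() {
      return guard;
    }
  }
  let mut guard = lock.write().await;
  // Re-check: another writer may have initialized while we waited
  if guard.is_none() {
    *guard = Some(init());
  }
  // Release the write lock, then hand back a read guard
  drop(guard);
  lock.read().await
}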
||||
|
||||
#[async_trait]
|
||||
impl<D: Debug + Db> Network for Ethereum<D> {
|
||||
type Curve = Secp256k1;
|
||||
|
||||
type Transaction = Transaction;
|
||||
type Block = Epoch;
|
||||
|
||||
type Output = EthereumInInstruction;
|
||||
type SignableTransaction = RouterCommand;
|
||||
type Eventuality = Eventuality;
|
||||
type TransactionMachine = RouterCommandMachine;
|
||||
|
||||
type Scheduler = Scheduler<Self>;
|
||||
|
||||
type Address = Address;
|
||||
|
||||
const NETWORK: NetworkId = NetworkId::Ethereum;
|
||||
const ID: &'static str = "Ethereum";
|
||||
const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12;
|
||||
const CONFIRMATIONS: usize = 1;
|
||||
|
||||
const DUST: u64 = 0; // TODO
|
||||
|
||||
const COST_TO_AGGREGATE: u64 = 0;
|
||||
|
||||
// TODO: usize::max, with a merkle tree in the router
|
||||
const MAX_OUTPUTS: usize = 256;
|
||||
|
||||
fn tweak_keys(keys: &mut ThresholdKeys<Self::Curve>) {
|
||||
while PublicKey::new(keys.group_key()).is_none() {
|
||||
*keys = keys.offset(<Secp256k1 as Ciphersuite>::F::ONE);
|
||||
}
|
||||
}
  #[cfg(test)]
  async fn external_address(&self, _key: <Secp256k1 as Ciphersuite>::G) -> Address {
    Address(self.router().await.as_ref().unwrap().address())
  }

  fn branch_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {
    None
  }

  fn change_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {
    None
  }

  fn forward_address(_key: <Secp256k1 as Ciphersuite>::G) -> Option<Address> {
    None
  }

  async fn get_latest_block_number(&self) -> Result<usize, NetworkError> {
    let actual_number = self
      .provider
      .get_block(BlockNumberOrTag::Finalized.into(), false)
      .await
      .map_err(|_| NetworkError::ConnectionError)?
      .expect("no blocks were finalized")
      .header
      .number
      .unwrap();
    // Error if there hasn't been a full epoch yet
    if actual_number < 32 {
      Err(NetworkError::ConnectionError)?
    }
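    // e.g. a finalized head of 33 yields 33 / 32 = 1, minus 1 = epoch 0 (blocks 0 ..= 31); a
    // head of 96 yields 96 / 32 = 3, minus 1 = epoch 2 (blocks 64 ..= 95)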
    // If this is 33, the division will return 1, yet 1 is the epoch in progress
    let latest_full_epoch = (actual_number / 32).saturating_sub(1);
    Ok(latest_full_epoch.try_into().unwrap())
  }
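
  // Epoch `number` covers Ethereum blocks 32 * number ..= (32 * number) + 31. `prior_end_hash`
  // is the hash of the block immediately preceding the epoch (zeroed for epoch 0), chaining
  // epochs together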
  async fn get_block(&self, number: usize) -> Result<Self::Block, NetworkError> {
    let latest_finalized = self.get_latest_block_number().await?;
    if number > latest_finalized {
      Err(NetworkError::ConnectionError)?
    }

    let start = number * 32;
    let prior_end_hash = if start == 0 {
      [0; 32]
    } else {
      self
        .provider
        .get_block(u64::try_from(start - 1).unwrap().into(), false)
        .await
        .ok()
        .flatten()
        .ok_or(NetworkError::ConnectionError)?
        .header
        .hash
        .unwrap()
        .into()
    };

    let end_header = self
      .provider
      .get_block(u64::try_from(start + 31).unwrap().into(), false)
      .await
      .ok()
      .flatten()
      .ok_or(NetworkError::ConnectionError)?
      .header;

    let end_hash = end_header.hash.unwrap().into();
    let time = end_header.timestamp;

    Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time })
  }

  async fn get_outputs(
    &self,
    block: &Self::Block,
    _: <Secp256k1 as Ciphersuite>::G,
  ) -> Vec<Self::Output> {
    let router = self.router().await;
    let router = router.as_ref().unwrap();

    // TODO: Top-level transfers

    let mut all_events = vec![];
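    // Scan every block in the epoch for InInstructions, retrying on error; DAI is currently the
    // only ERC20 recognized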
    for block in block.start .. (block.start + 32) {
      let mut events = router.in_instructions(block, &HashSet::from([DAI])).await;
      while let Err(e) = events {
        log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}");
        sleep(Duration::from_secs(5)).await;
        events = router.in_instructions(block, &HashSet::from([DAI])).await;
      }
      all_events.extend(events.unwrap());
    }

    for event in &all_events {
      assert!(
        coin_to_serai_coin(&event.coin).is_some(),
        "router yielded events for unrecognized coins"
      );
    }
    all_events
  }

  async fn get_eventuality_completions(
    &self,
    eventualities: &mut EventualitiesTracker<Self::Eventuality>,
    block: &Self::Block,
  ) -> HashMap<
    [u8; 32],
    (
      usize,
      <Self::Transaction as TransactionTrait<Self>>::Id,
      <Self::Eventuality as EventualityTrait>::Completion,
    ),
  > {
    let mut res = HashMap::new();
    if eventualities.map.is_empty() {
      return res;
    }

    let router = self.router().await;
    let router = router.as_ref().unwrap();

    let past_scanned_epoch = loop {
      match self.get_block(eventualities.block_number).await {
        Ok(block) => break block,
        Err(e) => log::error!("couldn't get the last scanned block in the tracker: {e}"),
      }
      sleep(Duration::from_secs(10)).await;
    };
    assert_eq!(
      past_scanned_epoch.start / 32,
      u64::try_from(eventualities.block_number).unwrap(),
      "assumption of tracker block number's relation to epoch start is incorrect"
    );

    // Iterate from after the epoch number in the tracker to the end of this epoch
    for block_num in (past_scanned_epoch.end() + 1) ..= block.end() {
      let executed = loop {
        match router.executed_commands(block_num).await {
          Ok(executed) => break executed,
          Err(e) => log::error!("couldn't get the executed commands in block {block_num}: {e}"),
        }
        sleep(Duration::from_secs(10)).await;
      };
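
      // Executed commands are keyed by their nonce (as little-endian bytes, matching
      // Eventuality::lookup); SignedRouterCommand::new checks the on-chain signature actually
      // signs the eventuality's command before a completion is credited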
      for executed in executed {
        let lookup = executed.nonce.to_le_bytes().to_vec();
        if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) {
          if let Some(command) =
            SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature)
          {
            res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command));
            eventualities.map.remove(&lookup);
          }
        }
      }
    }
    eventualities.block_number = (block.start / 32).try_into().unwrap();

    res
  }

  async fn needed_fee(
    &self,
    _block_number: usize,
    inputs: &[Self::Output],
    _payments: &[Payment<Self>],
    _change: &Option<Self::Address>,
  ) -> Result<Option<u64>, NetworkError> {
    assert_eq!(inputs.len(), 0);
    // Claim no fee is needed so we can perform amortization ourselves
    Ok(Some(0))
  }

  async fn signable_transaction(
    &self,
    _block_number: usize,
    _plan_id: &[u8; 32],
    key: <Self::Curve as Ciphersuite>::G,
    inputs: &[Self::Output],
    payments: &[Payment<Self>],
    change: &Option<Self::Address>,
    scheduler_addendum: &<Self::Scheduler as SchedulerTrait<Self>>::Addendum,
  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError> {
    assert_eq!(inputs.len(), 0);
    assert!(change.is_none());
    let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?;

    // TODO: Perform fee amortization (in the scheduler?)
    // TODO: Make this function internal and have needed_fee properly return None as expected?
    // TODO: signable_transaction is written as though it cannot return None if needed_fee
    // returns Some
    // TODO: Why can this return None at all if it isn't allowed to return None?

    let command = match scheduler_addendum {
      Addendum::Nonce(nonce) => RouterCommand::Execute {
        chain_id: U256::try_from(chain_id).unwrap(),
        nonce: U256::try_from(*nonce).unwrap(),
        outs: payments
          .iter()
          .filter_map(|payment| {
            Some(OutInstruction {
              target: if let Some(data) = payment.data.as_ref() {
                // This introspects the Call serialization format, expecting the first 20 bytes
                // to be the address
                // This avoids wasting the 20 bytes allocated for the address
                let full_data = [payment.address.0.as_slice(), data].concat();
                let mut reader = full_data.as_slice();

                let mut calls = vec![];
                while !reader.is_empty() {
                  calls.push(Call::read(&mut reader).ok()?)
                }
                // The above must have executed at least once since reader contains the address
                assert_eq!(calls[0].to, payment.address.0);

                OutInstructionTarget::Calls(calls)
              } else {
                OutInstructionTarget::Direct(payment.address.0)
              },
              value: {
                assert_eq!(payment.balance.coin, Coin::Ether); // TODO
                balance_to_ethereum_amount(payment.balance)
              },
            })
          })
          .collect(),
      },
      Addendum::RotateTo { nonce, new_key } => {
        assert!(payments.is_empty());
        RouterCommand::UpdateSeraiKey {
          chain_id: U256::try_from(chain_id).unwrap(),
          nonce: U256::try_from(*nonce).unwrap(),
          key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"),
        }
      }
    };
    Ok(Some((
      command.clone(),
      Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command),
    )))
  }

  async fn attempt_sign(
    &self,
    keys: ThresholdKeys<Self::Curve>,
    transaction: Self::SignableTransaction,
  ) -> Result<Self::TransactionMachine, NetworkError> {
    Ok(
      RouterCommandMachine::new(keys, transaction)
        .expect("keys weren't usable to sign router commands"),
    )
  }

  async fn publish_completion(
    &self,
    completion: &<Self::Eventuality as EventualityTrait>::Completion,
  ) -> Result<(), NetworkError> {
    // Publish this to the dedicated TX server for a solver to actually publish
    #[cfg(not(test))]
    {
      let _ = completion;
      todo!("publish the completion to the dedicated TX server");
    }

    // Publish this using a dummy account we fund with magic RPC commands
    #[cfg(test)]
    {
      use rand_core::OsRng;
      use ciphersuite::group::ff::Field;

      let key = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);
      let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key));

      // Set a 1.1 ETH balance
      self
        .provider
        .raw_request::<_, ()>(
          "anvil_setBalance".into(),
          [Address(address).to_string(), "1100000000000000000".into()],
        )
        .await
        .unwrap();

      let router = self.router().await;
      let router = router.as_ref().unwrap();

      let mut tx = match completion.command() {
        RouterCommand::UpdateSeraiKey { key, .. } => {
          router.update_serai_key(key, completion.signature())
        }
        RouterCommand::Execute { outs, .. } => router.execute(
          &outs.iter().cloned().map(Into::into).collect::<Vec<_>>(),
          completion.signature(),
        ),
      };
      tx.gas_price = 100_000_000_000u128;

      use ethereum_serai::alloy_consensus::SignableTransaction;
      let sig =
        k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap())
          .sign_prehash_recoverable(tx.signature_hash().as_ref())
          .unwrap();

      let mut bytes = vec![];
      tx.encode_with_signature_fields(&sig.into(), &mut bytes);
      let _ = self.provider.send_raw_transaction(&bytes).await.unwrap();

      Ok(())
    }
  }

  async fn confirm_completion(
    &self,
    eventuality: &Self::Eventuality,
    claim: &<Self::Eventuality as EventualityTrait>::Claim,
  ) -> Result<Option<<Self::Eventuality as EventualityTrait>::Completion>, NetworkError> {
    Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature))
  }

  #[cfg(test)]
  async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) -> usize {
    self
      .provider
      .get_block(B256::from(*id).into(), false)
      .await
      .unwrap()
      .unwrap()
      .header
      .number
      .unwrap()
      .try_into()
      .unwrap()
  }

  #[cfg(test)]
  async fn check_eventuality_by_claim(
    &self,
    eventuality: &Self::Eventuality,
    claim: &<Self::Eventuality as EventualityTrait>::Claim,
  ) -> bool {
    SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some()
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(
    &self,
    block: usize,
    eventuality: &Self::Eventuality,
  ) -> Self::Transaction {
    match eventuality.1 {
      RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
        let router = self.router().await;
        let router = router.as_ref().unwrap();

        let block = u64::try_from(block).unwrap();
        let filter = router
          .key_updated_filter()
          .from_block(block * 32)
          .to_block(((block + 1) * 32) - 1)
          .topic1(nonce);
        let logs = self.provider.get_logs(&filter).await.unwrap();
        if let Some(log) = logs.first() {
          return self
            .provider
            .get_transaction_by_hash(log.clone().transaction_hash.unwrap())
            .await
            .unwrap();
        };

        let filter = router
          .executed_filter()
          .from_block(block * 32)
          .to_block(((block + 1) * 32) - 1)
          .topic1(nonce);
        let logs = self.provider.get_logs(&filter).await.unwrap();
        self.provider.get_transaction_by_hash(logs[0].transaction_hash.unwrap()).await.unwrap()
      }
    }
  }

  #[cfg(test)]
  async fn mine_block(&self) {
    self.provider.raw_request::<_, ()>("anvil_mine".into(), [32]).await.unwrap();
  }

  #[cfg(test)]
  async fn test_send(&self, send_to: Self::Address) -> Self::Block {
    use rand_core::OsRng;
    use ciphersuite::group::ff::Field;

    let key = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);
    let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key));

    // Set a 1.1 ETH balance
    self
      .provider
      .raw_request::<_, ()>(
        "anvil_setBalance".into(),
        [Address(address).to_string(), "1100000000000000000".into()],
      )
      .await
      .unwrap();

    let tx = ethereum_serai::alloy_consensus::TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      gas_limit: 210_000u128,
      to: ethereum_serai::alloy_core::primitives::TxKind::Call(send_to.0.into()),
      // 1 ETH
      value: U256::from_str_radix("1000000000000000000", 10).unwrap(),
      input: vec![].into(),
    };

    use ethereum_serai::alloy_consensus::SignableTransaction;
    let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap())
      .sign_prehash_recoverable(tx.signature_hash().as_ref())
      .unwrap();

    let mut bytes = vec![];
    tx.encode_with_signature_fields(&sig.into(), &mut bytes);
    let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap();

    // Mine an epoch containing this TX
    self.mine_block().await;
    assert!(pending_tx.get_receipt().await.unwrap().status());
    // Yield the freshly mined block
    self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap()
  }
}

@@ -21,12 +21,17 @@ pub mod bitcoin;
#[cfg(feature = "bitcoin")]
pub use self::bitcoin::Bitcoin;

#[cfg(feature = "ethereum")]
pub mod ethereum;
#[cfg(feature = "ethereum")]
pub use ethereum::Ethereum;

#[cfg(feature = "monero")]
pub mod monero;
#[cfg(feature = "monero")]
pub use monero::Monero;

use crate::{Payment, Plan};
use crate::{Payment, Plan, multisigs::scheduler::Scheduler};

#[derive(Clone, Copy, Error, Debug)]
pub enum NetworkError {
@@ -105,7 +110,7 @@ pub trait Output<N: Network>: Send + Sync + Sized + Clone + PartialEq + Eq + Deb
  fn kind(&self) -> OutputType;

  fn id(&self) -> Self::Id;
  fn tx_id(&self) -> <N::Transaction as Transaction<N>>::Id;
  fn tx_id(&self) -> <N::Transaction as Transaction<N>>::Id; // TODO: Review use of
  fn key(&self) -> <N::Curve as Ciphersuite>::G;

  fn presumed_origin(&self) -> Option<N::Address>;
@@ -118,25 +123,33 @@ pub trait Output<N: Network>: Send + Sync + Sized + Clone + PartialEq + Eq + Deb
}

#[async_trait]
pub trait Transaction<N: Network>: Send + Sync + Sized + Clone + Debug {
pub trait Transaction<N: Network>: Send + Sync + Sized + Clone + PartialEq + Debug {
  type Id: 'static + Id;
  fn id(&self) -> Self::Id;
  fn serialize(&self) -> Vec<u8>;
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;

  // TODO: Move to Balance
  #[cfg(test)]
  async fn fee(&self, network: &N) -> u64;
}

pub trait SignableTransaction: Send + Sync + Clone + Debug {
  // TODO: Move to Balance
  fn fee(&self) -> u64;
}

pub trait Eventuality: Send + Sync + Clone + Debug {
pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug {
  type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug;
  type Completion: Send + Sync + Clone + PartialEq + Debug;

  fn lookup(&self) -> Vec<u8>;

  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
  fn serialize(&self) -> Vec<u8>;

  fn claim(completion: &Self::Completion) -> Self::Claim;

  // TODO: Make a dedicated Completion trait
  fn serialize_completion(completion: &Self::Completion) -> Vec<u8>;
  fn read_completion<R: io::Read>(reader: &mut R) -> io::Result<Self::Completion>;
}
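
// How Claim and Completion are intended to relate, per the bounds above and the Ethereum impl: a
// Completion carries everything needed to publish and verify a fulfillment (for Ethereum, a
// SignedRouterCommand), while a Claim is the small, fixed-size datum identifying it (hence the
// Default + AsRef<[u8]> bounds). `claim` extracts the latter from the former, and the Network's
// confirm_completion upgrades a Claim back into a verified Completion.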

#[derive(Clone, PartialEq, Eq, Debug)]
@@ -211,7 +224,7 @@ fn drop_branches<N: Network>(
) -> Vec<PostFeeBranch> {
  let mut branch_outputs = vec![];
  for payment in payments {
    if payment.address == N::branch_address(key) {
    if Some(&payment.address) == N::branch_address(key).as_ref() {
      branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None });
    }
  }
@@ -227,12 +240,12 @@ pub struct PreparedSend<N: Network> {
}

#[async_trait]
pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug {
  /// The elliptic curve used for this network.
  type Curve: Curve;

  /// The type representing the transaction for this network.
  type Transaction: Transaction<Self>;
  type Transaction: Transaction<Self>; // TODO: Review use of
  /// The type representing the block for this network.
  type Block: Block<Self>;

@@ -246,7 +259,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  /// This must be binding to both the outputs expected and the plan ID.
  type Eventuality: Eventuality;
  /// The FROST machine to sign a transaction.
  type TransactionMachine: PreprocessMachine<Signature = Self::Transaction>;
  type TransactionMachine: PreprocessMachine<
    Signature = <Self::Eventuality as Eventuality>::Completion,
  >;

  /// The scheduler for this network.
  type Scheduler: Scheduler<Self>;

  /// The type representing an address.
  // This should NOT be a String, yet a tailored type representing an efficient binary encoding,
@@ -269,10 +287,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize;
  /// The amount of confirmations required to consider a block 'final'.
  const CONFIRMATIONS: usize;
  /// The maximum amount of inputs which will fit in a TX.
  /// This should be equal to MAX_OUTPUTS unless one is specifically limited.
  /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size.
  const MAX_INPUTS: usize;
  /// The maximum amount of outputs which will fit in a TX.
  /// This should be equal to MAX_INPUTS unless one is specifically limited.
  /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size.
@@ -293,13 +307,16 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  fn tweak_keys(key: &mut ThresholdKeys<Self::Curve>);

  /// Address for the given group key to receive external coins to.
  fn external_address(key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
  #[cfg(test)]
  async fn external_address(&self, key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
  /// Address for the given group key to use for scheduled branches.
  fn branch_address(key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
  fn branch_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;
  /// Address for the given group key to use for change.
  fn change_address(key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
  fn change_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;
  /// Address for forwarded outputs from prior multisigs.
  fn forward_address(key: <Self::Curve as Ciphersuite>::G) -> Self::Address;
  ///
  /// forward_address must only return None if explicit forwarding isn't necessary.
  fn forward_address(key: <Self::Curve as Ciphersuite>::G) -> Option<Self::Address>;

  /// Get the latest block's number.
  async fn get_latest_block_number(&self) -> Result<usize, NetworkError>;
@@ -349,13 +366,24 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  /// registered eventualities may have been completed in.
  ///
  /// This may panic if not fed a block greater than the tracker's block number.
  ///
  /// Plan ID -> (block number, TX ID, completion)
  // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common
  // code
  // TODO: Consider having this return the Transaction + the Completion?
  // Or Transaction with extract_completion?
  async fn get_eventuality_completions(
    &self,
    eventualities: &mut EventualitiesTracker<Self::Eventuality>,
    block: &Self::Block,
  ) -> HashMap<[u8; 32], (usize, Self::Transaction)>;
  ) -> HashMap<
    [u8; 32],
    (
      usize,
      <Self::Transaction as Transaction<Self>>::Id,
      <Self::Eventuality as Eventuality>::Completion,
    ),
  >;

  /// Returns the needed fee to fulfill this Plan at this fee rate.
  ///
@@ -363,7 +391,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  async fn needed_fee(
    &self,
    block_number: usize,
    plan_id: &[u8; 32],
    inputs: &[Self::Output],
    payments: &[Payment<Self>],
    change: &Option<Self::Address>,
@@ -375,16 +402,25 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  /// 1) Call needed_fee
  /// 2) If the Plan is fulfillable, amortize the fee
  /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly*
  ///
  /// This takes a destructured Plan as some of these arguments are malleated from the original
  /// Plan.
  // TODO: Explicit AmortizedPlan?
  #[allow(clippy::too_many_arguments)]
  async fn signable_transaction(
    &self,
    block_number: usize,
    plan_id: &[u8; 32],
    key: <Self::Curve as Ciphersuite>::G,
    inputs: &[Self::Output],
    payments: &[Payment<Self>],
    change: &Option<Self::Address>,
    scheduler_addendum: &<Self::Scheduler as Scheduler<Self>>::Addendum,
  ) -> Result<Option<(Self::SignableTransaction, Self::Eventuality)>, NetworkError>;
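
  // A minimal sketch of the call order the docs above prescribe (illustrative only; `plan_id`,
  // `key`, and `addendum` come from the destructured Plan):
  //   if let Some(fee) = network.needed_fee(block_number, &inputs, &payments, &change).await? {
  //     // ... amortize `fee` across the payments ...
  //     let (tx, eventuality) = network
  //       .signable_transaction(block_number, &plan_id, key, &inputs, &payments, &change, &addendum)
  //       .await?
  //       .expect("signable_transaction returned None after needed_fee returned Some");
  //   }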

  /// Prepare a SignableTransaction for a transaction.
  ///
  /// This must not persist anything as we will prepare Plans we never intend to execute.
  async fn prepare_send(
    &self,
    block_number: usize,
@@ -395,13 +431,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
    assert!((!plan.payments.is_empty()) || plan.change.is_some());

    let plan_id = plan.id();
    let Plan { key, inputs, mut payments, change } = plan;
    let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan;
    let theoretical_change_amount =
      inputs.iter().map(|input| input.balance().amount.0).sum::<u64>() -
      payments.iter().map(|payment| payment.balance.amount.0).sum::<u64>();

    let Some(tx_fee) = self.needed_fee(block_number, &plan_id, &inputs, &payments, &change).await?
    else {
    let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else {
      // This Plan is not fulfillable
      // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs?
      return Ok(PreparedSend {
@@ -466,7 +501,7 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
    // Note the branch outputs' new values
    let mut branch_outputs = vec![];
    for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) {
      if payment.address == Self::branch_address(key) {
      if Some(&payment.address) == Self::branch_address(key).as_ref() {
        branch_outputs.push(PostFeeBranch {
          expected: initial_amount,
          actual: if payment.balance.amount.0 == 0 {
@@ -508,11 +543,20 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
      )
    })();

    let Some(tx) =
      self.signable_transaction(block_number, &plan_id, &inputs, &payments, &change).await?
    let Some(tx) = self
      .signable_transaction(
        block_number,
        &plan_id,
        key,
        &inputs,
        &payments,
        &change,
        &scheduler_addendum,
      )
      .await?
    else {
      panic!(
        "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}",
        "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}",
        "signable_transaction returned None for a TX we prior successfully calculated the fee for",
        "id",
        hex::encode(plan_id),
@@ -524,6 +568,8 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
        change,
        "successfully amortized fee",
        tx_fee,
        "scheduler's addendum",
        scheduler_addendum,
      )
    };

@@ -546,31 +592,49 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  }

  /// Attempt to sign a SignableTransaction.
  async fn attempt_send(
  async fn attempt_sign(
    &self,
    keys: ThresholdKeys<Self::Curve>,
    transaction: Self::SignableTransaction,
  ) -> Result<Self::TransactionMachine, NetworkError>;

  /// Publish a transaction.
  async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>;

  /// Get a transaction by its ID.
  async fn get_transaction(
  /// Publish a completion.
  async fn publish_completion(
    &self,
    id: &<Self::Transaction as Transaction<Self>>::Id,
  ) -> Result<Self::Transaction, NetworkError>;
    completion: &<Self::Eventuality as Eventuality>::Completion,
  ) -> Result<(), NetworkError>;

  /// Confirm a plan was completed by the specified transaction.
  // This is allowed to take shortcuts.
  // This may assume an honest multisig, solely checking the inputs specified were spent.
  // This may solely check the outputs are equivalent *so long as it's locked to the plan ID*.
  fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Self::Transaction) -> bool;
  /// Confirm a plan was completed by the specified transaction, per our bounds.
  ///
  /// Returns Err if there was an error with the confirmation methodology.
  /// Returns Ok(None) if this is not a valid completion.
  /// Returns Ok(Some(_)) with the completion if it's valid.
  async fn confirm_completion(
    &self,
    eventuality: &Self::Eventuality,
    claim: &<Self::Eventuality as Eventuality>::Claim,
  ) -> Result<Option<<Self::Eventuality as Eventuality>::Completion>, NetworkError>;

  /// Get a block's number by its ID.
  #[cfg(test)]
  async fn get_block_number(&self, id: &<Self::Block as Block<Self>>::Id) -> usize;

  /// Check an Eventuality is fulfilled by a claim.
  #[cfg(test)]
  async fn check_eventuality_by_claim(
    &self,
    eventuality: &Self::Eventuality,
    claim: &<Self::Eventuality as Eventuality>::Claim,
  ) -> bool;

  /// Get a transaction by the Eventuality it completes.
  #[cfg(test)]
  async fn get_transaction_by_eventuality(
    &self,
    block: usize,
    eventuality: &Self::Eventuality,
  ) -> Self::Transaction;

  #[cfg(test)]
  async fn mine_block(&self);

@@ -579,3 +643,10 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug {
  #[cfg(test)]
  async fn test_send(&self, key: Self::Address) -> Self::Block;
}

pub trait UtxoNetwork: Network {
  /// The maximum amount of inputs which will fit in a TX.
  /// This should be equal to MAX_OUTPUTS unless one is specifically limited.
  /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size.
  const MAX_INPUTS: usize;
}
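
// MAX_INPUTS lives here rather than on Network itself (see the removed constant above), as
// account-based chains such as Ethereum have no inputs to bound: their Network impls assert the
// input set is empty.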