Mirror of https://github.com/serai-dex/serai.git
Merge branch 'develop' into HEAD
.github/actions/bitcoin/action.yml (vendored) | 2

@@ -5,7 +5,7 @@ inputs:
  version:
    description: "Version to download and run"
    required: false
-    default: 24.0.1
+    default: "27.0"

runs:
  using: "composite"
@@ -42,8 +42,8 @@ runs:
    shell: bash
    run: |
      cargo install svm-rs
-      svm install 0.8.16
-      svm use 0.8.16
+      svm install 0.8.25
+      svm use 0.8.25

  # - name: Cache Rust
  #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/test-dependencies/action.yml (vendored) | 6

@@ -10,7 +10,7 @@ inputs:
  bitcoin-version:
    description: "Bitcoin version to download and run as a regtest node"
    required: false
-    default: 24.0.1
+    default: "27.0"

runs:
  using: "composite"
@@ -19,9 +19,9 @@ runs:
        uses: ./.github/actions/build-dependencies

    - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
+      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
      with:
-        version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
+        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
        cache: false

    - name: Run a Monero Regtest Node
.github/nightly-version (vendored) | 2

@@ -1 +1 @@
-nightly-2024-02-07
+nightly-2024-06-01
.github/workflows/coins-tests.yml (vendored) | 1

@@ -30,6 +30,7 @@ jobs:
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p bitcoin-serai \
+            -p alloy-simple-request-transport \
            -p ethereum-serai \
            -p monero-generators \
            -p monero-serai
.github/workflows/common-tests.yml (vendored) | 3

@@ -28,4 +28,5 @@ jobs:
            -p std-shims \
            -p zalloc \
            -p serai-db \
-            -p serai-env
+            -p serai-env \
+            -p simple-request
.github/workflows/coordinator-tests.yml (vendored) | 2

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/full-stack-tests.yml (vendored) | 2

@@ -19,4 +19,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/message-queue-tests.yml (vendored) | 2

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/pages.yml (new file, vendored) | 90

@@ -0,0 +1,90 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages

on:
  push:
    branches:
      - "develop"
    paths:
      - "docs/**"

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow one concurrent deployment
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: docs
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          bundler-cache: true
          cache-version: 0
          working-directory: "${{ github.workspace }}/docs"
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v3
      - name: Build with Jekyll
        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
        env:
          JEKYLL_ENV: production
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v1
        with:
          path: "docs/_site/"

  # Deployment job
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v2
.github/workflows/processor-tests.yml (vendored) | 2

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/reproducible-runtime.yml (vendored) | 2

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies

      - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
.github/workflows/tests.yml (vendored) | 3

@@ -43,6 +43,7 @@ jobs:
            -p tendermint-machine \
            -p tributary-chain \
            -p serai-coordinator \
            -p serai-orchestrator \
            -p serai-docker-tests

  test-substrate:
@@ -64,7 +65,9 @@ jobs:
            -p serai-validator-sets-pallet \
            -p serai-in-instructions-primitives \
            -p serai-in-instructions-pallet \
            -p serai-signals-primitives \
            -p serai-signals-pallet \
            -p serai-abi \
            -p serai-runtime \
            -p serai-node
Cargo.lock (generated) | 2697

File diff suppressed because it is too large.
Cargo.toml | 13

@@ -2,7 +2,10 @@
resolver = "2"
members = [
  # Version patches
  "patches/parking_lot_core",
  "patches/parking_lot",
  "patches/zstd",
  "patches/rocksdb",
+  "patches/proc-macro-crate",

  # std patches
@@ -35,7 +38,11 @@ members = [
  "crypto/schnorrkel",

  "coins/bitcoin",

+  "coins/ethereum/alloy-simple-request-transport",
  "coins/ethereum",
+  "coins/ethereum/relayer",

  "coins/monero/generators",
  "coins/monero",
@@ -108,10 +115,14 @@ panic = "unwind"
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

# Needed due to dockertest's usage of `Rc`s when we need `Arc`s
-dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }
+dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }

parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }
+# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
+proc-macro-crate = { path = "patches/proc-macro-crate" }
@@ -5,13 +5,16 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
experience. Funds are stored in an economically secured threshold-multisig
wallet.

-[Getting Started](docs/Getting%20Started.md)
+[Getting Started](spec/Getting%20Started.md)

### Layout

- `audits`: Audits for various parts of Serai.

-- `docs`: Documentation on the Serai protocol.
+- `spec`: The specification of the Serai protocol, both internally and as
+  networked.
+
+- `docs`: User-facing documentation on the Serai protocol.

- `common`: Crates containing utilities common to a variety of areas under
  Serai, none neatly fitting under another category.
@@ -23,7 +23,7 @@ thiserror = { version = "1", default-features = false, optional = true }
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }

-bitcoin = { version = "0.31", default-features = false, features = ["no-std"] }
+bitcoin = { version = "0.32", default-features = false }

k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] }
@@ -36,7 +36,7 @@ serde_json = { version = "1", default-features = false, optional = true }
simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true }

[dev-dependencies]
-secp256k1 = { version = "0.28", default-features = false, features = ["std"] }
+secp256k1 = { version = "0.29", default-features = false, features = ["std"] }

frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }
@@ -195,13 +195,13 @@ impl Rpc {
        // If this was already successfully published, consider this having succeeded
        if let RpcError::RequestError(Error { code, .. }) = e {
          if code == RPC_VERIFY_ALREADY_IN_CHAIN {
-            return Ok(tx.txid());
+            return Ok(tx.compute_txid());
          }
        }
        Err(e)?
      }
    };
-    if txid != tx.txid() {
+    if txid != tx.compute_txid() {
      Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?;
    }
    Ok(txid)
@@ -215,7 +215,7 @@ impl Rpc {
    let tx: Transaction = encode::deserialize(&bytes)
      .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?;

-    let mut tx_hash = *tx.txid().as_raw_hash().as_byte_array();
+    let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array();
    tx_hash.reverse();
    if hash != &tx_hash {
      Err(RpcError::InvalidResponse("node replied with a different transaction"))?;
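A note on the byte-order handling above: bitcoin 0.32 renamed `txid()` to `compute_txid()` (the new name signals the hash is computed on each call), and the returned hash is in internal byte order, while node RPCs and explorers display TX IDs reversed. A minimal sketch of the reversal this code performs:

use bitcoin::{hashes::Hash, Transaction};

// compute_txid() yields the double-SHA256 in internal byte order; reversing it
// produces the display order used by RPCs and block explorers
fn display_order_txid(tx: &Transaction) -> [u8; 32] {
  let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();
  hash.reverse();
  hash
}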
@@ -39,7 +39,7 @@ fn test_algorithm() {
    .verify_schnorr(
      &Signature::from_slice(&sig)
        .expect("couldn't convert produced signature to secp256k1::Signature"),
-      &Message::from(Hash::hash(MESSAGE)),
+      &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(),
      &x_only(&keys[&Participant::new(1).unwrap()].group_key()),
    )
    .unwrap()
@@ -4,7 +4,7 @@ use std_shims::{
  io::{self, Write},
};
#[cfg(feature = "std")]
-use std_shims::io::Read;
+use std::io::{Read, BufReader};

use k256::{
  elliptic_curve::sec1::{Tag, ToEncodedPoint},
@@ -18,8 +18,8 @@ use frost::{
};

use bitcoin::{
-  consensus::encode::serialize, key::TweakedPublicKey, address::Payload, OutPoint, ScriptBuf,
-  TxOut, Transaction, Block,
+  consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction,
+  Block,
};
#[cfg(feature = "std")]
use bitcoin::consensus::encode::Decodable;
@@ -46,12 +46,12 @@ pub fn tweak_keys(keys: &ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {
/// Return the Taproot address payload for a public key.
///
/// If the key is odd, this will return None.
-pub fn address_payload(key: ProjectivePoint) -> Option<Payload> {
+pub fn p2tr_script_buf(key: ProjectivePoint) -> Option<ScriptBuf> {
  if key.to_encoded_point(true).tag() != Tag::CompressedEvenY {
    return None;
  }

-  Some(Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
+  Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
}

/// A spendable output.
@@ -89,11 +89,17 @@ impl ReceivedOutput {
  /// Read a ReceivedOutput from a generic satisfying Read.
  #[cfg(feature = "std")]
  pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
-    Ok(ReceivedOutput {
-      offset: Secp256k1::read_F(r)?,
-      output: TxOut::consensus_decode(r).map_err(|_| io::Error::other("invalid TxOut"))?,
-      outpoint: OutPoint::consensus_decode(r).map_err(|_| io::Error::other("invalid OutPoint"))?,
-    })
+    let offset = Secp256k1::read_F(r)?;
+    let output;
+    let outpoint;
+    {
+      let mut buf_r = BufReader::with_capacity(0, r);
+      output =
+        TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?;
+      outpoint =
+        OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?;
+    }
+    Ok(ReceivedOutput { offset, output, outpoint })
  }

  /// Write a ReceivedOutput to a generic satisfying Write.
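The `BufReader::with_capacity(0, r)` dance exists because bitcoin 0.32 changed `consensus_decode` to require `io::BufRead` rather than `io::Read`; a zero-capacity `BufReader` satisfies the bound without read-ahead consuming bytes past the decoded value. A minimal sketch of the pattern in isolation:

use std::io::{BufReader, Read};

use bitcoin::{consensus::encode::Decodable, TxOut};

// Adapts any Read into the BufRead consensus_decode now requires, leaving the
// underlying reader positioned exactly after the decoded TxOut
fn decode_tx_out<R: Read>(r: &mut R) -> std::io::Result<TxOut> {
  let mut buf_r = BufReader::with_capacity(0, r);
  TxOut::consensus_decode(&mut buf_r).map_err(|_| std::io::Error::other("invalid TxOut"))
}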
@@ -124,7 +130,7 @@ impl Scanner {
  /// Returns None if this key can't be scanned for.
  pub fn new(key: ProjectivePoint) -> Option<Scanner> {
    let mut scripts = HashMap::new();
-    scripts.insert(address_payload(key)?.script_pubkey(), Scalar::ZERO);
+    scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO);
    Some(Scanner { key, scripts })
  }
@@ -141,9 +147,8 @@ impl Scanner {
    // chance of being even
    // That means this should terminate within a very small amount of iterations
    loop {
-      match address_payload(self.key + (ProjectivePoint::GENERATOR * offset)) {
-        Some(address) => {
-          let script = address.script_pubkey();
+      match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) {
+        Some(script) => {
          if self.scripts.contains_key(&script) {
            None?;
          }
@@ -166,7 +171,7 @@ impl Scanner {
          res.push(ReceivedOutput {
            offset: *offset,
            output: output.clone(),
-            outpoint: OutPoint::new(tx.txid(), vout),
+            outpoint: OutPoint::new(tx.compute_txid(), vout),
          });
        }
      }
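With `address::Payload` gone from bitcoin 0.32's public API, callers which still need a user-facing address can derive one from the script, as the updated tests further below do. A minimal sketch, assuming a dependency on `bitcoin-serai`:

use bitcoin::{Address, Network};
use bitcoin_serai::wallet::p2tr_script_buf;

// Rebuilds a display address from the P2TR script pubkey of an even key
fn display_address(key: k256::ProjectivePoint) -> Option<Address> {
  Address::from_script(&p2tr_script_buf(key)?, Network::Regtest).ok()
}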
@@ -18,12 +18,12 @@ use bitcoin::{
  absolute::LockTime,
  script::{PushBytesBuf, ScriptBuf},
  transaction::{Version, Transaction},
-  OutPoint, Sequence, Witness, TxIn, Amount, TxOut, Address,
+  OutPoint, Sequence, Witness, TxIn, Amount, TxOut,
};

use crate::{
  crypto::Schnorr,
-  wallet::{ReceivedOutput, address_payload},
+  wallet::{ReceivedOutput, p2tr_script_buf},
};

#[rustfmt::skip]
@@ -61,7 +61,11 @@ pub struct SignableTransaction {
}

impl SignableTransaction {
-  fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 {
+  fn calculate_weight(
+    inputs: usize,
+    payments: &[(ScriptBuf, u64)],
+    change: Option<&ScriptBuf>,
+  ) -> u64 {
    // Expand this a full transaction in order to use the bitcoin library's weight function
    let mut tx = Transaction {
      version: Version(2),
@@ -86,14 +90,14 @@ impl SignableTransaction {
      // The script pub key is not of a fixed size and does have to be used here
      .map(|payment| TxOut {
        value: Amount::from_sat(payment.1),
-        script_pubkey: payment.0.script_pubkey(),
+        script_pubkey: payment.0.clone(),
      })
      .collect(),
    };
    if let Some(change) = change {
      // Use a 0 value since we're currently unsure what the change amount will be, and since
      // the value is fixed size (so any value could be used here)
-      tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() });
+      tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() });
    }
    u64::from(tx.weight())
  }
@@ -121,8 +125,8 @@ impl SignableTransaction {
  /// If data is specified, an OP_RETURN output will be added with it.
  pub fn new(
    mut inputs: Vec<ReceivedOutput>,
-    payments: &[(Address, u64)],
-    change: Option<&Address>,
+    payments: &[(ScriptBuf, u64)],
+    change: Option<ScriptBuf>,
    data: Option<Vec<u8>>,
    fee_per_weight: u64,
  ) -> Result<SignableTransaction, TransactionError> {
@@ -159,10 +163,7 @@ impl SignableTransaction {
    let payment_sat = payments.iter().map(|payment| payment.1).sum::<u64>();
    let mut tx_outs = payments
      .iter()
-      .map(|payment| TxOut {
-        value: Amount::from_sat(payment.1),
-        script_pubkey: payment.0.script_pubkey(),
-      })
+      .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() })
      .collect::<Vec<_>>();

    // Add the OP_RETURN output
@@ -213,12 +214,11 @@ impl SignableTransaction {

    // If there's a change address, check if there's change to give it
    if let Some(change) = change {
-      let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change));
+      let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(&change));
      let fee_with_change = fee_per_weight * weight_with_change;
      if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {
        if value >= DUST {
-          tx_outs
-            .push(TxOut { value: Amount::from_sat(value), script_pubkey: change.script_pubkey() });
+          tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change });
          weight = weight_with_change;
          needed_fee = fee_with_change;
        }
@@ -248,7 +248,7 @@ impl SignableTransaction {

  /// Returns the TX ID of the transaction this will create.
  pub fn txid(&self) -> [u8; 32] {
-    let mut res = self.tx.txid().to_byte_array();
+    let mut res = self.tx.compute_txid().to_byte_array();
    res.reverse();
    res
  }
@@ -288,7 +288,7 @@ impl SignableTransaction {
    transcript.append_message(b"signing_input", u32::try_from(i).unwrap().to_le_bytes());

    let offset = keys.clone().offset(self.offsets[i]);
-    if address_payload(offset.group_key())?.script_pubkey() != self.prevouts[i].script_pubkey {
+    if p2tr_script_buf(offset.group_key())? != self.prevouts[i].script_pubkey {
      None?;
    }
@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
    msg: &[u8],
  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
-      panic!("message was passed to the TransactionMachine when it generates its own");
+      panic!("message was passed to the TransactionSignMachine when it generates its own");
    }

    let commitments = (0 .. self.sigs.len())
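A minimal usage sketch of the reworked constructor (amounts and fee are illustrative): payments and change are now plain `ScriptBuf`s, with change taken by value.

use bitcoin::ScriptBuf;
use bitcoin_serai::wallet::{ReceivedOutput, SignableTransaction, TransactionError};

fn build(
  inputs: Vec<ReceivedOutput>,
  payment: ScriptBuf,
  change: ScriptBuf,
) -> Result<SignableTransaction, TransactionError> {
  let payments = [(payment, 10_000)];
  // 2 units of fee per weight, no OP_RETURN data
  SignableTransaction::new(inputs, &payments, Some(change), None, 2)
}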
@@ -22,11 +22,10 @@ use bitcoin_serai::{
    hashes::Hash as HashTrait,
    blockdata::opcodes::all::OP_RETURN,
    script::{PushBytesBuf, Instruction, Instructions, Script},
-    address::NetworkChecked,
    OutPoint, Amount, TxOut, Transaction, Network, Address,
  },
  wallet::{
-    tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
+    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
  },
  rpc::Rpc,
};
@@ -48,7 +47,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
      "generatetoaddress",
      serde_json::json!([
        1,
-        Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())
+        Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()
      ]),
    )
    .await
@@ -69,7 +68,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
  assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0]));

  assert_eq!(outputs.len(), 1);
-  assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].txid(), 0));
+  assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0));
  assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat());

  assert_eq!(
@@ -193,7 +192,7 @@ async_sequential! {
    assert_eq!(output.offset(), Scalar::ZERO);

    let inputs = vec![output];
-    let addr = || Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap());
+    let addr = || p2tr_script_buf(key).unwrap();
    let payments = vec![(addr(), 1000)];

    assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok());
@@ -206,7 +205,7 @@ async_sequential! {
    // No change
    assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok());
    // Consolidation TX
-    assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok());
+    assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok());
    // Data
    assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok());
    // No outputs
@@ -229,7 +228,7 @@ async_sequential! {
    );

    assert_eq!(
-      SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0),
+      SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0),
      Err(TransactionError::TooLowFee),
    );
@@ -261,20 +260,19 @@ async_sequential! {

    // Declare payments, change, fee
    let payments = [
-      (Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap()), 1005),
-      (Address::<NetworkChecked>::new(Network::Regtest, address_payload(offset_key).unwrap()), 1007)
+      (p2tr_script_buf(key).unwrap(), 1005),
+      (p2tr_script_buf(offset_key).unwrap(), 1007)
    ];

    let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();
    let change_key = key + (ProjectivePoint::GENERATOR * change_offset);
-    let change_addr =
-      Address::<NetworkChecked>::new(Network::Regtest, address_payload(change_key).unwrap());
+    let change_addr = p2tr_script_buf(change_key).unwrap();

    // Create and sign the TX
    let tx = SignableTransaction::new(
      vec![output.clone(), offset_output.clone()],
      &payments,
-      Some(&change_addr),
+      Some(change_addr.clone()),
      None,
      FEE
    ).unwrap();
@@ -287,7 +285,7 @@ async_sequential! {
    // Ensure we can scan it
    let outputs = scanner.scan_transaction(&tx);
    for (o, output) in outputs.iter().enumerate() {
-      assert_eq!(output.outpoint(), &OutPoint::new(tx.txid(), u32::try_from(o).unwrap()));
+      assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap()));
      assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output);
    }
@@ -299,7 +297,7 @@ async_sequential! {
    for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) {
      assert_eq!(
        output,
-        &TxOut { script_pubkey: payment.0.script_pubkey(), value: Amount::from_sat(payment.1) },
+        &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) },
      );
      assert_eq!(scanned.value(), payment.1);
    }
@@ -314,13 +312,13 @@ async_sequential! {
      input_value - payments.iter().map(|payment| payment.1).sum::<u64>() - needed_fee;
    assert_eq!(
      tx.output[2],
-      TxOut { script_pubkey: change_addr.script_pubkey(), value: Amount::from_sat(change_amount) },
+      TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) },
    );

    // This also tests send_raw_transaction and get_transaction, which the RPC test can't
    // effectively test
    rpc.send_raw_transaction(&tx).await.unwrap();
-    let mut hash = *tx.txid().as_raw_hash().as_byte_array();
+    let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();
    hash.reverse();
    assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap());
    assert_eq!(expected_id, hash);
@@ -344,7 +342,7 @@ async_sequential! {
      &SignableTransaction::new(
        vec![output],
        &[],
-        Some(&Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())),
+        Some(p2tr_script_buf(key).unwrap()),
        Some(data.clone()),
        FEE
      ).unwrap()
coins/ethereum/.gitignore (vendored) | 2

@@ -1,3 +1,3 @@
-# solidity build outputs
+# Solidity build outputs
cache
artifacts
@@ -18,25 +18,32 @@ workspace = true

[dependencies]
thiserror = { version = "1", default-features = false }
eyre = { version = "0.6", default-features = false }

sha3 = { version = "0.10", default-features = false, features = ["std"] }

group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
-
-ethers-core = { version = "2", default-features = false }
-ethers-providers = { version = "2", default-features = false }
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
serde = { version = "1", default-features = false, features = ["std"] }
serde_json = { version = "1", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }

sha2 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false }
+k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
+
+alloy-core = { version = "0.7", default-features = false }
+alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
+alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, features = ["k256"] }
+alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
+alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
+alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
+alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
+alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
+
+alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, optional = true }

[dev-dependencies]
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
+
tokio = { version = "1", features = ["macros"] }

+alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
+
+[features]
+tests = ["alloy-node-bindings", "frost/tests"]
@@ -3,6 +3,12 @@
This package contains Ethereum-related functionality, specifically deploying and
interacting with Serai contracts.

+While `monero-serai` and `bitcoin-serai` are general purpose libraries,
+`ethereum-serai` is Serai specific. If any of the utilities are generally
+desired, please fork and maintain your own copy to ensure the desired
+functionality is preserved, or open an issue to request we make this library
+general purpose.
+
### Dependencies

- solc
coins/ethereum/alloy-simple-request-transport/Cargo.toml (new file) | 29

@@ -0,0 +1,29 @@
[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tower = "0.4"

serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }

alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }

[features]
default = ["tls"]
tls = ["simple-request/tls"]
coins/ethereum/alloy-simple-request-transport/LICENSE (new file) | 21

@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2024 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
coins/ethereum/alloy-simple-request-transport/README.md (new file) | 4

@@ -0,0 +1,4 @@
# Alloy Simple Request Transport

A transport for alloy based on simple-request, a small HTTP client built around
hyper.
coins/ethereum/alloy-simple-request-transport/src/lib.rs (new file) | 60

@@ -0,0 +1,60 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

use core::task;
use std::io;

use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_transport::{TransportError, TransportErrorKind, TransportFut};

use simple_request::{hyper, Request, Client};

use tower::Service;

#[derive(Clone, Debug)]
pub struct SimpleRequest {
  client: Client,
  url: String,
}

impl SimpleRequest {
  pub fn new(url: String) -> Self {
    Self { client: Client::with_connection_pool(), url }
  }
}

impl Service<RequestPacket> for SimpleRequest {
  type Response = ResponsePacket;
  type Error = TransportError;
  type Future = TransportFut<'static>;

  #[inline]
  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
    task::Poll::Ready(Ok(()))
  }

  #[inline]
  fn call(&mut self, req: RequestPacket) -> Self::Future {
    let inner = self.clone();
    Box::pin(async move {
      let packet = req.serialize().map_err(TransportError::SerError)?;
      let request = Request::from(
        hyper::Request::post(&inner.url)
          .header("Content-Type", "application/json")
          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
          .unwrap(),
      );

      let mut res = inner
        .client
        .request(request)
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
        .body()
        .await
        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;

      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
    })
  }
}
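A hypothetical wiring sketch for this transport; the `RpcClient::new(transport, is_local)` constructor is assumed from the pinned alloy revision.

use alloy_rpc_client::RpcClient;
use alloy_simple_request_transport::SimpleRequest;

// Builds a JSON-RPC client backed by simple-request's connection pool.
// RpcClient::new(transport, is_local) is assumed available at this alloy rev;
// `true` flags the endpoint as local, which alloy uses for polling intervals.
fn rpc_client(url: String) -> RpcClient<SimpleRequest> {
  RpcClient::new(SimpleRequest::new(url), true)
}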
@@ -1,15 +1,41 @@
use std::process::Command;

fn main() {
-  println!("cargo:rerun-if-changed=contracts");
-  println!("cargo:rerun-if-changed=artifacts");
+  println!("cargo:rerun-if-changed=contracts/*");
+  println!("cargo:rerun-if-changed=artifacts/*");
+
+  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
+    .unwrap()
+    .lines()
+  {
+    if let Some(version) = line.strip_prefix("Version: ") {
+      let version = version.split('+').next().unwrap();
+      assert_eq!(version, "0.8.25");
+    }
+  }

  #[rustfmt::skip]
  let args = [
    "--base-path", ".",
    "-o", "./artifacts", "--overwrite",
    "--bin", "--abi",
-    "--optimize",
-    "./contracts/Schnorr.sol"
-  ];
-
-  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
+    "--via-ir", "--optimize",
+
+    "./contracts/IERC20.sol",
+
+    "./contracts/Schnorr.sol",
+    "./contracts/Deployer.sol",
+    "./contracts/Sandbox.sol",
+    "./contracts/Router.sol",
+
+    "./src/tests/contracts/Schnorr.sol",
+    "./src/tests/contracts/ERC20.sol",
+
+    "--no-color",
+  ];
+  let solc = Command::new("solc").args(args).output().unwrap();
+  assert!(solc.status.success());
+  for line in String::from_utf8(solc.stderr).unwrap().lines() {
+    assert!(!line.starts_with("Error:"));
+  }
}
coins/ethereum/contracts/Deployer.sol (new file) | 52

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

/*
  The expected deployment process of the Router is as follows:

  1) A transaction deploying Deployer is made. Then, a deterministic signature is
     created such that an account with an unknown private key is the creator of
     the contract. Anyone can fund this address, and once anyone does, the
     transaction deploying Deployer can be published by anyone. No other
     transaction may be made from that account.

  2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
     such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
     While such attacks would still be feasible if the Deployer's address was
     controllable, the usage of a deterministic signature with a NUMS method
     prevents that.

  This doesn't have any denial-of-service risks and will resolve once anyone steps
  forward as deployer. This does fail to guarantee an identical address across
  every chain, though it enables letting anyone efficiently ask the Deployer for
  the address (with the Deployer having an identical address on every chain).

  Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
  Deployer contract to use a consistent salt for the Router, yet the Router must
  be deployed with a specific public key for Serai. Since Ethereum isn't able to
  determine a valid public key (one the result of a Serai DKG) from a dishonest
  public key, we have to allow multiple deployments with Serai being the one to
  determine which to use.

  The alternative would be to have a council publish the Serai key on-Ethereum,
  with Serai verifying the published result. This would introduce a DoS risk in
  the council not publishing the correct key/not publishing any key.
*/

contract Deployer {
  event Deployment(bytes32 indexed init_code_hash, address created);

  error DeploymentFailed();

  function deploy(bytes memory init_code) external {
    address created;
    assembly {
      created := create(0, add(init_code, 0x20), mload(init_code))
    }
    if (created == address(0)) {
      revert DeploymentFailed();
    }
    // These may be emitted out of order upon re-entrancy
    emit Deployment(keccak256(init_code), created);
  }
}
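Since the Deployer uses plain CREATE with a sequential nonce, a Router's address is computable off-chain from the Deployer's address and nonce. A sketch, assuming alloy-core's re-exported `Address::create` helper:

use alloy_core::primitives::Address;

// CREATE addresses are keccak256(rlp([sender, nonce]))[12..], which
// alloy-primitives implements as Address::create (assumed available here)
fn expected_contract_address(deployer: Address, nonce: u64) -> Address {
  deployer.create(nonce)
}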
coins/ethereum/contracts/IERC20.sol (new file) | 20

@@ -0,0 +1,20 @@
// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;

interface IERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() external view returns (string memory);
  function symbol() external view returns (string memory);
  function decimals() external view returns (uint8);

  function totalSupply() external view returns (uint256);

  function balanceOf(address owner) external view returns (uint256);
  function transfer(address to, uint256 value) external returns (bool);
  function transferFrom(address from, address to, uint256 value) external returns (bool);

  function approve(address spender, uint256 value) external returns (bool);
  function allowance(address owner, address spender) external view returns (uint256);
}
coins/ethereum/contracts/Router.sol (new file) | 222

@@ -0,0 +1,222 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "./IERC20.sol";

import "./Schnorr.sol";
import "./Sandbox.sol";

contract Router {
  // Nonce is incremented for each batch of transactions executed/key update
  uint256 public nonce;

  // Current public key's x-coordinate
  // This key must always have the parity defined within the Schnorr contract
  bytes32 public seraiKey;

  struct OutInstruction {
    address to;
    Call[] calls;

    uint256 value;
  }

  struct Signature {
    bytes32 c;
    bytes32 s;
  }

  event SeraiKeyUpdated(
    uint256 indexed nonce,
    bytes32 indexed key,
    Signature signature
  );
  event InInstruction(
    address indexed from,
    address indexed coin,
    uint256 amount,
    bytes instruction
  );
  // success is a uint256 representing a bitfield of transaction successes
  event Executed(
    uint256 indexed nonce,
    bytes32 indexed batch,
    uint256 success,
    Signature signature
  );

  // error types
  error InvalidKey();
  error InvalidSignature();
  error InvalidAmount();
  error FailedTransfer();
  error TooManyTransactions();

  modifier _updateSeraiKeyAtEndOfFn(
    uint256 _nonce,
    bytes32 key,
    Signature memory sig
  ) {
    if (
      (key == bytes32(0)) ||
      ((bytes32(uint256(key) % Schnorr.Q)) != key)
    ) {
      revert InvalidKey();
    }

    _;

    seraiKey = key;
    emit SeraiKeyUpdated(_nonce, key, sig);
  }

  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
    0,
    _seraiKey,
    Signature({ c: bytes32(0), s: bytes32(0) })
  ) {
    nonce = 1;
  }

  // updateSeraiKey validates the given Schnorr signature against the current
  // public key, and if successful, updates the contract's public key to the
  // given one.
  function updateSeraiKey(
    bytes32 _seraiKey,
    Signature calldata sig
  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
    bytes memory message =
      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }
  }

  function inInstruction(
    address coin,
    uint256 amount,
    bytes memory instruction
  ) external payable {
    if (coin == address(0)) {
      if (amount != msg.value) {
        revert InvalidAmount();
      }
    } else {
      (bool success, bytes memory res) =
        address(coin).call(
          abi.encodeWithSelector(
            IERC20.transferFrom.selector,
            msg.sender,
            address(this),
            amount
          )
        );

      // Require there was nothing returned, which is done by some non-standard
      // tokens, or that the ERC20 contract did in fact return true
      bool nonStandardResOrTrue =
        (res.length == 0) || abi.decode(res, (bool));
      if (!(success && nonStandardResOrTrue)) {
        revert FailedTransfer();
      }
    }

    /*
      Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
      The amount instructed to transfer may not actually be the amount
      transferred.

      If we add nonReentrant to every single function which can effect the
      balance, we can check the amount exactly matches. This prevents transfers of
      less value than expected occurring, at least, not without an additional
      transfer to top up the difference (which isn't routed through this contract
      and accordingly isn't trying to artificially create events).

      If we don't add nonReentrant, a transfer can be started, and then a new
      transfer for the difference can follow it up (again and again until a
      rounding error is reached). This contract would believe all transfers were
      done in full, despite each only being done in part (except for the last
      one).

      Given fee-on-transfer tokens aren't intended to be supported, the only
      token planned to be supported is Dai and it doesn't have any fee-on-transfer
      logic, fee-on-transfer tokens aren't even able to be supported at this time,
      we simply classify this entire class of tokens as non-standard
      implementations which induce undefined behavior. It is the Serai network's
      role not to add support for any non-standard implementations.
    */
    emit InInstruction(msg.sender, coin, amount, instruction);
  }

  // execute accepts a list of transactions to execute as well as a signature.
  // if signature verification passes, the given transactions are executed.
  // if signature verification fails, this function will revert.
  function execute(
    OutInstruction[] calldata transactions,
    Signature calldata sig
  ) external {
    if (transactions.length > 256) {
      revert TooManyTransactions();
    }

    bytes memory message =
      abi.encode("execute", block.chainid, nonce, transactions);
    uint256 executed_with_nonce = nonce;
    // This prevents re-entrancy from causing double spends yet does allow
    // out-of-order execution via re-entrancy
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }

    uint256 successes;
    for (uint256 i = 0; i < transactions.length; i++) {
      bool success;

      // If there are no calls, send to `to` the value
      if (transactions[i].calls.length == 0) {
        (success, ) = transactions[i].to.call{
          value: transactions[i].value,
          gas: 5_000
        }("");
      } else {
        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
        // calls through that
        //
        // We could use a single sandbox in order to reduce gas costs, yet that
        // risks one person creating an approval that's hooked before another
        // user's intended action executes, in order to drain their coins
        //
        // While technically, that would be a flaw in the sandboxed flow, this
        // is robust and prevents such flaws from being possible
        //
        // We also don't want people to set state via the Sandbox and expect it
        // future available when anyone else could set a distinct value
        Sandbox sandbox = new Sandbox();
        (success, ) = address(sandbox).call{
          value: transactions[i].value,
          // TODO: Have the Call specify the gas up front
          gas: 350_000
        }(
          abi.encodeWithSelector(
            Sandbox.sandbox.selector,
            transactions[i].calls
          )
        );
      }

      assembly {
        successes := or(successes, shl(i, success))
      }
    }
    emit Executed(
      executed_with_nonce,
      keccak256(message),
      successes,
      sig
    );
  }
}
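The `Executed` event's `success` field packs one bit per transaction (bit i is set iff transaction i succeeded, per the `shl(i, success)` accumulation above). A sketch of decoding it off-chain, using alloy-core's U256 for the full 256-bit width:

use alloy_core::primitives::U256;

// Bit i of the bitfield reports whether transaction i in the batch succeeded
fn tx_succeeded(successes: U256, i: usize) -> bool {
  successes.bit(i)
}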
coins/ethereum/contracts/Sandbox.sol (new file) | 48

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;

struct Call {
  address to;
  uint256 value;
  bytes data;
}

// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
  error AlreadyCalled();
  error CallsFailed();

  function sandbox(Call[] calldata calls) external payable {
    // Prevent re-entrancy due to this executing arbitrary calls from anyone
    // and anywhere
    bool called;
    assembly { called := tload(0) }
    if (called) {
      revert AlreadyCalled();
    }
    assembly { tstore(0, 1) }

    // Execute the calls, starting from 1
    for (uint256 i = 1; i < calls.length; i++) {
      (bool success, ) =
        calls[i].to.call{ value: calls[i].value }(calls[i].data);

      // If this call failed, execute the fallback (call 0)
      if (!success) {
        (success, ) =
          calls[0].to.call{ value: address(this).balance }(calls[0].data);
        // If this call also failed, revert entirely
        if (!success) {
          revert CallsFailed();
        }
        return;
      }
    }

    // We don't clear the re-entrancy guard as this contract should never be
    // called again, so there's no reason to spend the effort
  }
}
@@ -1,36 +1,44 @@
-//SPDX-License-Identifier: AGPLv3
+// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

// see https://github.com/noot/schnorr-verify for implementation details
-contract Schnorr {
+library Schnorr {
  // secp256k1 group order
  uint256 constant public Q =
    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

-  // parity := public key y-coord parity (27 or 28)
-  // px := public key x-coord
-  // message := 32-byte message
-  // s := schnorr signature
-  // e := schnorr signature challenge
-  function verify(
-    uint8 parity,
-    bytes32 px,
-    bytes32 message,
-    bytes32 s,
-    bytes32 e
-  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s);
-    bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));
-
-    require(sp != 0);
-    // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
-    // check if they're zero.
-    address R = ecrecover(sp, parity, px, ep);
-    require(R != address(0), "ecrecover failed");
-    return e == keccak256(
-      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
-    );
+  // Fixed parity for the public keys used in this contract
+  // This avoids spending a word passing the parity in a similar style to
+  // Bitcoin's Taproot
+  uint8 constant public KEY_PARITY = 27;
+
+  error InvalidSOrA();
+  error MalformedSignature();
+
+  // px := public key x-coord, where the public key has a parity of KEY_PARITY
+  // message := 32-byte hash of the message
+  // c := schnorr signature challenge
+  // s := schnorr signature
+  function verify(
+    bytes32 px,
+    bytes memory message,
+    bytes32 c,
+    bytes32 s
+  ) internal pure returns (bool) {
+    // ecrecover = (m, v, r, s) -> key
+    // We instead pass the following to obtain the nonce (not the key)
+    // Then we hash it and verify it matches the challenge
+    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
+    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
+
+    // For safety, we want each input to ecrecover to be 0 (sa, px, ca)
+    // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero
+    // That leaves us to check `sa` are non-zero
+    if (sa == 0) revert InvalidSOrA();
+    address R = ecrecover(sa, KEY_PARITY, px, ca);
+    if (R == address(0)) revert MalformedSignature();
+
+    // Check the signature is correct by rebuilding the challenge
+    return c == keccak256(abi.encodePacked(R, px, message));
  }
}
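For reference, the standard trick this library builds on: ecrecover(m, v, r, s) returns the address of r⁻¹·(s·R′ − m·G), where R′ is the point with x-coordinate r and y-parity v. Passing m = −s·px, r = px, and −c·px in the ECDSA `s` slot (all mod Q, with P the even-parity key of x-coordinate px) yields the address of px⁻¹·((−c·px)·P − (−s·px)·G) = s·G − c·P. For a valid Schnorr signature, s·G = R + c·P, so this is exactly the address of the nonce R, and rebuilding the challenge as keccak256(R, px, message) must reproduce c.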
coins/ethereum/relayer/Cargo.toml (new file) | 30

@@ -0,0 +1,30 @@
[package]
name = "serai-ethereum-relayer"
version = "0.1.0"
description = "A relayer for Serai's Ethereum transactions"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/relayer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] }

serai-env = { path = "../../../common/env" }
serai-db = { path = "../../../common/db" }

[features]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]
coins/ethereum/relayer/LICENSE (new file) | 15

@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
coins/ethereum/relayer/README.md (new file) | 4

@@ -0,0 +1,4 @@
# Ethereum Transaction Relayer

This server collects Ethereum router commands to be published, offering an RPC
to fetch them.
100
coins/ethereum/relayer/src/main.rs
Normal file
100
coins/ethereum/relayer/src/main.rs
Normal file
@@ -0,0 +1,100 @@
pub(crate) use tokio::{
  io::{AsyncReadExt, AsyncWriteExt},
  net::TcpListener,
};

use serai_db::{Get, DbTxn, Db as DbTrait};

#[tokio::main(flavor = "current_thread")]
async fn main() {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }

  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();

  log::info!("Starting Ethereum relayer server...");

  // Open the DB
  #[allow(unused_variables, unreachable_code)]
  let db = {
    #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
    panic!("built with parity-db and rocksdb");
    #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
    let db =
      serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    #[cfg(feature = "rocksdb")]
    let db =
      serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
    db
  };

  // Start command recipience server
  // This should not be publicly exposed
  // TODO: Add auth
  tokio::spawn({
    let db = db.clone();
    async move {
      // 5132 ^ ((b'E' << 8) | b'R')
      let server = TcpListener::bind("0.0.0.0:20830").await.unwrap();
      loop {
        let (mut socket, _) = server.accept().await.unwrap();
        let db = db.clone();
        tokio::spawn(async move {
          let mut db = db.clone();
          loop {
            let Ok(msg_len) = socket.read_u32_le().await else { break };
            let mut buf = vec![0; usize::try_from(msg_len).unwrap()];
            let Ok(_) = socket.read_exact(&mut buf).await else { break };

            if buf.len() < 5 {
              break;
            }
            let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap());
            let mut txn = db.txn();
            txn.put(nonce.to_le_bytes(), &buf[4 ..]);
            txn.commit();

            let Ok(()) = socket.write_all(&[1]).await else { break };

            log::info!("received signed command #{nonce}");
          }
        });
      }
    }
  });

  // Start command fetch server
  // 5132 ^ ((b'E' << 8) | b'R') + 1
  let server = TcpListener::bind("0.0.0.0:20831").await.unwrap();
  loop {
    let (mut socket, _) = server.accept().await.unwrap();
    let db = db.clone();
    tokio::spawn(async move {
      let db = db.clone();
      loop {
        // Nonce to get the router command for
        let mut buf = vec![0; 4];
        let Ok(_) = socket.read_exact(&mut buf).await else { break };

        let command = db.get(&buf[.. 4]).unwrap_or(vec![]);
        let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await
        else {
          break;
        };
        let Ok(()) = socket.write_all(&command).await else { break };
      }
    });
  }
}
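To complement the server above, a minimal sketch of the fetch side (port 20831): write the 4-byte little-endian nonce, then read a length-prefixed command back. An empty body means no command is stored for that nonce yet, matching the server's `unwrap_or(vec![])`:

use tokio::{
  io::{AsyncReadExt, AsyncWriteExt},
  net::TcpStream,
};

// Fetch the signed command stored for `nonce`, if any, from the relayer.
async fn fetch_command(nonce: u32) -> std::io::Result<Option<Vec<u8>>> {
  let mut socket = TcpStream::connect("127.0.0.1:20831").await?;

  // Request by little-endian nonce
  socket.write_all(&nonce.to_le_bytes()).await?;

  // The response is a little-endian u32 length prefix, then the command
  let len = socket.read_u32_le().await?;
  if len == 0 {
    // The server sends an empty response when it has no command for this nonce
    return Ok(None);
  }
  let mut command = vec![0; usize::try_from(len).unwrap()];
  socket.read_exact(&mut command).await?;
  Ok(Some(command))
}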
37
coins/ethereum/src/abi/mod.rs
Normal file
37
coins/ethereum/src/abi/mod.rs
Normal file
@@ -0,0 +1,37 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
  use super::*;
  sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
  use super::*;
  sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
  use super::*;
  sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;
@@ -1,36 +0,0 @@
use thiserror::Error;
use eyre::{eyre, Result};

use ethers_providers::{Provider, Http};
use ethers_contract::abigen;

use crate::crypto::ProcessedSignature;

#[derive(Error, Debug)]
pub enum EthereumError {
  #[error("failed to verify Schnorr signature")]
  VerificationError,
}

abigen!(Schnorr, "./artifacts/Schnorr.abi");

pub async fn call_verify(
  contract: &Schnorr<Provider<Http>>,
  params: &ProcessedSignature,
) -> Result<()> {
  if contract
    .verify(
      params.parity + 27,
      params.px.to_bytes().into(),
      params.message,
      params.s.to_bytes().into(),
      params.e.to_bytes().into(),
    )
    .call()
    .await?
  {
    Ok(())
  } else {
    Err(eyre!(EthereumError::VerificationError))
  }
}
@@ -1,107 +1,188 @@
use sha3::{Digest, Keccak256};

use group::Group;
use group::ff::PrimeField;
use k256::{
  elliptic_curve::{
    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
  },
  AffinePoint, ProjectivePoint, Scalar, U256,
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256 as KU256,
};
#[cfg(test)]
use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

use frost::{
  algorithm::{Hram, SchnorrSignature},
  curve::{Ciphersuite, Secp256k1},
};

use frost::{algorithm::Hram, curve::Secp256k1};
use alloy_core::primitives::{Parity, Signature as AlloySignature};
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};

pub fn keccak256(data: &[u8]) -> [u8; 32] {
  Keccak256::digest(data).into()
use crate::abi::router::{Signature as AbiSignature};

pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
  alloy_core::primitives::keccak256(data).into()
}

pub fn hash_to_scalar(data: &[u8]) -> Scalar {
  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
}

pub fn address(point: &ProjectivePoint) -> [u8; 20] {
  let encoded_point = point.to_encoded_point(false);
  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
  // Last 20 bytes of the hash of the concatenated x and y coordinates
  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}

pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  if r.is_zero().into() || s.is_zero().into() {
    return None;
  }
/// Deterministically sign a transaction.
///
/// This function panics if passed a transaction with a non-None chain ID.
pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
  assert!(
    tx.chain_id.is_none(),
    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
  );

  #[allow(non_snake_case)]
  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
  #[allow(non_snake_case)]
  if let Some(R) = Option::<AffinePoint>::from(R) {
    #[allow(non_snake_case)]
    let R = ProjectivePoint::from(R);

    let r = r.invert().unwrap();
    let u1 = ProjectivePoint::GENERATOR * (-message * r);
    let u2 = R * (s * r);
    let key: ProjectivePoint = u1 + u2;
    if !bool::from(key.is_identity()) {
      return Some(address(&key));
  let sig_hash = tx.signature_hash().0;
  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
  loop {
    let r_bytes: [u8; 32] = r.to_repr().into();
    let s_bytes: [u8; 32] = s.to_repr().into();
    let v = Parity::NonEip155(false);
    let signature =
      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
    let tx = tx.clone().into_signed(signature);
    if tx.recover_signer().is_ok() {
      return tx;
    }
  }

  None
    // Re-hash until valid
    r = hash_to_scalar(r_bytes.as_ref());
    s = hash_to_scalar(s_bytes.as_ref());
  }
}

/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
  pub(crate) A: ProjectivePoint,
  pub(crate) px: Scalar,
}

impl PublicKey {
  /// Construct a new `PublicKey`.
  ///
  /// This will return None if the provided point isn't eligible to be a public key (due to
  /// bounds such as parity).
  #[allow(non_snake_case)]
  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
    let affine = A.to_affine();
    // Only allow even keys to save a word within Ethereum
    let is_odd = bool::from(affine.y_is_odd());
    if is_odd {
      None?;
    }

    let x_coord = affine.x();
    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
    // Return None if a reduction would occur
    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
    // headache/concern to have
    // This does ban a trivial amount of public keys
    if x_coord_scalar.to_repr() != x_coord {
      None?;
    }

    Some(PublicKey { A, px: x_coord_scalar })
  }

  pub fn point(&self) -> ProjectivePoint {
    self.A
  }

  pub(crate) fn eth_repr(&self) -> [u8; 32] {
    self.px.to_repr().into()
  }

  #[cfg(test)]
  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
    #[allow(non_snake_case)]
    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
  }
}

/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
  #[allow(non_snake_case)]
  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
    let a_encoded_point = A.to_encoded_point(true);
    let mut a_encoded = a_encoded_point.as_ref().to_owned();
    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
    let x_coord = A.to_affine().x();

    let mut data = address(R).to_vec();
    data.append(&mut a_encoded);
    data.append(&mut m.to_vec());
    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
    data.extend(x_coord.as_slice());
    data.extend(m);

    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
  }
}

pub struct ProcessedSignature {
  pub s: Scalar,
  pub px: Scalar,
  pub parity: u8,
  pub message: [u8; 32],
  pub e: Scalar,
/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
  pub(crate) c: Scalar,
  pub(crate) s: Scalar,
}

#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
  m: [u8; 32],
  R: &ProjectivePoint,
  s: Scalar,
  A: &ProjectivePoint,
  chain_id: U256,
) -> (Scalar, Scalar) {
  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
  let sr = processed_sig.s.mul(&processed_sig.px).negate();
  let er = processed_sig.e.mul(&processed_sig.px).negate();
  (sr, er)
}

#[allow(non_snake_case)]
pub fn process_signature_for_contract(
  m: [u8; 32],
  R: &ProjectivePoint,
  s: Scalar,
  A: &ProjectivePoint,
  chain_id: U256,
) -> ProcessedSignature {
  let encoded_pk = A.to_encoded_point(true);
  let px = &encoded_pk.as_ref()[1 .. 33];
  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
  ProcessedSignature {
    s,
    px: px_scalar,
    parity: &encoded_pk.as_ref()[0] - 2,
impl Signature {
  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
    #[allow(non_snake_case)]
    message: m,
    e,
    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
    EthereumHram::hram(&R, &public_key.A, message) == self.c
  }

  /// Construct a new `Signature`.
  ///
  /// This will return None if the signature is invalid.
  pub fn new(
    public_key: &PublicKey,
    message: &[u8],
    signature: SchnorrSignature<Secp256k1>,
  ) -> Option<Signature> {
    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
    if !signature.verify(public_key.A, c) {
      None?;
    }

    let res = Signature { c, s: signature.s };
    assert!(res.verify(public_key, message));
    Some(res)
  }

  pub fn c(&self) -> Scalar {
    self.c
  }
  pub fn s(&self) -> Scalar {
    self.s
  }

  pub fn to_bytes(&self) -> [u8; 64] {
    let mut res = [0; 64];
    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
    res
  }

  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
    let mut reader = bytes.as_slice();
    let c = Secp256k1::read_F(&mut reader)?;
    let s = Secp256k1::read_F(&mut reader)?;
    Ok(Signature { c, s })
  }
}
impl From<&Signature> for AbiSignature {
  fn from(sig: &Signature) -> AbiSignature {
    let c: [u8; 32] = sig.c.to_repr().into();
    let s: [u8; 32] = sig.s.to_repr().into();
    AbiSignature { c: c.into(), s: s.into() }
  }
}
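A minimal sketch of what `deterministically_sign` enables (keyless deployment, as `Deployer::deployment_tx` below uses it): anyone can reconstruct the identical signed transaction, fund its recovered signer, and broadcast it, with no one holding the signer's private key. This assumes the crate is consumed as `ethereum-serai`; broadcasting is elided:

use alloy_core::primitives::{U256, TxKind, Bytes};
use alloy_consensus::{SignableTransaction, TxLegacy};

use ethereum_serai::crypto::deterministically_sign;

fn main() {
  // Construct a chain-ID-less transaction so the signature (and thus the
  // recovered signer) is identical on every chain
  let tx = TxLegacy {
    chain_id: None,
    nonce: 0,
    gas_price: 100_000_000_000u128,
    gas_limit: 1_000_000u128,
    to: TxKind::Create,
    value: U256::ZERO,
    input: Bytes::new(), // the contract's init code would go here
  };

  let signed = deterministically_sign(&tx);
  // This is the address which must be funded before broadcasting `signed`
  let funding_target = signed.recover_signer().unwrap();
  println!("fund {funding_target} and publish the presigned transaction");
}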
113
coins/ethereum/src/deployer.rs
Normal file
113
coins/ethereum/src/deployer.rs
Normal file
@@ -0,0 +1,113 @@
use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. This account has no known private key to anyone,
  /// so ETH sent can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `ContractCall` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    // TODO: Make an abstraction for event filtering (de-duplicating common code)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
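A minimal sketch of the intended deployment flow, assuming `provider` is an already-connected Arc<RootProvider<SimpleRequest>>, the crate is consumed as `ethereum-serai`, and funding/broadcasting transactions is handled elsewhere:

use std::sync::Arc;

use alloy_provider::RootProvider;
use alloy_simple_request_transport::SimpleRequest;

use ethereum_serai::{crypto::PublicKey, deployer::Deployer};

async fn deploy_flow(
  provider: Arc<RootProvider<SimpleRequest>>,
  key: &PublicKey,
) -> Result<(), ethereum_serai::Error> {
  // If the Deployer isn't on-chain yet, anyone may fund the recovered signer
  // of this presigned transaction and broadcast it
  if Deployer::new(provider.clone()).await?.is_none() {
    let _deploy_deployer = Deployer::deployment_tx();
    // ... fund its signer and publish the transaction here ...
  }

  let deployer = Deployer::new(provider.clone()).await?.expect("Deployer not deployed");
  // This TxLegacy is unsigned; it could be signed via `deterministically_sign`
  let _deploy_router = deployer.deploy_router(key);
  // Once deployed, the Router is located with a single log query
  let _router = deployer.find_router(provider, key).await?;
  Ok(())
}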
105
coins/ethereum/src/erc20.rs
Normal file
105
coins/ethereum/src/erc20.rs
Normal file
@@ -0,0 +1,105 @@
use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types::Filter;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
  /// Construct a new view of the specified ERC20 contract.
  pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {
    Self(provider, Address::from(&address))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx =
        self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }
    Ok(top_level_transfers)
  }
}
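A minimal usage sketch, under the same provider and crate-name assumptions as above; `token`, `router_address`, and `block` are hypothetical inputs:

use std::sync::Arc;

use alloy_provider::RootProvider;
use alloy_simple_request_transport::SimpleRequest;

use ethereum_serai::erc20::Erc20;

async fn scan_block(
  provider: Arc<RootProvider<SimpleRequest>>,
  token: [u8; 20],
  router_address: [u8; 20],
  block: u64,
) -> Result<(), ethereum_serai::Error> {
  let erc20 = Erc20::new(provider, token);
  // Yields only transfers made by a direct, top-level transfer/transferFrom
  // call to the token, with any extra appended calldata exposed as `data`
  for transfer in erc20.top_level_transfers(block, router_address).await? {
    println!("transfer of {} in TX {:?}", transfer.amount, transfer.id);
  }
  Ok(())
}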
@@ -1,2 +1,35 @@
pub mod contract;
use thiserror::Error;

pub mod alloy {
  pub use alloy_core::primitives;
  pub use alloy_core as core;
  pub use alloy_sol_types as sol_types;

  pub use alloy_consensus as consensus;
  pub use alloy_network as network;
  pub use alloy_rpc_types as rpc_types;
  pub use alloy_simple_request_transport as simple_request_transport;
  pub use alloy_rpc_client as rpc_client;
  pub use alloy_provider as provider;
}

pub mod crypto;

pub(crate) mod abi;

pub mod erc20;
pub mod deployer;
pub mod router;

pub mod machine;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
  #[error("failed to verify Schnorr signature")]
  InvalidSignature,
  #[error("couldn't make call/send TX")]
  ConnectionError,
}
414
coins/ethereum/src/machine.rs
Normal file
414
coins/ethereum/src/machine.rs
Normal file
@@ -0,0 +1,414 @@
use std::{
  io::{self, Read},
  collections::HashMap,
};

use rand_core::{RngCore, CryptoRng};

use transcript::{Transcript, RecommendedTranscript};

use group::GroupEncoding;
use frost::{
  curve::{Ciphersuite, Secp256k1},
  Participant, ThresholdKeys, FrostError,
  algorithm::Schnorr,
  sign::*,
};

use alloy_core::primitives::U256;

use crate::{
  crypto::{PublicKey, EthereumHram, Signature},
  router::{
    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
    Router,
  },
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
  pub to: [u8; 20],
  pub value: U256,
  pub data: Vec<u8>,
}
impl Call {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut to = [0; 20];
    reader.read_exact(&mut to)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    let mut data_len = {
      let mut data_len = [0; 4];
      reader.read_exact(&mut data_len)?;
      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
    };

    // A valid DoS would be to claim 4 GB of data is present when only 4 bytes are
    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
    let mut data = vec![];
    while data_len > 0 {
      let chunk_len = data_len.min(1024);
      let mut chunk = vec![0; chunk_len];
      reader.read_exact(&mut chunk)?;
      data.extend(&chunk);
      data_len -= chunk_len;
    }

    Ok(Call { to, value, data })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.to)?;
    writer.write_all(&self.value.as_le_bytes())?;

    let data_len = u32::try_from(self.data.len())
      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
    writer.write_all(&data_len.to_le_bytes())?;
    writer.write_all(&self.data)
  }
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this, even when using the IETF variant
    // Since this is more comprehensive, it's done regardless, even if not strictly necessary
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
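A compressed sketch of how one participant might drive this machine, mirroring the `PreprocessMachine`/`SignMachine`/`SignatureMachine` flow above; networking between signers is elided, `keys` and `command` are assumed inputs, and the empty-message call reflects the machine generating its own message:

use std::collections::HashMap;

use rand_core::OsRng;

use frost::{curve::Secp256k1, Participant, ThresholdKeys, sign::*};

use ethereum_serai::machine::{RouterCommand, RouterCommandMachine, SignedRouterCommand};

fn sign_command(
  keys: ThresholdKeys<Secp256k1>,
  command: RouterCommand,
  // Preprocesses from the other participants, gathered off-machine
  their_preprocesses: HashMap<Participant, Preprocess<Secp256k1, ()>>,
  // Signature shares from the other participants, gathered off-machine
  their_shares: HashMap<Participant, SignatureShare<Secp256k1>>,
) -> SignedRouterCommand {
  let machine =
    RouterCommandMachine::new(keys, command).expect("key didn't have an eth representation");

  // Round one: generate our preprocess and broadcast it to the other signers
  let (machine, _our_preprocess) = machine.preprocess(&mut OsRng);

  // Round two: produce our signature share (the message is empty, as the
  // machine derives it from the command itself)
  let (machine, _our_share) = machine.sign(their_preprocesses, &[]).unwrap();

  // Completion: combine everyone's shares into the signed command
  machine.complete(their_shares).unwrap()
}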
443
coins/ethereum/src/router.rs
Normal file
443
coins/ethereum/src/router.rs
Normal file
@@ -0,0 +1,443 @@
use std::{sync::Arc, io, collections::HashSet};

use k256::{
  elliptic_curve::{group::GroupEncoding, sec1},
  ProjectivePoint,
};

use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;

use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};

use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

pub use crate::{
  Error,
  crypto::{PublicKey, Signature},
  abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
  Ether,
  Erc20([u8; 20]),
}

impl Coin {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    Ok(match kind[0] {
      0 => Coin::Ether,
      1 => {
        let mut address = [0; 20];
        reader.read_exact(&mut address)?;
        Coin::Erc20(address)
      }
      _ => Err(io::Error::other("unrecognized Coin type"))?,
    })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Coin::Ether => writer.write_all(&[0]),
      Coin::Erc20(token) => {
        writer.write_all(&[1])?;
        writer.write_all(token)
      }
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
  pub id: ([u8; 32], u64),
  pub from: [u8; 20],
  pub coin: Coin,
  pub amount: U256,
  pub data: Vec<u8>,
  pub key_at_end_of_block: ProjectivePoint,
}

impl InInstruction {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let id = {
      let mut id_hash = [0; 32];
      reader.read_exact(&mut id_hash)?;
      let mut id_pos = [0; 8];
      reader.read_exact(&mut id_pos)?;
      let id_pos = u64::from_le_bytes(id_pos);
      (id_hash, id_pos)
    };

    let mut from = [0; 20];
    reader.read_exact(&mut from)?;

    let coin = Coin::read(reader)?;
    let mut amount = [0; 32];
    reader.read_exact(&mut amount)?;
    let amount = U256::from_le_slice(&amount);

    let mut data_len = [0; 4];
    reader.read_exact(&mut data_len)?;
    let data_len = usize::try_from(u32::from_le_bytes(data_len))
      .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
    let mut data = vec![0; data_len];
    reader.read_exact(&mut data)?;

    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
    reader.read_exact(&mut key_at_end_of_block)?;
    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
      .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;

    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.id.0)?;
    writer.write_all(&self.id.1.to_le_bytes())?;

    writer.write_all(&self.from)?;

    self.coin.write(writer)?;
    writer.write_all(&self.amount.as_le_bytes())?;

    writer.write_all(
      &u32::try_from(self.data.len())
        .map_err(|_| {
          io::Error::other("InInstruction being written had data exceeding 2**32 in length")
        })?
        .to_le_bytes(),
    )?;
    writer.write_all(&self.data)?;

    writer.write_all(&self.key_at_end_of_block.to_bytes())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
  pub tx_id: [u8; 32],
  pub nonce: u64,
  pub signature: [u8; 64],
}

/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
  pub(crate) fn code() -> Vec<u8> {
    let bytecode = include_str!("../artifacts/Router.bin");
    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
  }

  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
    let mut bytecode = Self::code();
    // Append the constructor arguments
    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
    bytecode
  }

  // This isn't pub in order to force users to use `Deployer::find_router`.
  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
    Self(provider, address)
  }

  pub fn address(&self) -> [u8; 20] {
    **self.1
  }

  /// Get the key for Serai at the specified block.
  #[cfg(test)]
  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
  }

  /// Get the message to be signed in order to update the key for Serai.
  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
    let mut buffer = b"updateSeraiKey".to_vec();
    buffer.extend(&chain_id.to_be_bytes::<32>());
    buffer.extend(&nonce.to_be_bytes::<32>());
    buffer.extend(&key.eth_repr());
    buffer
  }

  /// Update the key representing Serai.
  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
    // TODO: Set a more accurate gas
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
        .abi_encode()
        .into(),
      gas_limit: 100_000,
      ..Default::default()
    }
  }

  /// Get the current nonce for the published batches.
  #[cfg(test)]
  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    Ok(res._0)
  }

  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
  pub(crate) fn execute_message(
    chain_id: U256,
    nonce: U256,
    outs: Vec<abi::OutInstruction>,
  ) -> Vec<u8> {
    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
  }

  /// Execute a batch of `OutInstruction`s.
  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
      // TODO
      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
      ..Default::default()
    }
  }

  pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<ProjectivePoint>, Error> {
    let filter = Filter::new().from_block(0).to_block(block).address(self.1);
    let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
    let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
    if all_keys.is_empty() {
      return Ok(None);
    };

    let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
    let last_key_x_coordinate = last_key_x_coordinate_log
      .log_decode::<SeraiKeyUpdated>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .key;

    let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
    compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
    compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());

    let key =
      Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?;
    Ok(Some(key))
  }

  pub async fn in_instructions(
    &self,
    block: u64,
    allowed_tokens: &HashSet<[u8; 20]>,
  ) -> Result<Vec<InInstruction>, Error> {
    let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else {
      return Ok(vec![]);
    };

    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut transfer_check = HashSet::new();
    let mut in_instructions = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let id = (
        log.block_hash.ok_or(Error::ConnectionError)?.into(),
        log.log_index.ok_or(Error::ConnectionError)?,
      );

      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self
        .0
        .get_transaction_by_hash(tx_hash)
        .await
        .ok()
        .flatten()
        .ok_or(Error::ConnectionError)?;

      let log =
        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

      let coin = if log.coin.0 == [0; 20] {
        Coin::Ether
      } else {
        let token = *log.coin.0;

        if !allowed_tokens.contains(&token) {
          continue;
        }

        // If this also counts as a top-level transfer via the token, drop it
        //
        // Necessary in order to handle a potential edge case with some theoretical token
        // implementations
        //
        // This will either let it be handled by the top-level transfer hook or will drop it
        // entirely on the side of caution
        if tx.to == Some(token.into()) {
          continue;
        }

        // Get all logs for this TX
        let receipt = self
          .0
          .get_transaction_receipt(tx_hash)
          .await
          .map_err(|_| Error::ConnectionError)?
          .ok_or(Error::ConnectionError)?;
        let tx_logs = receipt.inner.logs();

        // Find a matching transfer log
        let mut found_transfer = false;
        for tx_log in tx_logs {
          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
          // Ensure we didn't already use this transfer to check a distinct InInstruction event
          if transfer_check.contains(&log_index) {
            continue;
          }

          // Check if this log is from the token we expected to be transferred
          if tx_log.address().0 != token {
            continue;
          }
          // Check if this is a transfer log
          // https://github.com/alloy-rs/core/issues/589
          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
            continue;
          }
          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
          // Check if this is a transfer to us for the expected amount
          if (transfer.to == self.1) && (transfer.value == log.amount) {
            transfer_check.insert(log_index);
            found_transfer = true;
            break;
          }
        }
        if !found_transfer {
          // This shouldn't be a ConnectionError
          // This is an exploit, a non-conforming ERC20, or an invalid connection
          // This should halt the process which is sufficient, yet this is sub-optimal
          // TODO
          Err(Error::ConnectionError)?;
        }

        Coin::Erc20(token)
      };

      in_instructions.push(InInstruction {
        id,
        from: *log.from.0,
        coin,
        amount: log.amount,
        data: log.instruction.as_ref().to_vec(),
        key_at_end_of_block,
      });
    }

    Ok(in_instructions)
  }

  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
    let mut res = vec![];

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log =
          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    Ok(res)
  }

  #[cfg(feature = "tests")]
  pub fn key_updated_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
  }
  #[cfg(feature = "tests")]
  pub fn executed_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
  }
}
13
coins/ethereum/src/tests/abi/mod.rs
Normal file
13
coins/ethereum/src/tests/abi/mod.rs
Normal file
@@ -0,0 +1,13 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;
51
coins/ethereum/src/tests/contracts/ERC20.sol
Normal file
51
coins/ethereum/src/tests/contracts/ERC20.sol
Normal file
@@ -0,0 +1,51 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

contract TestERC20 {
  event Transfer(address indexed from, address indexed to, uint256 value);
  event Approval(address indexed owner, address indexed spender, uint256 value);

  function name() public pure returns (string memory) {
    return "Test ERC20";
  }
  function symbol() public pure returns (string memory) {
    return "TEST";
  }
  function decimals() public pure returns (uint8) {
    return 18;
  }

  function totalSupply() public pure returns (uint256) {
    return 1_000_000 * 10e18;
  }

  mapping(address => uint256) balances;
  mapping(address => mapping(address => uint256)) allowances;

  constructor() {
    balances[msg.sender] = totalSupply();
  }

  function balanceOf(address owner) public view returns (uint256) {
    return balances[owner];
  }
  function transfer(address to, uint256 value) public returns (bool) {
    balances[msg.sender] -= value;
    balances[to] += value;
    return true;
  }
  function transferFrom(address from, address to, uint256 value) public returns (bool) {
    allowances[from][msg.sender] -= value;
    balances[from] -= value;
    balances[to] += value;
    return true;
  }

  function approve(address spender, uint256 value) public returns (bool) {
    allowances[msg.sender][spender] = value;
    return true;
  }
  function allowance(address owner, address spender) public view returns (uint256) {
    return allowances[owner][spender];
  }
}
15
coins/ethereum/src/tests/contracts/Schnorr.sol
Normal file
15
coins/ethereum/src/tests/contracts/Schnorr.sol
Normal file
@@ -0,0 +1,15 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "../../../contracts/Schnorr.sol";

contract TestSchnorr {
  function verify(
    bytes32 px,
    bytes calldata message,
    bytes32 c,
    bytes32 s
  ) external pure returns (bool) {
    return Schnorr.verify(px, message, c, s);
  }
}
105
coins/ethereum/src/tests/crypto.rs
Normal file
105
coins/ethereum/src/tests/crypto.rs
Normal file
@@ -0,0 +1,105 @@
use rand_core::OsRng;

use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

#[test]
fn test_ecrecover() {
  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  // Sign the signature
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed(
      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
      &keccak256(MESSAGE).into(),
    )
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      hash_to_scalar(MESSAGE),
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
    .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
  let (keys, _) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
  R: ProjectivePoint,
  public_key: &PublicKey,
  m: &[u8],
  s: Scalar,
) -> (Scalar, Scalar) {
  let c = EthereumHram::hram(&R, &public_key.A, m);
  let sa = -(s * public_key.px);
  let ca = -(c * public_key.px);
  (sa, ca)
}

#[test]
fn test_ecrecover_hack() {
  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);

  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
  assert_eq!(q, address(&sig.R));
}
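A quick derivation of why `test_ecrecover_hack` works: `ecrecover(h, v, r, s_p)` returns the address of $Q = r^{-1}(s_p R' - h G)$, where $R'$ is the curve point recovered from x-coordinate $r$ and parity $v$. `preprocess_signature_for_ecrecover` passes $h = s_a = -(s \cdot px)$, $r = px$ (so $R'$ is the group key $A$, recovered with even parity), and $s_p = c_a = -(c \cdot px)$, giving

$$Q = px^{-1}\bigl((-c \cdot px) A - (-s \cdot px) G\bigr) = s G - c A = R$$

so comparing the recovered address against `address(&sig.R)` checks the Schnorr verification equation $s G = R + c A$ with a single precompile call.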
131 coins/ethereum/src/tests/mod.rs Normal file
@@ -0,0 +1,131 @@
use std::{sync::Arc, collections::HashMap};

use rand_core::OsRng;

use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};

use alloy_core::{
  primitives::{Address, U256, Bytes, TxKind},
  hex::FromHex,
};
use alloy_consensus::{SignableTransaction, TxLegacy};

use alloy_rpc_types::TransactionReceipt;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::crypto::{address, deterministically_sign, PublicKey};

#[cfg(test)]
mod crypto;

#[cfg(test)]
mod abi;
#[cfg(test)]
mod schnorr;
#[cfg(test)]
mod router;

pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();

  let mut offset = Scalar::ZERO;
  while PublicKey::new(group_key).is_none() {
    offset += Scalar::ONE;
    group_key += ProjectivePoint::GENERATOR;
  }
  for keys in keys.values_mut() {
    *keys = keys.offset(offset);
  }
  let public_key = PublicKey::new(group_key).unwrap();

  (keys, public_key)
}
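The retry loop in `key_gen` offsets the FROST group key until `PublicKey::new` accepts it (presumably rejecting keys the on-chain verifier can't represent), and applies the same offset to every share so the shares still reconstruct the published key. A sketch of the invariant this leaves for the tests below:

// Sketch only: every share was offset identically, so the shared group key
// is exactly the point PublicKey wrapped.
fn example() {
  let (keys, public_key) = key_gen();
  assert_eq!(keys[&Participant::new(1).unwrap()].group_key(), public_key.A);
}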

// TODO: Use a proper error here
pub async fn send(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
  let verifying_key = *wallet.verifying_key().as_affine();
  let address = Address::from(address(&verifying_key.into()));

  // https://github.com/alloy-rs/alloy/issues/539
  // let chain_id = provider.get_chain_id().await.unwrap();
  // tx.chain_id = Some(chain_id);
  tx.chain_id = None;
  tx.nonce = provider.get_transaction_count(address).await.unwrap();
  // 100 gwei
  tx.gas_price = 100_000_000_000u128;

  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
  assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
  assert!(
    provider.get_balance(address).await.unwrap() >
      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
  );

  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig.into(), &mut bytes);
  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
  pending_tx.get_receipt().await.ok()
}

pub async fn fund_account(
  provider: &RootProvider<SimpleRequest>,
  wallet: &k256::ecdsa::SigningKey,
  to_fund: Address,
  value: U256,
) -> Option<()> {
  let funding_tx =
    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
  assert!(send(provider, wallet, funding_tx).await.unwrap().status());

  Some(())
}

// TODO: Use a proper error here
pub async fn deploy_contract(
  client: Arc<RootProvider<SimpleRequest>>,
  wallet: &k256::ecdsa::SigningKey,
  name: &str,
) -> Option<Address> {
  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
  let hex_bin =
    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
  let bin = Bytes::from_hex(hex_bin).unwrap();

  let deployment_tx = TxLegacy {
    chain_id: None,
    nonce: 0,
    // 100 gwei
    gas_price: 100_000_000_000u128,
    gas_limit: 1_000_000,
    to: TxKind::Create,
    value: U256::ZERO,
    input: bin,
  };

  let deployment_tx = deterministically_sign(&deployment_tx);

  // Fund the deployer address
  fund_account(
    &client,
    wallet,
    deployment_tx.recover_signer().unwrap(),
    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
  )
  .await?;

  let (deployment_tx, sig, _) = deployment_tx.into_parts();
  let mut bytes = vec![];
  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
  let receipt = pending_tx.get_receipt().await.ok()?;
  assert!(receipt.status());

  Some(receipt.contract_address.unwrap())
}
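`deploy_contract` signs the deployment deterministically, so the sender address is derived from the transaction itself and merely needs funding, rather than the environment having to create and pass a deployer wallet (the TODO removed from the old tests further below). Usage is a single call per artifact; a minimal sketch:

// Sketch only: deploying the test contracts compiled into ./artifacts.
async fn example(client: Arc<RootProvider<SimpleRequest>>, wallet: &k256::ecdsa::SigningKey) {
  let schnorr = deploy_contract(client.clone(), wallet, "TestSchnorr").await.unwrap();
  let erc20 = deploy_contract(client, wallet, "TestERC20").await.unwrap();
  let _ = (schnorr, erc20);
}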
183 coins/ethereum/src/tests/router.rs Normal file
@@ -0,0 +1,183 @@
use std::{convert::TryFrom, sync::Arc, collections::HashMap};

use rand_core::OsRng;

use group::Group;
use k256::ProjectivePoint;
use frost::{
  curve::Secp256k1,
  Participant, ThresholdKeys,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::{Address, U256};

use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  crypto::*,
  deployer::Deployer,
  router::{Router, abi as router},
  tests::{key_gen, send, fund_account},
};

async fn setup_test() -> (
  AnvilInstance,
  Arc<RootProvider<SimpleRequest>>,
  u64,
  Router,
  HashMap<Participant, ThresholdKeys<Secp256k1>>,
  PublicKey,
) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let chain_id = provider.get_chain_id().await.unwrap();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  // Make sure the Deployer constructor returns None, as it doesn't exist yet
  assert!(Deployer::new(client.clone()).await.unwrap().is_none());

  // Deploy the Deployer
  let tx = Deployer::deployment_tx();
  fund_account(
    &client,
    &wallet,
    tx.recover_signer().unwrap(),
    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
  )
  .await
  .unwrap();

  let (tx, sig, _) = tx.into_parts();
  let mut bytes = vec![];
  tx.encode_with_signature_fields(&sig, &mut bytes);

  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
  let receipt = pending_tx.get_receipt().await.unwrap();
  assert!(receipt.status());
  let deployer =
    Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");

  let (keys, public_key) = key_gen();

  // Verify the Router constructor returns None, as it doesn't exist yet
  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());

  // Deploy the router
  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
    .await
    .unwrap();
  assert!(receipt.status());
  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();

  (anvil, client, chain_id, contract, keys, public_key)
}

async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
  client
    .get_block(client.get_block_number().await.unwrap().into(), false)
    .await
    .unwrap()
    .unwrap()
    .header
    .hash
    .unwrap()
    .0
}

#[tokio::test]
async fn test_deploy_contract() {
  let (_anvil, client, _, router, _, public_key) = setup_test().await;

  let block_hash = latest_block_hash(&client).await;
  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}

pub fn hash_and_sign(
  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
  public_key: &PublicKey,
  message: &[u8],
) -> Signature {
  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);

  Signature::new(public_key, message, sig).unwrap()
}

#[tokio::test]
async fn test_router_update_serai_key() {
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let next_key = loop {
    let point = ProjectivePoint::random(&mut OsRng);
    let Some(next_key) = PublicKey::new(point) else { continue };
    break next_key;
  };

  let message = Router::update_serai_key_message(
    U256::try_from(chain_id).unwrap(),
    U256::try_from(1u64).unwrap(),
    &next_key,
  );
  let sig = hash_and_sign(&keys, &public_key, &message);

  let first_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);

  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
      .await
      .unwrap();
  assert!(receipt.status());

  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
  // Check this does still offer the historical state
  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}

#[tokio::test]
async fn test_router_execute() {
  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;

  let to = Address::from([0; 20]);
  let value = U256::ZERO;
  let tx = router::OutInstruction { to, value, calls: vec![] };
  let txs = vec![tx];

  let first_block_hash = latest_block_hash(&client).await;
  let nonce = contract.nonce(first_block_hash).await.unwrap();
  assert_eq!(nonce, U256::try_from(1u64).unwrap());

  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
  let sig = hash_and_sign(&keys, &public_key, &message);

  let receipt =
    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
  assert!(receipt.status());

  let second_block_hash = latest_block_hash(&client).await;
  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
  // Check this does still offer the historical state
  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
  // TODO: Check logs

  println!("gas used: {:?}", receipt.gas_used);
  // println!("logs: {:?}", receipt.logs);
}
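Both signed messages above bind the chain ID and the Router's current nonce, which is why the historical-state assertions can distinguish pre- and post-update reads. The actual byte layouts live in `Router::update_serai_key_message` and `Router::execute_message`, which this diff doesn't show; purely as an illustration of the shape such a replay-protected encoding takes (every name below is hypothetical):

use alloy_core::primitives::U256;

// Illustrative only: a chain-ID- and nonce-bound message. The Router's real
// encoding is defined by the contract and its bindings, not by this sketch.
fn example_message(domain: &'static [u8], chain_id: U256, nonce: U256, payload: &[u8]) -> Vec<u8> {
  let mut message = domain.to_vec();
  message.extend(chain_id.to_be_bytes::<32>());
  message.extend(nonce.to_be_bytes::<32>());
  message.extend(payload);
  message
}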
93 coins/ethereum/src/tests/schnorr.rs Normal file
@@ -0,0 +1,93 @@
use std::sync::Arc;

use rand_core::OsRng;

use group::ff::PrimeField;
use k256::Scalar;

use frost::{
  curve::Secp256k1,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::Address;

use alloy_sol_types::SolCall;

use alloy_rpc_types::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  Error,
  crypto::*,
  tests::{key_gen, deploy_contract, abi::schnorr as abi},
};

async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
  (anvil, client, address)
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
}

pub async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  contract: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<(), Error> {
  let px: [u8; 32] = public_key.px.to_repr().into();
  let c_bytes: [u8; 32] = signature.c.to_repr().into();
  let s_bytes: [u8; 32] = signature.s.to_repr().into();
  let call = TransactionRequest::default().to(contract).input(TransactionInput::new(
    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
      .abi_encode()
      .into(),
  ));
  let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?;
  let res =
    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;

  if res._0 {
    Ok(())
  } else {
    Err(Error::InvalidSignature)
  }
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (_anvil, client, contract) = setup_test().await;

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();

  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}
@@ -1,128 +0,0 @@
use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};

use rand_core::OsRng;

use ::k256::{
  elliptic_curve::{bigint::ArrayEncoding, PrimeField},
  U256,
};

use ethers_core::{
  types::Signature,
  abi::Abi,
  utils::{keccak256, Anvil, AnvilInstance},
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};

use frost::{
  curve::Secp256k1,
  Participant,
  algorithm::IetfSchnorr,
  tests::{key_gen, algorithm_machines, sign},
};

use ethereum_serai::{
  crypto,
  contract::{Schnorr, call_verify},
};

// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
pub async fn deploy_schnorr_verifier_contract(
  chain_id: u32,
  client: Arc<Provider<Http>>,
  wallet: &k256::ecdsa::SigningKey,
) -> eyre::Result<Schnorr<Provider<Http>>> {
  let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();

  let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
  let hex_bin =
    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
  let bin = hex::decode(hex_bin).unwrap();
  let factory = ContractFactory::new(abi, bin.into(), client.clone());

  let mut deployment_tx = factory.deploy(())?.tx;
  deployment_tx.set_chain_id(chain_id);
  deployment_tx.set_gas(500_000);
  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);

  let sig_hash = deployment_tx.sighash();
  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();

  // EIP-155 v
  let mut v = u64::from(rid.to_byte());
  assert!((v == 0) || (v == 1));
  v += u64::from((chain_id * 2) + 35);

  let r = sig.r().to_repr();
  let r_ref: &[u8] = r.as_ref();
  let s = sig.s().to_repr();
  let s_ref: &[u8] = s.as_ref();
  let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });

  let pending_tx = client.send_raw_transaction(deployment_tx).await?;

  let mut receipt;
  while {
    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
    receipt.is_none()
  } {
    tokio::time::sleep(Duration::from_secs(6)).await;
  }
  let receipt = receipt.unwrap();
  assert!(receipt.status == Some(1.into()));

  let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
  Ok(contract)
}

async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
  let anvil = Anvil::new().spawn();

  let provider =
    Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
  let chain_id = provider.get_chainid().await.unwrap().as_u32();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
}

#[tokio::test]
async fn test_deploy_contract() {
  deploy_test_contract().await;
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (chain_id, _anvil, contract) = deploy_test_contract().await;
  let chain_id = U256::from(chain_id);

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let group_key = keys[&Participant::new(1).unwrap()].group_key();

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);

  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );
  let mut processed_sig =
    crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);

  call_verify(&contract, &processed_sig).await.unwrap();

  // test invalid signature fails
  processed_sig.message[0] = 0;
  assert!(call_verify(&contract, &processed_sig).await.is_err());
}
@@ -1,87 +0,0 @@
use k256::{
  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256,
};
use frost::{curve::Secp256k1, Participant};

use ethereum_serai::crypto::*;

#[test]
fn test_ecrecover() {
  use rand_core::OsRng;
  use sha2::Sha256;
  use sha3::{Digest, Keccak256};
  use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};

  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
    .unwrap();
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
  }

  assert_eq!(
    ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
      .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

#[test]
fn test_signing() {
  use frost::{
    algorithm::IetfSchnorr,
    tests::{algorithm_machines, key_gen, sign},
  };
  use rand_core::OsRng;

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let _group_key = keys[&Participant::new(1).unwrap()].group_key();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[test]
fn test_ecrecover_hack() {
  use frost::{
    algorithm::IetfSchnorr,
    tests::{algorithm_machines, key_gen, sign},
  };
  use rand_core::OsRng;

  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
  let group_key = keys[&Participant::new(1).unwrap()].group_key();
  let group_key_encoded = group_key.to_encoded_point(true);
  let group_key_compressed = group_key_encoded.as_ref();
  let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);
  let chain_id = U256::ONE;

  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );

  let (sr, er) =
    preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
  let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
  assert_eq!(q, address(&sig.R));
}
@@ -1,2 +0,0 @@
mod contract;
mod crypto;
@@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =

# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }

monero-generators = { path = "generators", version = "0.4", default-features = false }

@@ -91,7 +90,6 @@ std = [
  "multiexp/std",

  "transcript/std",
  "dleq/std",

  "monero-generators/std",

@@ -106,7 +104,7 @@ std = [

cache-distribution = ["async-lock"]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "dleq", "std"]
multisig = ["transcript", "frost", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
experimental = []


@@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop};

use sha3::{Digest, Keccak256};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use curve25519_dalek::{
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
  traits::VartimePrecomputedMultiscalarMul,
};

pub use monero_generators::{H, decompress_point};

@@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar {
  *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}

static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
  BASEPOINT_PRECOMP_CELL
    .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}

/// Monero protocol version.
///
/// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
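The new precomputation trades a one-time table build for faster variable-time multiplications against the basepoint; the CLSAG verification path later in this diff consumes it via `vartime_mixed_multiscalar_mul`. A minimal in-crate usage sketch:

use curve25519_dalek::{
  scalar::Scalar, edwards::EdwardsPoint, traits::VartimePrecomputedMultiscalarMul,
};

// Sketch only: equivalent to `s * ED25519_BASEPOINT_POINT`, using the cached
// tables. Only appropriate for public inputs, as this runs in variable time.
pub(crate) fn vartime_basepoint_mul(s: Scalar) -> EdwardsPoint {
  BASEPOINT_PRECOMP().vartime_multiscalar_mul([s])
}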
@@ -91,7 +91,7 @@ impl Bulletproofs {
      Bulletproofs::Plus(
        AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
          .unwrap()
          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap()))
          .unwrap(),
      )
    })

@@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D
use group::{ff::Field, Group};
use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};

use multiexp::BatchVerifier;
use multiexp::{BatchVerifier, multiexp};

use crate::{Commitment, ringct::bulletproofs::core::*};

@@ -17,7 +17,20 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs"));

static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
  *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N()))
  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}

pub(crate) fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
  }
  res
}

#[derive(Clone, PartialEq, Eq, Debug)]
@@ -57,7 +70,7 @@ impl OriginalStruct {
    let mut cache = hash_to_scalar(&y.to_bytes());
    let z = cache;

    let l0 = &aL - z;
    let l0 = aL - z;
    let l1 = sL;

    let mut zero_twos = Vec::with_capacity(MN);
@@ -69,12 +82,12 @@ impl OriginalStruct {
    }

    let yMN = ScalarVector::powers(y, MN);
    let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
    let r1 = yMN * sR;
    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
    let r1 = yMN * &sR;

    let (T1, T2, x, mut taux) = {
      let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
      let t2 = inner_product(&l1, &r1);
      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
      let t2 = l1.clone().inner_product(&r1);

      let mut tau1 = Scalar::random(&mut *rng);
      let mut tau2 = Scalar::random(&mut *rng);
@@ -100,10 +113,10 @@ impl OriginalStruct {
      taux += zpow[i + 2] * gamma;
    }

    let l = &l0 + &(l1 * x);
    let r = &r0 + &(r1 * x);
    let l = l0 + &(l1 * x);
    let r = r0 + &(r1 * x);

    let t = inner_product(&l, &r);
    let t = l.clone().inner_product(&r);

    let x_ip =
      hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
@@ -126,8 +139,8 @@ impl OriginalStruct {
      let (aL, aR) = a.split();
      let (bL, bR) = b.split();

      let cL = inner_product(&aL, &bR);
      let cR = inner_product(&aR, &bL);
      let cL = aL.clone().inner_product(&bR);
      let cR = aR.clone().inner_product(&bL);

      let (G_L, G_R) = G_proof.split_at(aL.len());
      let (H_L, H_R) = H_proof.split_at(aL.len());
@@ -140,8 +153,8 @@ impl OriginalStruct {
      let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
      let winv = w.invert().unwrap();

      a = (aL * w) + (aR * winv);
      b = (bL * winv) + (bR * w);
      a = (aL * w) + &(aR * winv);
      b = (bL * winv) + &(bR * w);

      if a.len() != 1 {
        G_proof = hadamard_fold(G_L, G_R, winv, w);

@@ -24,7 +24,7 @@ use crate::{
  },
};

// Figure 3
// Figure 3 of the Bulletproofs+ Paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement {
  generators: Generators,
@@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement {
}

#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct AggregateRangeWitness {
  values: Vec<u64>,
  gammas: Vec<Scalar>,
}
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);

impl AggregateRangeWitness {
  pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
    if commitments.is_empty() || (commitments.len() > MAX_M) {
      return None;
    }

    let mut values = Vec::with_capacity(commitments.len());
    let mut gammas = Vec::with_capacity(commitments.len());
    for commitment in commitments {
      values.push(commitment.amount);
      gammas.push(Scalar(commitment.mask));
    }
    Some(AggregateRangeWitness { values, gammas })
    Some(AggregateRangeWitness(commitments))
  }
}

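`AggregateRangeWitness` now owns the `Commitment`s outright instead of parallel value/mask vectors, removing any chance of the two desyncing. Constructing one, as a sketch (the amount is illustrative; `Commitment::new` and `random_scalar` are the crate-level helpers the removed code also used):

use rand_core::OsRng;

// Sketch only: the witness is built from the very commitments being range-proven.
fn example_witness() -> AggregateRangeWitness {
  let commitments = vec![Commitment::new(random_scalar(&mut OsRng), 5)];
  AggregateRangeWitness::new(commitments).unwrap()
}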
@@ -112,7 +103,7 @@ impl AggregateRangeStatement {
    let mut d = ScalarVector::new(mn);
    for j in 1 ..= V.len() {
      z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
      d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1]));
      d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
    }

    let mut ascending_y = ScalarVector(vec![y]);
@@ -124,7 +115,8 @@ impl AggregateRangeStatement {
    let mut descending_y = ascending_y.clone();
    descending_y.0.reverse();

    let d_descending_y = d.mul_vec(&descending_y);
    let d_descending_y = d.clone() * &descending_y;
    let d_descending_y_plus_z = d_descending_y + z;

    let y_mn_plus_one = descending_y[0] * y;

@@ -135,9 +127,9 @@ impl AggregateRangeStatement {

    let neg_z = -z;
    let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
    for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() {
    for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
      A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
      A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i)));
      A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
    }
    A_terms.push((y_mn_plus_one, commitment_accum));
    A_terms.push((
@@ -145,7 +137,14 @@ impl AggregateRangeStatement {
      Generators::g(),
    ));

    (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
    (
      y,
      d_descending_y_plus_z,
      y_mn_plus_one,
      z,
      ScalarVector(z_pow),
      A + multiexp_vartime(&A_terms),
    )
  }

  pub(crate) fn prove<R: RngCore + CryptoRng>(
@@ -154,13 +153,11 @@ impl AggregateRangeStatement {
    witness: &AggregateRangeWitness,
  ) -> Option<AggregateRangeProof> {
    // Check for consistency with the witness
    if self.V.len() != witness.values.len() {
    if self.V.len() != witness.0.len() {
      return None;
    }
    for (commitment, (value, gamma)) in
      self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
    {
      if Commitment::new(**gamma, *value).calculate() != **commitment {
    for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
      if witness.calculate() != **commitment {
        return None;
      }
    }
@@ -188,10 +185,16 @@ impl AggregateRangeStatement {
    let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
    for j in 1 ..= V.len() {
      d_js.push(Self::d_j(j, V.len()));
      a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
      #[allow(clippy::map_unwrap_or)]
      a_l.0.append(
        &mut u64_decompose(
          *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
        )
        .0,
      );
    }

    let a_r = a_l.sub(Scalar::ONE);
    let a_r = a_l.clone() - Scalar::ONE;

    let alpha = Scalar::random(&mut *rng);

@@ -209,14 +212,14 @@ impl AggregateRangeStatement {
    // Multiply by INV_EIGHT per earlier commentary
    A.0 *= crate::INV_EIGHT();

    let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) =
    let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
      Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);

    let a_l = a_l.sub(z);
    let a_r = a_r.add_vec(&d_descending_y).add(z);
    let a_l = a_l - z;
    let a_r = a_r + &d_descending_y_plus_z;
    let mut alpha = alpha;
    for j in 1 ..= witness.gammas.len() {
      alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
    for j in 1 ..= witness.0.len() {
      alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
    }

    Some(AggregateRangeProof {

@@ -3,8 +3,7 @@
use group::Group;
use dalek_ff_group::{Scalar, EdwardsPoint};

mod scalar_vector;
pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
mod point_vector;
pub(crate) use point_vector::PointVector;


@@ -1,114 +0,0 @@
use core::{
  borrow::Borrow,
  ops::{Index, IndexMut},
};
use std_shims::vec::Vec;

use zeroize::Zeroize;

use group::ff::Field;
use dalek_ff_group::Scalar;

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);

impl Index<usize> for ScalarVector {
  type Output = Scalar;
  fn index(&self, index: usize) -> &Scalar {
    &self.0[index]
  }
}

impl IndexMut<usize> for ScalarVector {
  fn index_mut(&mut self, index: usize) -> &mut Scalar {
    &mut self.0[index]
  }
}

impl ScalarVector {
  pub(crate) fn new(len: usize) -> Self {
    ScalarVector(vec![Scalar::ZERO; len])
  }

  pub(crate) fn add(&self, scalar: impl Borrow<Scalar>) -> Self {
    let mut res = self.clone();
    for val in &mut res.0 {
      *val += scalar.borrow();
    }
    res
  }

  pub(crate) fn sub(&self, scalar: impl Borrow<Scalar>) -> Self {
    let mut res = self.clone();
    for val in &mut res.0 {
      *val -= scalar.borrow();
    }
    res
  }

  pub(crate) fn mul(&self, scalar: impl Borrow<Scalar>) -> Self {
    let mut res = self.clone();
    for val in &mut res.0 {
      *val *= scalar.borrow();
    }
    res
  }

  pub(crate) fn add_vec(&self, vector: &Self) -> Self {
    debug_assert_eq!(self.len(), vector.len());
    let mut res = self.clone();
    for (i, val) in res.0.iter_mut().enumerate() {
      *val += vector.0[i];
    }
    res
  }

  pub(crate) fn mul_vec(&self, vector: &Self) -> Self {
    debug_assert_eq!(self.len(), vector.len());
    let mut res = self.clone();
    for (i, val) in res.0.iter_mut().enumerate() {
      *val *= vector.0[i];
    }
    res
  }

  pub(crate) fn inner_product(&self, vector: &Self) -> Scalar {
    self.mul_vec(vector).sum()
  }

  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
    debug_assert!(len != 0);

    let mut res = Vec::with_capacity(len);
    res.push(Scalar::ONE);
    res.push(x);
    for i in 2 .. len {
      res.push(res[i - 1] * x);
    }
    res.truncate(len);
    ScalarVector(res)
  }

  pub(crate) fn sum(mut self) -> Scalar {
    self.0.drain(..).sum()
  }

  pub(crate) fn len(&self) -> usize {
    self.0.len()
  }

  pub(crate) fn split(mut self) -> (Self, Self) {
    debug_assert!(self.len() > 1);
    let r = self.0.split_off(self.0.len() / 2);
    debug_assert_eq!(self.len(), r.len());
    (self, ScalarVector(r))
  }
}

pub(crate) fn weighted_inner_product(
  a: &ScalarVector,
  b: &ScalarVector,
  y: &ScalarVector,
) -> Scalar {
  a.inner_product(&b.mul_vec(y))
}
@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, ZeroizeOnDrop};

use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
use group::{
  ff::{Field, PrimeField},
  GroupEncoding,
@@ -12,11 +12,10 @@ use group::{
use dalek_ff_group::{Scalar, EdwardsPoint};

use crate::ringct::bulletproofs::plus::{
  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product,
  transcript::*,
  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
};

// Figure 1
// Figure 1 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct WipStatement {
  generators: Generators,
@@ -219,7 +218,7 @@ impl WipStatement {
      .zip(g_bold.0.iter().copied())
      .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
      .collect::<Vec<_>>();
    P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g));
    P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
    P_terms.push((witness.alpha, h));
    debug_assert_eq!(multiexp(&P_terms), P);
    P_terms.zeroize();
@@ -258,14 +257,13 @@ impl WipStatement {
      let d_l = Scalar::random(&mut *rng);
      let d_r = Scalar::random(&mut *rng);

      let c_l = weighted_inner_product(&a1, &b2, &y);
      let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y);
      let c_l = a1.clone().weighted_inner_product(&b2, &y);
      let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);

      // TODO: Calculate these with a batch inversion
      let y_inv_n_hat = y_n_hat.invert().unwrap();

      let mut L_terms = a1
        .mul(y_inv_n_hat)
      let mut L_terms = (a1.clone() * y_inv_n_hat)
        .0
        .drain(..)
        .zip(g_bold2.0.iter().copied())
@@ -277,8 +275,7 @@ impl WipStatement {
      L_vec.push(L);
      L_terms.zeroize();

      let mut R_terms = a2
        .mul(y_n_hat)
      let mut R_terms = (a2.clone() * y_n_hat)
        .0
        .drain(..)
        .zip(g_bold1.0.iter().copied())
@@ -294,8 +291,8 @@ impl WipStatement {
      (e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
        Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);

      a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e));
      b = b1.mul(inv_e).add_vec(&b2.mul(e));
      a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
      b = (b1 * inv_e) + &(b2 * e);
      alpha += (d_l * e_square) + (d_r * inv_e_square);

      debug_assert_eq!(g_bold.len(), a.len());

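The `weighted_inner_product` free function retired across the hunks above (and reintroduced as a `ScalarVector` method in the rewrite that follows) is the y-weighted inner product from the Bulletproofs+ paper:

$$\langle \vec{a}, \vec{b} \rangle_{\vec{y}} = \sum_i a_i b_i y_i$$

which the method form expresses directly as `(self * vector * y).sum()`.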
@@ -1,85 +1,17 @@
use core::ops::{Add, Sub, Mul, Index};
use core::{
  borrow::Borrow,
  ops::{Index, IndexMut, Add, Sub, Mul},
};
use std_shims::vec::Vec;

use zeroize::{Zeroize, ZeroizeOnDrop};

use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};

use multiexp::multiexp;

#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
macro_rules! math_op {
  ($Op: ident, $op: ident, $f: expr) => {
    #[allow(clippy::redundant_closure_call)]
    impl $Op<Scalar> for ScalarVector {
      type Output = ScalarVector;
      fn $op(self, b: Scalar) -> ScalarVector {
        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
      }
    }

    #[allow(clippy::redundant_closure_call)]
    impl $Op<Scalar> for &ScalarVector {
      type Output = ScalarVector;
      fn $op(self, b: Scalar) -> ScalarVector {
        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
      }
    }

    #[allow(clippy::redundant_closure_call)]
    impl $Op<ScalarVector> for ScalarVector {
      type Output = ScalarVector;
      fn $op(self, b: ScalarVector) -> ScalarVector {
        debug_assert_eq!(self.len(), b.len());
        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
      }
    }

    #[allow(clippy::redundant_closure_call)]
    impl $Op<&ScalarVector> for &ScalarVector {
      type Output = ScalarVector;
      fn $op(self, b: &ScalarVector) -> ScalarVector {
        debug_assert_eq!(self.len(), b.len());
        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
      }
    }
  };
}
math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);

impl ScalarVector {
  pub(crate) fn new(len: usize) -> ScalarVector {
    ScalarVector(vec![Scalar::ZERO; len])
  }

  pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
    debug_assert!(len != 0);

    let mut res = Vec::with_capacity(len);
    res.push(Scalar::ONE);
    for i in 1 .. len {
      res.push(res[i - 1] * x);
    }
    ScalarVector(res)
  }

  pub(crate) fn sum(mut self) -> Scalar {
    self.0.drain(..).sum()
  }

  pub(crate) fn len(&self) -> usize {
    self.0.len()
  }

  pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
    let (l, r) = self.0.split_at(self.0.len() / 2);
    (ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
  }
}

impl Index<usize> for ScalarVector {
  type Output = Scalar;
@@ -87,28 +19,120 @@ impl Index<usize> for ScalarVector {
    &self.0[index]
  }
}
impl IndexMut<usize> for ScalarVector {
  fn index_mut(&mut self, index: usize) -> &mut Scalar {
    &mut self.0[index]
  }
}

pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
  (a * b).sum()
impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
  type Output = ScalarVector;
  fn add(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s += scalar.borrow();
    }
    self
  }
}
impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
  type Output = ScalarVector;
  fn sub(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s -= scalar.borrow();
    }
    self
  }
}
impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
  type Output = ScalarVector;
  fn mul(mut self, scalar: S) -> ScalarVector {
    for s in &mut self.0 {
      *s *= scalar.borrow();
    }
    self
  }
}

impl Add<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn add(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s += o;
    }
    self
  }
}
impl Sub<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn sub(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s -= o;
    }
    self
  }
}
impl Mul<&ScalarVector> for ScalarVector {
  type Output = ScalarVector;
  fn mul(mut self, other: &ScalarVector) -> ScalarVector {
    debug_assert_eq!(self.len(), other.len());
    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
      *s *= o;
    }
    self
  }
}

impl Mul<&[EdwardsPoint]> for &ScalarVector {
  type Output = EdwardsPoint;
  fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
    debug_assert_eq!(self.len(), b.len());
    multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>())
    let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
    let res = multiexp(&multiexp_args);
    multiexp_args.zeroize();
    res
  }
}

pub(crate) fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
impl ScalarVector {
  pub(crate) fn new(len: usize) -> Self {
    ScalarVector(vec![Scalar::ZERO; len])
  }

  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
    debug_assert!(len != 0);

    let mut res = Vec::with_capacity(len);
    res.push(Scalar::ONE);
    res.push(x);
    for i in 2 .. len {
      res.push(res[i - 1] * x);
    }
    res.truncate(len);
    ScalarVector(res)
  }

  pub(crate) fn len(&self) -> usize {
    self.0.len()
  }

  pub(crate) fn sum(mut self) -> Scalar {
    self.0.drain(..).sum()
  }

  pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
    (self * vector).sum()
  }

  pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
    (self * vector * y).sum()
  }

  pub(crate) fn split(mut self) -> (Self, Self) {
    debug_assert!(self.len() > 1);
    let r = self.0.split_off(self.0.len() / 2);
    debug_assert_eq!(self.len(), r.len());
    (self, ScalarVector(r))
  }
  res
}

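The rewritten `ScalarVector` replaces the `.add`/`.sub`/`.mul`/`.add_vec`/`.mul_vec` method family with operators: scalar operands broadcast and consume `self`, while vector operands are borrowed on the right. A usage sketch (values illustrative):

use dalek_ff_group::Scalar;

// Sketch only: exercising the operator-based API.
fn example() {
  let a = ScalarVector::powers(Scalar::from(2u64), 4); // [1, 2, 4, 8]
  let b = a.clone() * Scalar::from(3u64); // [3, 6, 12, 24]
  // (1 * 3) + (2 * 6) + (4 * 12) + (8 * 24) = 255
  assert_eq!(a.inner_product(&b), Scalar::from(255u64));
}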
@@ -9,17 +9,17 @@ use std_shims::{
use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, Choice, CtOption};
use subtle::{ConstantTimeEq, ConditionallySelectable};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
  scalar::Scalar,
  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
  traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
};

use crate::{
  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  ringct::hash_to_point, serialize::*,
};

@@ -27,8 +27,6 @@ use crate::{
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
#[cfg(feature = "multisig")]
pub(crate) use multisig::add_key_image_share;

/// Errors returned when CLSAG signing fails.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -100,8 +98,11 @@ fn core(
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
  let n = ring.len();

  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
  let D = D * INV_EIGHT();
  let images_precomp = match A_c1 {
    Mode::Sign(..) => None,
    Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
  };
  let D_INV_EIGHT = D * INV_EIGHT();

  // Generate the transcript
  // Instead of generating multiple, a single transcript is created and then edited as needed
@@ -130,7 +131,7 @@ fn core(
  }

  to_hash.extend(I.compress().to_bytes());
  to_hash.extend(D.compress().to_bytes());
  to_hash.extend(D_INV_EIGHT.compress().to_bytes());
  to_hash.extend(pseudo_out.compress().to_bytes());
  // mu_P with agg_0
  let mu_P = hash_to_scalar(&to_hash);
@@ -169,29 +170,44 @@ fn core(
  }

  // Perform the core loop
  let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
  let mut c1 = c;
  for i in (start .. end).map(|i| i % n) {
    // This will only execute once and shouldn't need to be constant time. Making it constant time
    // removes the risk of branch prediction creating timing differences depending on ring index
    // however
    c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));

    let c_p = mu_P * c;
    let c_c = mu_C * c;

    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
    // (s_i * G) + (c_p * P_i) + (c_c * C_i)
    let L = match A_c1 {
      Mode::Sign(..) => {
        EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
      }
      Mode::Verify(..) => {
        BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
      }
    };

    let PH = hash_to_point(&P[i]);
    // Shouldn't be an issue as all of the variables in this vartime statement are public
    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

    // (c_p * I) + (c_c * D) + (s_i * PH)
    let R = match A_c1 {
      Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
      Mode::Verify(..) => {
        images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
      }
    };

    to_hash.truncate(((2 * n) + 3) * 32);
    to_hash.extend(L.compress().to_bytes());
    to_hash.extend(R.compress().to_bytes());
    c = hash_to_scalar(&to_hash);

    // This will only execute once and shouldn't need to be constant time. Making it constant time
    // removes the risk of branch prediction creating timing differences depending on ring index
    // however
    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
  }

  // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
  ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
  ((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
}

/// CLSAG signature, as used in Monero.
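For reference when reading the reworked core loop above, the per-member ring equations CLSAG evaluates (with aggregation coefficients $\mu_P$, $\mu_C$) are

$$L_i = s_i G + (\mu_P c_i) P_i + (\mu_C c_i) C_i, \qquad R_i = s_i \mathcal{H}_p(P_i) + (\mu_P c_i) I + (\mu_C c_i) D$$

with $c_{i+1}$ hashed from the transcript plus $L_i$ and $R_i$. Signing now evaluates these with constant-time `multiscalar_mul` while verification uses the variable-time precomputations, which is sound since every verification input is public. The real-spend index then closes the ring with $s = r - ((\mu_P c) x + (\mu_C c) z)$, the "r - cx" shape called out in the next hunk, where $z$ is the mask delta between the ring member's commitment and the pseudo-out.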
@@ -261,8 +277,10 @@ impl Clsag {
|
||||
nonce.deref() *
|
||||
hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
|
||||
);
|
||||
clsag.s[usize::from(inputs[i].2.decoys.i)] =
|
||||
(-((p * inputs[i].0.deref()) + c)) + nonce.deref();
|
||||
// Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
|
||||
// member's commitment and our input commitment (which will only have a known discrete log
|
||||
// over G if the amounts cancel out)
|
||||
clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c);
|
||||
inputs[i].0.zeroize();
|
||||
nonce.zeroize();
|
||||
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
use core::{ops::Deref, fmt::Debug};
|
||||
use std_shims::io::{self, Read, Write};
|
||||
use std_shims::{
|
||||
io::{self, Read, Write},
|
||||
collections::HashMap,
|
||||
};
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use rand_core::{RngCore, CryptoRng, SeedableRng};
|
||||
@@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
|
||||
|
||||
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
|
||||
|
||||
use group::{ff::Field, Group, GroupEncoding};
|
||||
use group::{
|
||||
ff::{Field, PrimeField},
|
||||
Group, GroupEncoding,
|
||||
};
|
||||
|
||||
use transcript::{Transcript, RecommendedTranscript};
|
||||
use dalek_ff_group as dfg;
|
||||
use dleq::DLEqProof;
|
||||
use frost::{
|
||||
dkg::lagrange,
|
||||
curve::Ed25519,
|
||||
@@ -26,10 +31,6 @@ use crate::ringct::{
|
||||
clsag::{ClsagInput, Clsag},
|
||||
};
|
||||
|
||||
fn dleq_transcript() -> RecommendedTranscript {
|
||||
RecommendedTranscript::new(b"monero_key_image_dleq")
|
||||
}
|
||||
|
||||
impl ClsagInput {
fn transcript<T: Transcript>(&self, transcript: &mut T) {
// Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -43,6 +44,7 @@ impl ClsagInput {
// They're just an unreliable reference to this data which will be included in the message
// if in use
transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
// This also transcripts the key image generator since it's derived from this key
transcript.append_message(b"key", pair[0].compress().to_bytes());
transcript.append_message(b"commitment", pair[1].compress().to_bytes())
}
@@ -70,13 +72,11 @@ impl ClsagDetails {
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
pub struct ClsagAddendum {
pub(crate) key_image: dfg::EdwardsPoint,
dleq: DLEqProof<dfg::EdwardsPoint>,
}

impl WriteAddendum for ClsagAddendum {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
self.dleq.write(writer)
writer.write_all(self.key_image.compress().to_bytes().as_ref())
}
}

@@ -97,9 +97,8 @@ pub struct ClsagMultisig {
transcript: RecommendedTranscript,

pub(crate) H: EdwardsPoint,
// Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
// an extra round
image: EdwardsPoint,
key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
image: Option<dfg::EdwardsPoint>,

details: Arc<RwLock<Option<ClsagDetails>>>,

@@ -117,7 +116,8 @@ impl ClsagMultisig {
transcript,

H: hash_to_point(&output_key),
image: EdwardsPoint::identity(),
key_image_shares: HashMap::new(),
image: None,

details,

@@ -135,20 +135,6 @@ impl ClsagMultisig {
}
}

pub(crate) fn add_key_image_share(
image: &mut EdwardsPoint,
generator: EdwardsPoint,
offset: Scalar,
included: &[Participant],
participant: Participant,
share: EdwardsPoint,
) {
if image.is_identity().into() {
*image = generator * offset;
}
*image += share * lagrange::<dfg::Scalar>(participant, included).0;
}

impl Algorithm<Ed25519> for ClsagMultisig {
type Transcript = RecommendedTranscript;
type Addendum = ClsagAddendum;
@@ -160,23 +146,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {

fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
_rng: &mut R,
keys: &ThresholdKeys<Ed25519>,
) -> ClsagAddendum {
ClsagAddendum {
key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
dleq: DLEqProof::prove(
rng,
// Doesn't take in a larger transcript object due to the usage of this
// Every prover would immediately write their own DLEq proof, when they can only do so in
// the proper order if they want to reach consensus
// It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
// try to merge later in some form, when it should instead just merge xH (as it does)
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
keys.secret_share(),
),
}
ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
}

fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -190,7 +163,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
Err(io::Error::other("non-canonical key image"))?;
}

Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
Ok(ClsagAddendum { key_image: xH })
}

fn process_addendum(
@@ -199,32 +172,29 @@ impl Algorithm<Ed25519> for ClsagMultisig {
l: Participant,
addendum: ClsagAddendum,
) -> Result<(), FrostError> {
if self.image.is_identity().into() {
if self.image.is_none() {
self.transcript.domain_separate(b"CLSAG");
// Transcript the ring
self.input().transcript(&mut self.transcript);
// Transcript the mask
self.transcript.append_message(b"mask", self.mask().to_bytes());

// Init the image to the offset
self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
}

// Transcript this participant's contribution
self.transcript.append_message(b"participant", l.to_bytes());

addendum
.dleq
.verify(
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
&[view.original_verification_share(l), addendum.key_image],
)
.map_err(|_| FrostError::InvalidPreprocess(l))?;

self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
add_key_image_share(
&mut self.image,
self.H,
view.offset().0,
view.included(),
l,
addendum.key_image.0,
);

// Accumulate the interpolated share
let interpolated_key_image_share =
addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
*self.image.as_mut().unwrap() += interpolated_key_image_share;

self
.key_image_shares
.insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);

Ok(())
}
@@ -252,7 +222,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
#[allow(non_snake_case)]
let (clsag, pseudo_out, p, c) = Clsag::sign_core(
&mut rng,
&self.image,
&self.image.expect("verifying a share despite never processing any addendums").0,
&self.input(),
self.mask(),
self.msg.as_ref().unwrap(),
@@ -261,7 +231,8 @@ impl Algorithm<Ed25519> for ClsagMultisig {
);
self.interim = Some(Interim { p, c, clsag, pseudo_out });

(-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
// r - p x, where p is the challenge for the keys
*nonces[0] - dfg::Scalar(p) * view.secret_share().deref()
}

#[must_use]
@@ -273,11 +244,13 @@ impl Algorithm<Ed25519> for ClsagMultisig {
) -> Option<Self::Signature> {
let interim = self.interim.as_ref().unwrap();
let mut clsag = interim.clsag.clone();
// We produced shares as `r - p x`, yet the signature is `r - p x - c x`
// Subtract `c x` (saved as `c`) now
clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
if clsag
.verify(
&self.input().decoys.ring,
&self.image,
&self.image.expect("verifying a signature despite never processing any addendums").0,
&interim.pseudo_out,
self.msg.as_ref().unwrap(),
)
@@ -295,10 +268,61 @@ impl Algorithm<Ed25519> for ClsagMultisig {
share: dfg::Scalar,
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
let interim = self.interim.as_ref().unwrap();
Ok(vec![

// For a share `r - p x`, the following two equalities should hold:
// - `(r - p x)G == R.0 - pV`, where `V = xG`
// - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
//
// This is effectively a discrete log equality proof for:
// V, K over G, H
// with nonces
// R.0, R.1
// and solution
// s
//
// Which is a batch-verifiable rewrite of the traditional CP93 proof
// (and also writable as a Generalized Schnorr Protocol)
//
// That means that, given a proper challenge, this alone can certainly be argued to prove the
// key image share is well-formed, and the provided signature proves as much for it.

// This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of
// the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
// is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
// extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).

let key_image_share = self.key_image_shares[&verification_share.to_bytes()];

// Hash every variable relevant here, using the hash output as the random weight
let mut weight_transcript =
RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
weight_transcript.append_message(b"H", self.H.to_bytes());
weight_transcript.append_message(b"xG", verification_share.to_bytes());
weight_transcript.append_message(b"xH", key_image_share.to_bytes());
weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
weight_transcript.append_message(b"s", share.to_repr());
let weight = weight_transcript.challenge(b"weight");
let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));

let part_one = vec![
(share, dfg::EdwardsPoint::generator()),
(dfg::Scalar(interim.p), verification_share),
// -(R.0 - pV) == -R.0 + pV
(-dfg::Scalar::ONE, nonces[0][0]),
])
(dfg::Scalar(interim.p), verification_share),
];

let mut part_two = vec![
(weight * share, dfg::EdwardsPoint(self.H)),
// -(R.1 - pK) == -R.1 + pK
(-weight, nonces[0][1]),
(weight * dfg::Scalar(interim.p), key_image_share),
];

let mut all = part_one;
all.append(&mut part_two);
Ok(all)
}
}

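Restating the check this hunk implements, in the comments' notation (w is the transcript-derived random weight): for an honest share s = r - px,

$$sG + pV - R_0 = 0 \qquad\text{and}\qquad sH + pK - R_1 = 0,$$

so both equalities batch into the single multiexp

$$(sG + pV - R_0) + w\,(sH + pK - R_1) = 0,$$

which fails with overwhelming probability if either equality does, since w is sampled only after every term is fixed.
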
@@ -21,7 +21,7 @@ fn test_aggregate_range_proof() {
}
let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
let statement = AggregateRangeStatement::new(commitment_points).unwrap();
let witness = AggregateRangeWitness::new(&commitments).unwrap();
let witness = AggregateRangeWitness::new(commitments).unwrap();

let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
statement.verify(&mut OsRng, &mut verifier, (), proof);

@@ -9,7 +9,6 @@ use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators,
weighted_inner_product::{WipStatement, WipWitness},
weighted_inner_product,
};

#[test]
@@ -68,7 +67,7 @@ fn test_weighted_inner_product() {
#[allow(non_snake_case)]
let P = g_bold.multiexp(&a) +
h_bold.multiexp(&b) +
(g * weighted_inner_product(&a, &b, &y_vec)) +
(g * a.clone().weighted_inner_product(&b, &y_vec)) +
(h * alpha);

let statement = WipStatement::new(generators, P, y);

@@ -57,7 +57,7 @@ fn clsag() {
}

let image = generate_key_image(&secrets.0);
let (clsag, pseudo_out) = Clsag::sign(
let (mut clsag, pseudo_out) = Clsag::sign(
&mut OsRng,
vec![(
secrets.0,
@@ -76,7 +76,12 @@ fn clsag() {
msg,
)
.swap_remove(0);

clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();

// make sure verification fails if we throw a random `c1` at it.
clsag.c1 = random_scalar(&mut OsRng);
assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
}
}

@@ -1,5 +1,5 @@
use core::{marker::PhantomData, fmt::Debug};
use std_shims::string::{String, ToString};
use core::{marker::PhantomData, fmt};
use std_shims::string::ToString;

use zeroize::Zeroize;

@@ -81,7 +81,7 @@ impl AddressType {
}

/// A type which returns the byte for a given address.
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug {
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug {
fn network_bytes(network: Network) -> (u8, u8, u8, u8);
}

@@ -191,8 +191,8 @@ pub struct Address<B: AddressBytes> {
pub view: EdwardsPoint,
}

impl<B: AddressBytes> core::fmt::Debug for Address<B> {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
impl<B: AddressBytes> fmt::Debug for Address<B> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fmt
.debug_struct("Address")
.field("meta", &self.meta)
@@ -212,8 +212,8 @@ impl<B: AddressBytes> Zeroize for Address<B> {
}
}

impl<B: AddressBytes> ToString for Address<B> {
fn to_string(&self) -> String {
impl<B: AddressBytes> fmt::Display for Address<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut data = vec![self.meta.to_byte()];
data.extend(self.spend.compress().to_bytes());
data.extend(self.view.compress().to_bytes());
@@ -226,7 +226,7 @@ impl<B: AddressBytes> ToString for Address<B> {
if let Some(id) = self.meta.kind.payment_id() {
data.extend(id);
}
encode_check(&data).unwrap()
write!(f, "{}", encode_check(&data).unwrap())
}
}

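Swapping the manual ToString impl for Display keeps existing call sites compiling, since the standard library's blanket impl<T: fmt::Display> ToString for T now supplies to_string. A small sketch (the addr binding is hypothetical):

let addr_str = addr.to_string(); // still works, now routed through Display
println!("sending to {addr}");  // and the address formats directly
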
@@ -105,13 +105,13 @@ pub struct Metadata {
/// but the payment ID will be returned here anyway:
///
/// 1) If the payment ID is tied to an output received by a subaddress account
/// that spent Monero in the transaction (the received output is considered
/// "change" and is not considered a "payment" in this case). If there are multiple
/// spending subaddress accounts in a transaction, the highest index spent key image
/// is used to determine the spending subaddress account.
/// that spent Monero in the transaction (the received output is considered
/// "change" and is not considered a "payment" in this case). If there are multiple
/// spending subaddress accounts in a transaction, the highest index spent key image
/// is used to determine the spending subaddress account.
///
/// 2) If the payment ID is the unencrypted variant and the block's hf version is
/// v12 or higher (https://github.com/serai-dex/serai/issues/512)
/// v12 or higher (https://github.com/serai-dex/serai/issues/512)
pub payment_id: Option<PaymentId>,
/// Arbitrary data encoded in TX extra.
pub arbitrary_data: Vec<Vec<u8>>,

@@ -364,8 +364,8 @@ impl Change {
/// 1) The change in the tx is shunted to the fee (fingerprintable fee).
///
/// 2) If there are 2 outputs in the tx, there would be no payment ID as is the case when the
/// reference wallet creates 2 output txs, since monero-serai doesn't know which output
/// to tie the dummy payment ID to.
/// reference wallet creates 2 output txs, since monero-serai doesn't know which output
/// to tie the dummy payment ID to.
pub fn fingerprintable(address: Option<MoneroAddress>) -> Change {
Change { address, view: None }
}

@@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
Participant, FrostError, ThresholdKeys,
dkg::lagrange,
sign::{
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -27,7 +28,7 @@ use frost::{
use crate::{
random_scalar,
ringct::{
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
RctPrunable,
},
transaction::{Input, Transaction},
@@ -261,8 +262,13 @@ impl SignMachine<Transaction> for TransactionSignMachine {
included.push(self.i);
included.sort_unstable();

// Convert the unified commitments to a Vec of the individual commitments
// Start calculating the key images, as needed on the TX level
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
*image = generator * offset;
}

// Convert the serialized nonce commitments to a parallelized Vec
let mut commitments = (0 .. self.clsags.len())
.map(|c| {
included
@@ -291,14 +297,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// provides the easiest API overall, as this is where the TX is (which needs the key
// images in its message), along with where the outputs are determined (where our
// outputs may need these in order to guarantee uniqueness)
add_key_image_share(
&mut images[c],
self.key_images[c].0,
self.key_images[c].1,
&included,
*l,
preprocess.addendum.key_image.0,
);
images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;

Ok((*l, preprocess))
})

@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
.unwrap();
let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();

// TODO: Needs https://github.com/monero-project/monero/pull/8882
// TODO: Needs https://github.com/monero-project/monero/pull/9260
// let fee_rate = daemon_rpc
// .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
// .await
@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);

// TODO: Needs https://github.com/monero-project/monero/pull/8882
// TODO: Needs https://github.com/monero-project/monero/pull/9260
// runner::check_weight_and_fee(&tx, fee_rate);

match spec {

@@ -18,7 +18,7 @@ workspace = true

[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }

[features]
parity-db = ["dep:parity-db"]

@@ -11,7 +11,7 @@ impl Get for Transaction<'_> {
let mut res = self.0.get(&key);
for change in &self.1 {
if change.1 == key.as_ref() {
res = change.2.clone();
res.clone_from(&change.2);
}
}
res

@@ -1,42 +1,65 @@
use std::sync::Arc;

use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB};
use rocksdb::{
DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
Transaction as RocksTransaction, Options, OptimisticTransactionDB,
};

use crate::*;

impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
pub struct Transaction<'a, T: ThreadMode>(
RocksTransaction<'a, OptimisticTransactionDB<T>>,
&'a OptimisticTransactionDB<T>,
);

impl<T: ThreadMode> Get for Transaction<'_, T> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
self.get(key).expect("couldn't read from RocksDB via transaction")
self.0.get(key).expect("couldn't read from RocksDB via transaction")
}
}
impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
}
fn del(&mut self, key: impl AsRef<[u8]>) {
self.delete(key).expect("couldn't delete from RocksDB via transaction")
self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
}
fn commit(self) {
Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
self.0.commit().expect("couldn't commit to RocksDB via transaction");
self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
self.1.flush().expect("couldn't flush RocksDB");
}
}

impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
TransactionDB::get(self, key).expect("couldn't read from RocksDB")
OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
}
}
impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
type Transaction<'a> = Transaction<'a, T>;
fn txn(&mut self) -> Self::Transaction<'_> {
self.transaction()
let mut opts = WriteOptions::default();
opts.set_sync(true);
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
}
}

pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
pub fn new_rocksdb(path: &str) -> RocksDB {
let mut options = Options::default();
options.create_if_missing(true);
options.set_compression_type(DBCompressionType::Lz4);
Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
options.set_compression_type(DBCompressionType::Zstd);

options.set_wal_compression_type(DBCompressionType::Zstd);
// 10 MB
options.set_max_total_wal_size(10 * 1024 * 1024);
options.set_wal_size_limit_mb(10);

options.set_log_level(LogLevel::Warn);
// 1 MB
options.set_max_log_file_size(1024 * 1024);
options.set_recycle_log_file_num(1);

Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
}

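A minimal usage sketch of the serai-db API as it stands after this hunk (the path and key/value bytes are illustrative):

let mut db = new_rocksdb("/tmp/serai-db-example");
let mut txn = db.txn(); // a synchronous (set_sync) optimistic transaction
txn.put(b"key", b"value");
assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
txn.commit(); // commits, then flushes the WAL and memtables
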
@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }

hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true }

@@ -55,6 +55,8 @@ impl Client {
fn connector() -> Connector {
let mut res = HttpConnector::new();
res.set_keepalive(Some(core::time::Duration::from_secs(60)));
res.set_nodelay(true);
res.set_reuse_address(true);
#[cfg(feature = "tls")]
let res = HttpsConnectorBuilder::new()
.with_native_roots()
@@ -68,7 +70,9 @@ impl Client {
pub fn with_connection_pool() -> Client {
Client {
connection: Connection::ConnectionPool(
HyperClient::builder(TokioExecutor::new()).build(Self::connector()),
HyperClient::builder(TokioExecutor::new())
.pool_idle_timeout(core::time::Duration::from_secs(60))
.build(Self::connector()),
),
}
}

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.60"
rust-version = "1.77.0"

[package.metadata.docs.rs]
all-features = true
@@ -19,8 +19,10 @@ workspace = true
[dependencies]
zeroize = { version = "^1.5", default-features = false }

[build-dependencies]
rustversion = { version = "1", default-features = false }

[features]
std = ["zeroize/std"]
default = ["std"]
# Commented for now as it requires nightly and we don't use nightly
# allocator = []
allocator = []

10
common/zalloc/build.rs
Normal file
@@ -0,0 +1,10 @@
#[rustversion::nightly]
fn main() {
println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
println!("cargo::rustc-cfg=zalloc_rustc_nightly");
}

#[rustversion::not(nightly)]
fn main() {
println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
}
@@ -1,6 +1,6 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(feature = "allocator", feature(allocator_api))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]

//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
//! This can either be used with Box (requires nightly and the "allocator" feature) to provide the
@@ -17,12 +17,12 @@ use zeroize::Zeroize;
/// An allocator wrapper which zeroizes its memory on dealloc.
pub struct ZeroizingAlloc<T>(pub T);

#[cfg(feature = "allocator")]
#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
use core::{
ptr::NonNull,
alloc::{AllocError, Allocator},
};
#[cfg(feature = "allocator")]
#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
unsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.0.allocate(layout)

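For context, the usual way this crate is consumed on stable (no allocator_api required) is as a global allocator; a sketch, relying on the GlobalAlloc wrapper impl zalloc provides (not shown in this hunk):

use zalloc::ZeroizingAlloc;

// Zeroize all heap memory on free, process-wide
#[global_allocator]
static ALLOCATOR: ZeroizingAlloc<std::alloc::System> = ZeroizingAlloc(std::alloc::System);
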
@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }

[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }

@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg;

use crate::{
p2p::{CosignedBlock, P2pMessageKind, P2p},
p2p::{CosignedBlock, GossipMessageKind, P2p},
substrate::LatestCosignedBlock,
};

@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
for cosign in cosigns {
let mut buf = vec![];
cosign.serialize(&mut buf).unwrap();
P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
}
sleep(Duration::from_secs(60)).await;
}

@@ -122,7 +122,7 @@ impl QueuedBatchesDb {

pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
let batches_vec = Self::get(txn, set).unwrap_or_default();
txn.del(&Self::key(set));
txn.del(Self::key(set));

let mut batches: &[u8] = &batches_vec;
let mut res = vec![];

@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
cosign_channel.send(cosigned_block).unwrap();
let mut buf = vec![];
cosigned_block.serialize(&mut buf).unwrap();
P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
None
}
// This causes an action on Substrate yet not on any Tributary
@@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
) {
let mut tributaries = HashMap::new();
'outer: loop {
// TODO: Create a better async flow for this, as this does still hammer this task
tokio::task::yield_now().await;
// TODO: Create a better async flow for this
tokio::time::sleep(core::time::Duration::from_millis(100)).await;

match tributary_event.try_recv() {
Ok(event) => match event {

File diff suppressed because it is too large
@@ -41,8 +41,9 @@ enum HasEvents {

create_db!(
SubstrateCosignDb {
ScanCosignFrom: () -> u64,
IntendedCosign: () -> (u64, Option<u64>),
BlockHasEvents: (block: u64) -> HasEvents,
BlockHasEventsCache: (block: u64) -> HasEvents,
LatestCosignedBlock: () -> u64,
}
);
@@ -85,7 +86,7 @@ async fn block_has_events(
serai: &Serai,
block: u64,
) -> Result<HasEvents, SeraiError> {
let cached = BlockHasEvents::get(txn, block);
let cached = BlockHasEventsCache::get(txn, block);
match cached {
None => {
let serai = serai.as_of(
@@ -107,8 +108,8 @@ async fn block_has_events(

let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

BlockHasEvents::set(txn, block, &has_events);
Ok(HasEvents::Yes)
BlockHasEventsCache::set(txn, block, &has_events);
Ok(has_events)
}
Some(code) => Ok(code),
}
@@ -135,6 +136,7 @@ async fn potentially_cosign_block(
if (block_has_events == HasEvents::No) &&
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
{
log::debug!("automatically co-signing next block ({block}) since it has no events");
LatestCosignedBlock::set(txn, &block);
}

@@ -178,7 +180,7 @@ async fn potentially_cosign_block(
which should be cosigned). Accordingly, it is necessary to call multiple times even if
`latest_number` doesn't change.
*/
pub async fn advance_cosign_protocol(
async fn advance_cosign_protocol_inner(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
@@ -203,16 +205,23 @@ pub async fn advance_cosign_protocol(
let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
// If we've never triggered a cosign, don't skip any cosigns based on proximity
if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
window_end_exclusive = 0;
window_end_exclusive = 1;
}

// The consensus rules for this are `last_intended_to_cosign_block + 1`
let scan_start_block = last_intended_to_cosign_block + 1;
// As a practical optimization, we don't re-scan old blocks since old blocks are independent of
// new state
let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));

// Check all blocks within the window to see if they should be cosigned
// If so, we skip them and need to flag them as skipped, so that once the window closes, we
// do cosign them
// We only perform this check if we haven't already marked a block as skipped, since the cosign
// the skipped block causes will cosign all other blocks within this window
if skipped_block.is_none() {
for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
let window_end_inclusive = window_end_exclusive - 1;
for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
skipped_block = Some(b);
log::debug!("skipping cosigning {b} due to proximity to prior cosign");
@@ -227,7 +236,7 @@ pub async fn advance_cosign_protocol(
// A list of sets which are cosigning, along with a boolean of if we're in the set
let mut cosigning = vec![];

for block in (last_intended_to_cosign_block + 1) ..= latest_number {
for block in scan_start_block ..= latest_number {
let actual_block = serai
.finalized_block_by_number(block)
.await?
@@ -276,6 +285,11 @@ pub async fn advance_cosign_protocol(

break;
}

// If this TX is committed, always start future scanning from the next block
ScanCosignFrom::set(&mut txn, &(block + 1));
// Since we're scanning *from* the next block, tidy the cache
BlockHasEventsCache::del(&mut txn, block);
}

if let Some((number, hash)) = to_cosign {
@@ -297,3 +311,22 @@ pub async fn advance_cosign_protocol(

Ok(())
}
|
||||
pub async fn advance_cosign_protocol(
|
||||
db: &mut impl Db,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
serai: &Serai,
|
||||
latest_number: u64,
|
||||
) -> Result<(), SeraiError> {
|
||||
loop {
|
||||
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
|
||||
// Only scan 1000 blocks at a time to limit a massive txn from forming
|
||||
let scan_to = latest_number.min(scan_from + 1000);
|
||||
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
|
||||
// If we didn't limit the scan_to, break
|
||||
if scan_to == latest_number {
|
||||
break;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
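An illustrative trace of the chunked scan above (hypothetical numbers; assumes each inner pass commits ScanCosignFrom through its scan_to):

// latest_number = 2_500, no prior ScanCosignFrom entry:
// pass 1: scan_from = 1     -> scan_to = min(2_500, 1_001) = 1_001
// pass 2: scan_from = 1_002 -> scan_to = min(2_500, 2_002) = 2_002
// pass 3: scan_from = 2_003 -> scan_to = min(2_500, 3_003) = 2_500 == latest_number, so break
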
@@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{
SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, NetworkId},
validator_sets::{
primitives::{ValidatorSet, amortize_excess_key_shares},
ValidatorSetsEvent,
},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
in_instructions::InInstructionsEvent,
coins::CoinsEvent,
};
@@ -69,12 +66,7 @@ async fn handle_new_set<D: Db>(
let set_participants =
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

let mut set_data = set_participants
.into_iter()
.map(|(k, w)| (k, u16::try_from(w).unwrap()))
.collect::<Vec<_>>();
amortize_excess_key_shares(&mut set_data);
set_data
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
};

let time = if let Ok(time) = block.time() {

@@ -14,7 +14,7 @@ use tokio::sync::RwLock;

use crate::{
processors::{Message, Processors},
TributaryP2p, P2pMessageKind, P2p,
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
};

pub mod tributary;
@@ -45,7 +45,10 @@ impl Processors for MemProcessors {

#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
pub struct LocalP2p(
usize,
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);

impl LocalP2p {
pub fn new(validators: usize) -> Vec<LocalP2p> {
@@ -65,11 +68,13 @@ impl P2p for LocalP2p {
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.1.write().await.1[to].push_back((self.0, msg));
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
}

async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
// Content-based deduplication
let mut lock = self.1.write().await;
{
@@ -81,19 +86,26 @@ impl P2p for LocalP2p {
}
let queues = &mut lock.1;

let kind_len = (match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
})
.len();
let msg = msg[kind_len ..].to_vec();

for (i, msg_queue) in queues.iter_mut().enumerate() {
if i == self.0 {
continue;
}
msg_queue.push_back((self.0, msg.clone()));
msg_queue.push_back((self.0, kind, msg.clone()));
}
}

async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
async fn receive(&self) -> P2pMessage<Self> {
// This is a cursed way to implement an async read from a Vec
loop {
if let Some(res) = self.1.write().await.1[self.0].pop_front() {
return res;
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
return P2pMessage { sender, kind, msg };
}
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
@@ -103,6 +115,11 @@ impl P2p for LocalP2p {
#[async_trait]
impl TributaryP2p for LocalP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
<Self as P2p>::broadcast(
self,
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
msg,
)
.await
}
}

@@ -26,7 +26,7 @@ use serai_db::MemDb;
use tributary::Tributary;

use crate::{
P2pMessageKind, P2p,
GossipMessageKind, P2pMessageKind, P2p,
tributary::{Transaction, TributarySpec},
tests::LocalP2p,
};
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
if tributary.handle_message(&msg.msg).await {
p2p.broadcast(msg.kind, msg.msg).await;
@@ -173,7 +173,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}
@@ -199,7 +199,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}

@@ -116,8 +116,8 @@ async fn sync_test() {
.map_err(|_| "failed to send ActiveTributary to heartbeat")
.unwrap();

// The heartbeat is once every 10 blocks
sleep(Duration::from_secs(10 * block_time)).await;
// The heartbeat is once every 10 blocks, with some limitations
sleep(Duration::from_secs(20 * block_time)).await;
assert!(syncer_tributary.tip().await != spec.genesis());

// Verify it synced to the tip

@@ -1,5 +1,5 @@
use core::{marker::PhantomData, fmt::Debug};
use std::{sync::Arc, io};
use std::{sync::Arc, io, collections::VecDeque};

use async_trait::async_trait;

@@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const BLOCK_MESSAGE: u8 = 1;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -194,7 +193,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
);
let blockchain = Arc::new(RwLock::new(blockchain));

let to_rebroadcast = Arc::new(RwLock::new(vec![]));
let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
// Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
// P2P layer
let p2p_meta_task_handle = Arc::new(
@@ -207,7 +206,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
for msg in to_rebroadcast {
p2p.broadcast(genesis, msg).await;
}
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
}
})
@@ -218,7 +217,15 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };

let TendermintHandle { synced_block, synced_block_result, messages, machine } =
TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
TendermintMachine::new(
db.clone(),
network.clone(),
genesis,
block_number,
start_time,
proposal,
)
.await;
tokio::spawn(machine.run());

Some(Self {
@@ -328,9 +335,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {

// Return true if the message should be rebroadcast.
pub async fn handle_message(&self, msg: &[u8]) -> bool {
// Acquire the lock now to prevent sync_block from being run at the same time
let mut sync_block = self.synced_block_result.write().await;

match msg.first() {
Some(&TRANSACTION_MESSAGE) => {
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
@@ -362,19 +366,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
false
}

Some(&BLOCK_MESSAGE) => {
let mut msg_ref = &msg[1 ..];
let Ok(block) = Block::<T>::read(&mut msg_ref) else {
log::error!("received invalid block message");
return false;
};
let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
if self.sync_block_internal(block, commit, &mut sync_block).await {
log::debug!("synced block over p2p net instead of building the commit ourselves");
}
false
}

_ => false,
}
}
|
||||
@@ -74,7 +74,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
|
||||
panic!("provided transaction saved to disk wasn't provided");
|
||||
};
|
||||
|
||||
if res.transactions.get(order).is_none() {
|
||||
if !res.transactions.contains_key(order) {
|
||||
res.transactions.insert(order, VecDeque::new());
|
||||
}
|
||||
res.transactions.get_mut(order).unwrap().push_back(tx);
|
||||
@@ -135,7 +135,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
|
||||
txn.put(current_provided_key, currently_provided);
|
||||
txn.commit();
|
||||
|
||||
if self.transactions.get(order).is_none() {
|
||||
if !self.transactions.contains_key(order) {
|
||||
self.transactions.insert(order, VecDeque::new());
|
||||
}
|
||||
self.transactions.get_mut(order).unwrap().push_back(tx);
|
||||
|
||||
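The check-then-insert pattern kept by this hunk could equivalently use the map's entry API, trading two lookups for one; a sketch (assuming order is Copy, as its use by value above suggests):

res.transactions.entry(order).or_insert_with(VecDeque::new).push_back(tx);
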
@@ -1,5 +1,8 @@
use core::ops::Deref;
use std::{sync::Arc, collections::HashMap};
use std::{
sync::Arc,
collections::{VecDeque, HashMap},
};

use async_trait::async_trait;

@@ -38,9 +41,8 @@ use tendermint::{
use tokio::sync::RwLock;

use crate::{
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
Blockchain, P2p,
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,
Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,
};

pub mod tx;
@@ -268,7 +270,7 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
pub(crate) validators: Arc<Validators>,
pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,

pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,
pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>,

pub(crate) p2p: P,
}
@@ -277,31 +279,10 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
pub const LATENCY_TIME: u32 = 1667;
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);

#[test]
fn assert_target_block_time() {
use serai_db::MemDb;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}

// Type parameters don't matter here since we only need to call block_time(),
// and it only relies on the constants of the trait implementation. block_time() is in seconds;
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}

#[async_trait]
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
type Db = D;

type ValidatorId = [u8; 32];
type SignatureScheme = Arc<Validators>;
type Weights = Arc<Validators>;
@@ -325,19 +306,28 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
}

async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());

// Since we're broadcasting a Tendermint message, set it to be re-broadcast every second
// until the block it's trying to build is complete
// If the P2P layer drops a message before all nodes obtained access, or a node had an
// intermittent failure, this will ensure reconciliation
// Resolves halts caused by timing discrepancies, which technically are violations of
// Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing
// environments
// This is atrocious if there's no content-based deduplication protocol for messages actively
// being gossiped
// LibP2p, as used by Serai, is configured to content-based deduplicate
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());
self.to_rebroadcast.write().await.push(to_broadcast.clone());
{
let mut to_rebroadcast_lock = self.to_rebroadcast.write().await;
to_rebroadcast_lock.push_back(to_broadcast.clone());
// We should have, ideally, 3 * validators messages within a round
// Therefore, this should keep the most recent 2 rounds
// TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
// messages
while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
to_rebroadcast_lock.pop_front();
}
}

self.p2p.broadcast(self.genesis, to_broadcast).await
}

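A quick sanity check of the retention cap above, with a hypothetical validator count:

// 5 validators, ~3 consensus messages (propose/prevote/precommit) each per round:
// two rounds ≈ 2 * 3 * 5 = 30 == 6 * 5 retained entries
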
@@ -423,12 +413,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
);
match block_res {
Ok(()) => {
// If we successfully added this block, broadcast it
// TODO: Move this under the coordinator once we set up on new block notifications?
let mut msg = serialized_block.0;
msg.insert(0, BLOCK_MESSAGE);
msg.extend(encoded_commit);
self.p2p.broadcast(self.genesis, msg).await;
// If we successfully added this block, break
break;
}
Err(BlockError::NonLocalProvided(hash)) => {
@@ -437,13 +422,14 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
hex::encode(hash),
hex::encode(self.genesis)
);
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}
_ => return invalid_block(),
}
}

// Since we've added a valid block, clear to_rebroadcast
*self.to_rebroadcast.write().await = vec![];
*self.to_rebroadcast.write().await = VecDeque::new();

Some(TendermintBlock(
self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),

@@ -1,3 +1,6 @@
#[cfg(test)]
mod tendermint;

mod transaction;
pub use transaction::*;

28
coordinator/tributary/src/tests/tendermint.rs
Normal file
@@ -0,0 +1,28 @@
use tendermint::ext::Network;
use crate::{
P2p, TendermintTx,
tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
};

#[test]
fn assert_target_block_time() {
use serai_db::MemDb;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}

// Type parameters don't matter here since we only need to call block_time(),
// and it only relies on the constants of the trait implementation. block_time() is in seconds;
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}
@@ -27,5 +27,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std", "
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tokio = { version = "1", default-features = false, features = ["time"] }

serai-db = { path = "../../../common/db", version = "0.1", default-features = false }

[dev-dependencies]
tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }

Some files were not shown because too many files have changed in this diff