57 Commits

Author SHA1 Message Date
akildemir
33fcd27dd1 Merge branch 'emissions' of https://github.com/akildemir/serai into block-emissions 2024-05-03 13:50:52 +03:00
akildemir
04fcb2bba3 fix rotation test 2024-05-03 13:34:01 +03:00
akildemir
90a5232bbd Merge branch 'develop' of https://github.com/serai-dex/serai into emissions 2024-05-03 12:08:52 +03:00
GitHub Actions
21123590bb Update nightly 2024-05-01 01:10:58 -04:00
akildemir
20cf4c930c update to latest develop 2024-04-30 10:00:21 +03:00
akildemir
7ecbfde936 Merge branch 'develop' of https://github.com/serai-dex/serai into emissions 2024-04-30 09:42:04 +03:00
akildemir
926ddd09db add genesis liquidity test & misc fixes 2024-04-29 23:19:55 +03:00
Luke Parker
bc1dec7991 Move TRANSACTION_MESSAGE to 1 2024-04-28 04:04:53 -04:00
Luke Parker
cef63a631a Add a dev ethereum Docker setup
Also adds untested Dockerfiles for reth, lighthouse, and nimbus.
2024-04-24 09:30:54 -04:00
Luke Parker
d57fef8999 Slight documentation tweaks 2024-04-24 03:55:23 -04:00
Luke Parker
d1474e9188 Route top-level transfers through to the processor 2024-04-24 03:38:31 -04:00
Luke Parker
b39c751403 Reduce target peers a bit 2024-04-23 12:59:45 -04:00
Luke Parker
cc7202e0bf Correct recv to try_recv when exhausting channel 2024-04-23 12:40:21 -04:00
Luke Parker
19e68f7f75 Correct selection of to-try peers to prevent infinite loops when to-try < target 2024-04-23 12:04:30 -04:00
Luke Parker
d94c9a4a5e Use a constant for the target amount of peers 2024-04-23 11:59:51 -04:00
Luke Parker
43dc036660 Use a HashSet for which networks to try peer finding for
Prevents a flood of retries from individually failed attempts within a batch of
peer connection attempts.
2024-04-23 10:55:56 -04:00
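A minimal sketch of the deduplication this commit describes, with hypothetical names (the coordinator's actual types differ):

use std::collections::HashSet;

// Hypothetical network identifier; the coordinator's actual type differs
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum NetworkId { Bitcoin, Ethereum, Monero }

// Collecting failed dials into a set means a batch of failures for one
// network schedules a single retry, not one retry per failed attempt
fn networks_to_retry(failed_dials: &[NetworkId]) -> HashSet<NetworkId> {
  failed_dials.iter().copied().collect()
}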
Luke Parker
95591218bb Remove cbor 2024-04-23 07:01:07 -04:00
Luke Parker
7dd587a864 Inline broadcast_raw now that it doesn't have multiple callers 2024-04-23 06:44:21 -04:00
Luke Parker
023275bcb6 Properly diversify ReqResMessageKind/GossipMessageKind 2024-04-23 06:37:41 -04:00
Luke Parker
8cef9eff6f Move keep alive, heartbeat, block to request/response 2024-04-23 05:44:58 -04:00
Luke Parker
b5e22dca8f Correct no-std Monero after moving from ToString to Display 2024-04-23 05:25:08 -04:00
Luke Parker
a41329c027 Update clippy now that redundant imports has been reverted 2024-04-23 04:31:27 -04:00
Luke Parker
a25e6330bd Remove DLEq proofs from CLSAG multisig
1) Removes the key image DLEq on the Monero side of things, as the produced
   signature share serves as a DLEq for it.
2) Removes the nonce DLEqs from modular-frost as they're unnecessary for
   monero-serai. Updates documentation accordingly.

Without the proof that the nonces are internally consistent, the produced signatures
from modular-frost can be argued as a batch-verifiable CP93 DLEq (R0, R1, s),
or as a GSP for the CP93 DLEq statement (which naturally produces (R0, R1, s)).

The lack of proving the nonces consistent does make the process weaker, yet
it's also unnecessary for the class of protocols this is intended to service.
To provide DLEqs for the nonces would be to provide PoKs for the nonce
commitments (in the traditional Schnorr case).
2024-04-21 23:01:32 -04:00
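For reference, the batch-verifiable CP93 DLEq mentioned above, for the statement that K_0 = xG and K_1 = xH share a discrete logarithm across generators G and H (notation and challenge inputs are assumptions, not taken from modular-frost):

\begin{align*}
  c &= \mathcal{H}(R_0, R_1, K_0, K_1, m) \\
  sG &\stackrel{?}{=} R_0 + cK_0 \\
  sH &\stackrel{?}{=} R_1 + cK_1
\end{align*}

The two checks can be folded into a single equation under a random linear combination, which is what makes the proof batch-verifiable.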
Luke Parker
558a2bfa46 Slight tweaks to BP+ 2024-04-21 21:51:44 -04:00
Luke Parker
c73acb3d62 Log on new tendermint message debug -> trace 2024-04-21 19:28:21 -04:00
Luke Parker
933b17aa91 Revert coordinator/tributary to fd4f247917
#560 is causing notable CI failures, with its logs including slashes at 10x
the prior rate.
2024-04-21 10:16:12 -04:00
Luke Parker
5fa7e3d450 Line for prior commit 2024-04-21 08:55:29 -04:00
Luke Parker
749d783b1e Comment the insanely aggressive timeout future trace log 2024-04-21 08:53:35 -04:00
Luke Parker
5a3ea80943 Add missing continue to prevent dialing a node we're connected to 2024-04-21 08:36:52 -04:00
Luke Parker
fddbebc7c0 Replace expect with debug log 2024-04-21 08:02:34 -04:00
Luke Parker
e01848aa9e Correct boolean NOT on is_fresh_dial 2024-04-21 07:30:31 -04:00
Luke Parker
320b5627b5 Retry if initial dials fail, not just upon disconnect 2024-04-21 07:26:16 -04:00
Luke Parker
be7780e69d Restart coordinator peer finding upon disconnections 2024-04-21 07:02:49 -04:00
Luke Parker
0ddbaefb38 Correct timing around when we verify precommit signatures 2024-04-21 06:12:01 -04:00
Luke Parker
0f0db14f05 Ethereum Integration (#557)
* Clean up Ethereum

* Consistent contract address for deployed contracts

* Flesh out Router a bit

* Add a Deployer for DoS-less deployment

* Implement Router-finding

* Use CREATE2 helper present in ethers

* Move from CREATE2 to CREATE

Bit more streamlined for our use case.

* Document ethereum-serai

* Tidy tests a bit

* Test updateSeraiKey

* Use encodePacked for updateSeraiKey

* Take in the block hash to read state during

* Add a Sandbox contract to the Ethereum integration

* Add retrieval of transfers from Ethereum

* Add inInstruction function to the Router

* Augment our handling of InInstructions events with a check the transfer event also exists

* Have the Deployer error upon failed deployments

* Add --via-ir

* Make get_transaction test-only

We only used it to get transactions to confirm the resolution of Eventualities.
Eventualities need to be modularized. By introducing the dedicated
confirm_completion function, we remove the need for a non-test get_transaction
AND begin this modularization (by no longer explicitly grabbing a transaction
to check with).

* Modularize Eventuality

Almost fully deprecates the Transaction trait in favor of Completion. Replaces
Transaction ID with Claim (see the sketch after this list).

* Modularize the Scheduler behind a trait

* Add an extremely basic account Scheduler

* Add nonce uses, key rotation to the account scheduler

* Only report the account Scheduler empty after transferring keys

Also ban payments to the branch/change/forward addresses.

* Make fns reliant on state test-only

* Start of an Ethereum integration for the processor

* Add a session to the Router to prevent updateSeraiKey replaying

This would only happen if an old key was rotated to again, which would require
n-of-n collusion (already ridiculous and a valid fault attributable event). It
just clarifies the formal arguments.

* Add a RouterCommand + SignMachine for producing it to coins/ethereum

* Ethereum which compiles

* Have branch/change/forward return an option

Also defines a UtxoNetwork extension trait for MAX_INPUTS.

* Make external_address exclusively a test fn

* Move the "account" scheduler to "smart contract"

* Remove ABI artifact

* Move refund/forward Plan creation into the Processor

We create forward Plans in the scan path, and need to know their exact fees in
the scan path. This requires adding a somewhat wonky shim_forward_plan method
so we can obtain a Plan equivalent to the actual forward Plan for fee reasons,
yet don't expect it to be the actual forward Plan (which may be distinct if
the Plan pulls from the global state, such as with a nonce).

Also properly types a Scheduler addendum such that the SC scheduler isn't
cramming the nonce to use into the N::Output type.

* Flesh out the Ethereum integration more

* Two commits ago, into the **Scheduler, not Processor

* Remove misc TODOs in SC Scheduler

* Add constructor to RouterCommandMachine

* RouterCommand read, pairing with the prior added write

* Further add serialization methods

* Have the Router's key included with the InInstruction

This does not use the key at the time of the event. This uses the key at the
end of the block for the event. It's much simpler than getting the full event
streams for each, checking when they interlace.

This does not read the state. Every block, this makes a request for every
single key update and simply chooses the last one. This allows pruning state,
only keeping the event tree. Ideally, we'd also introduce a cache to reduce the
cost of the filter (small in events yielded, long in blocks searched).

Since Serai doesn't have any forwarding TXs, nor Branches, nor change, all of
our Plans should solely have payments out, and there's no expectation of a Plan
being made under one key broken by it being received by another key.

* Add read/write to InInstruction

* Abstract the ABI for Call/OutInstruction in ethereum-serai

* Fill out signable_transaction for Ethereum

* Move ethereum-serai to alloy

Resolves #331.

* Use the opaque sol macro instead of generated files

* Move the processor over to the now-alloy-based ethereum-serai

* Use the ecrecover provided by alloy

* Have the SC use nonce for rotation, not session (an independent nonce which wasn't synchronized)

* Always use the latest keys for SC scheduled plans

* get_eventuality_completions for Ethereum

* Finish fleshing out the processor Ethereum integration as needed for serai-processor tests

This doesn't support any actual deployments, not even the ones simulated by
serai-processor-docker-tests.

* Add alloy-simple-request-transport to the GH workflows

* cargo update

* Clarify a few comments and make one check more robust

* Use a string for 27.0 in .github

* Remove optional from no-longer-optional dependencies in processor

* Add alloy to git deny exception

* Fix no longer optional specification in processor's binaries feature

* Use a version of foundry from 2024

* Correct fetching Bitcoin TXs in the processor docker tests

* Update rustls to resolve RUSTSEC warnings

* Use the monthly nightly foundry, not the deleted daily nightly
2024-04-21 06:02:12 -04:00
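A sketch of the "Modularize Eventuality" item referenced in the list above; the trait shape and names are illustrative, not the processor's actual definitions:

// Illustrative only: the processor's real trait differs in shape and naming.
trait Eventuality {
  // A small identifier sufficient to claim this Eventuality was resolved,
  // replacing the prior use of a transaction ID
  type Claim;
  // The full resolution, nearly deprecating the Transaction trait
  type Completion;

  // Confirms a claimed completion actually satisfies this Eventuality,
  // removing the need for a non-test get_transaction
  fn confirm_completion(&self, completion: &Self::Completion) -> Option<Self::Claim>;
}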
Luke Parker
43083dfd49 Remove redundant log from tendermint lib 2024-04-21 05:32:41 -04:00
Luke Parker
523d2ac911 Rewrite tendermint's message handling loop to much more clearly match the paper (#560)
* Rewrite tendermint's message handling loop to much more clearly match the paper

No longer checks relevant branches upon messages, yet all branches upon any
state change. This is slower, yet easier to review and likely without one or
two rare edge cases.

When reviewing, please see page 5 of https://arxiv.org/pdf/1807.04938.pdf.
Lines from the specified algorithm can be found in the code by searching for
"// L".

* Sane rebroadcasting of consensus messages

Instead of broadcasting the last n messages on the Tributary side of things, we
now have the machine rebroadcast the message tape for the current block.

* Only rebroadcast messages which didn't error in some way

* Only rebroadcast our own messages for tendermint
2024-04-21 05:30:31 -04:00
Luke Parker
fd4f247917 Correct log which didn't work as intended 2024-04-20 19:54:16 -04:00
Luke Parker
ac9e356af4 Correct log targets in tendermint-machine 2024-04-20 19:15:15 -04:00
Luke Parker
bba7d2a356 Better logs in tendermint-machine 2024-04-20 18:13:44 -04:00
Luke Parker
4c349ae605 Redo how tendermint-machine checks if messages were prior sent
Instead of saving, for every message, whether it was sent or not, we track the
latest block/round participated in. These two keys are comprehensive to all
prior block/rounds. We then use three keys for the latest round's
proposal/prevote/precommit, enabling tracking current state as necessary to
prevent equivocations with just 5 keys.

The storage of the latest three messages also enables proper rebroadcasting of
the current round (not implemented in this commit).
2024-04-20 18:10:51 -04:00
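A sketch of the five-key scheme this commit describes; key names and the in-memory shape are illustrative, not tendermint-machine's actual storage:

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash)]
enum Step { Proposal, Prevote, Precommit }

#[derive(Default)]
struct SentTracker {
  // Two keys comprehensive to all prior blocks/rounds
  latest_block: u64,
  latest_round: u32,
  // Three keys for the latest round's messages, also enabling rebroadcast
  latest: HashMap<Step, Vec<u8>>,
}

impl SentTracker {
  // Returns true if this block/round/step was already participated in, so
  // sending `msg` would re-send or equivocate
  fn already_sent(&mut self, block: u64, round: u32, step: Step, msg: &[u8]) -> bool {
    // Anything before the latest block/round was necessarily already handled
    if (block, round) < (self.latest_block, self.latest_round) {
      return true;
    }
    // Entering a new block/round resets the per-step messages
    if (block, round) > (self.latest_block, self.latest_round) {
      self.latest_block = block;
      self.latest_round = round;
      self.latest.clear();
    }
    if self.latest.contains_key(&step) {
      return true;
    }
    self.latest.insert(step, msg.to_vec());
    false
  }
}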
akildemir
826f9986e4 implement setting initial values for coins 2024-04-20 10:53:25 +03:00
Luke Parker
a4428761f7 Bitcoin 27.0 2024-04-19 08:00:17 -04:00
Luke Parker
940e9553fd Add missing crates to GH workflows 2024-04-19 06:12:33 -04:00
Luke Parker
593aefd229 Extend time in sync test 2024-04-18 02:51:38 -04:00
Luke Parker
5830c2463d fmt 2024-04-18 02:03:28 -04:00
Luke Parker
bcc88c3e86 Don't broadcast added blocks
Online validators should inherently have them. Offline validators will receive
from the sync protocol.

This does somewhat eliminate the class of nodes who would follow the blockchain
(without validating it), yet that's fine for the performance benefit.
2024-04-18 01:48:11 -04:00
Luke Parker
fea16df567 Only reply to heartbeats after a certain distance 2024-04-18 01:39:34 -04:00
Luke Parker
4960c3222e Ensure we don't reply to stale heartbeats 2024-04-18 01:24:38 -04:00
Luke Parker
6b4df4f2c0 Only have some nodes respond to latent heartbeats
Also only respond if they're more than 2 blocks behind to minimize redundant
sending of blocks.
2024-04-17 21:54:10 -04:00
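An illustrative predicate for the behavior described above (names are hypothetical; the coordinator's actual check differs):

const BLOCKS_BEHIND_THRESHOLD: u64 = 2;

// Only a subset of nodes respond, and only if the heartbeat's sender is more
// than two blocks behind, minimizing redundant sending of blocks
fn should_respond(selected_to_respond: bool, our_tip: u64, their_tip: u64) -> bool {
  selected_to_respond && (our_tip.saturating_sub(their_tip) > BLOCKS_BEHIND_THRESHOLD)
}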
akildemir
7041dbeb0b make remove liquidity an authorized call 2024-04-17 13:54:23 +03:00
akildemir
df774d153c Merge branch 'develop' of https://github.com/serai-dex/serai into emissions 2024-04-17 13:34:46 +03:00
Luke Parker
dac46c8d7d Correct comment in VS pallet 2024-04-12 20:38:31 -04:00
expiredhotdog
db2e8376df use multiscalar_mul for CLSAG (#553)
* use multiscalar_mul for CLSAG

* use multiscalar_mul for CLSAG signing

* use OnceLock for basepoint precomputation
2024-04-12 19:52:56 -04:00
Luke Parker
33dd412e67 Add bootnode code prior used in testnet-internal (#554)
* Add bootnode code prior used in testnet-internal

Also performs the devnet/testnet differentiation done since the testnet branch.

* Fixes

* fmt
2024-04-12 00:38:40 -04:00
Luke Parker
fcad402186 cargo update
Resolves deny error caused by h2.
2024-04-10 06:34:01 -04:00
Boog900
ab4d79628d fix CLSAG verification.
We were not setting c1 to the last calculated c during verification, instead keeping it set to the one provided in the signature.
2024-04-10 05:59:06 -04:00
157 changed files with 8806 additions and 2349 deletions

View File

@@ -5,7 +5,7 @@ inputs:
version:
description: "Version to download and run"
required: false
default: 24.0.1
default: "27.0"
runs:
using: "composite"

View File

@@ -10,7 +10,7 @@ inputs:
bitcoin-version:
description: "Bitcoin version to download and run as a regtest node"
required: false
default: 24.0.1
default: "27.0"
runs:
using: "composite"
@@ -19,9 +19,9 @@ runs:
uses: ./.github/actions/build-dependencies
- name: Install Foundry
uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
with:
version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
cache: false
- name: Run a Monero Regtest Node

View File

@@ -1 +1 @@
nightly-2024-02-07
nightly-2024-05-01

View File

@@ -30,6 +30,7 @@ jobs:
run: |
GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-p bitcoin-serai \
-p alloy-simple-request-transport \
-p ethereum-serai \
-p monero-generators \
-p monero-serai

View File

@@ -28,4 +28,5 @@ jobs:
-p std-shims \
-p zalloc \
-p serai-db \
-p serai-env
-p serai-env \
-p simple-request

View File

@@ -37,4 +37,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run coordinator Docker tests
run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -19,4 +19,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run Full Stack Docker tests
run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -33,4 +33,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run message-queue Docker tests
run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -37,4 +37,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run processor Docker tests
run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -33,4 +33,4 @@ jobs:
uses: ./.github/actions/build-dependencies
- name: Run Reproducible Runtime tests
run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features

View File

@@ -43,6 +43,7 @@ jobs:
-p tendermint-machine \
-p tributary-chain \
-p serai-coordinator \
-p serai-orchestrator \
-p serai-docker-tests
test-substrate:
@@ -64,7 +65,9 @@ jobs:
-p serai-validator-sets-pallet \
-p serai-in-instructions-primitives \
-p serai-in-instructions-pallet \
-p serai-signals-primitives \
-p serai-signals-pallet \
-p serai-abi \
-p serai-runtime \
-p serai-node

Cargo.lock (generated, 1633 changed lines): file diff suppressed because it is too large

View File

@@ -36,6 +36,7 @@ members = [
"crypto/schnorrkel",
"coins/bitcoin",
"coins/ethereum/alloy-simple-request-transport",
"coins/ethereum",
"coins/monero/generators",
"coins/monero",

View File

@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
msg: &[u8],
) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() {
panic!("message was passed to the TransactionMachine when it generates its own");
panic!("message was passed to the TransactionSignMachine when it generates its own");
}
let commitments = (0 .. self.sigs.len())

View File

@@ -1,7 +1,3 @@
# Solidity build outputs
cache
artifacts
# Auto-generated ABI files
src/abi/schnorr.rs
src/abi/router.rs

View File

@@ -18,28 +18,29 @@ workspace = true
[dependencies]
thiserror = { version = "1", default-features = false }
eyre = { version = "0.6", default-features = false }
sha3 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
ethers-core = { version = "2", default-features = false }
ethers-providers = { version = "2", default-features = false }
ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
[build-dependencies]
ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
serde = { version = "1", default-features = false, features = ["std"] }
serde_json = { version = "1", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
sha2 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
alloy-core = { version = "0.7", default-features = false }
alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] }
alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[dev-dependencies]
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
tokio = { version = "1", features = ["macros"] }
alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[features]
tests = []

View File

@@ -3,6 +3,12 @@
This package contains Ethereum-related functionality, specifically deploying and
interacting with Serai contracts.
While `monero-serai` and `bitcoin-serai` are general purpose libraries,
`ethereum-serai` is Serai specific. If any of the utilities are generally
desired, please fork and maintain your own copy to ensure the desired
functionality is preserved, or open an issue to request we make this library
general purpose.
### Dependencies
- solc

View File

@@ -0,0 +1,29 @@
[package]
name = "alloy-simple-request-transport"
version = "0.1.0"
description = "A transport for alloy based off simple-request"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
tower = "0.4"
serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", default-features = false }
alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false }
[features]
default = ["tls"]
tls = ["simple-request/tls"]

View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -0,0 +1,4 @@
# Alloy Simple Request Transport
A transport for alloy based on simple-request, a small HTTP client built around
hyper.

View File

@@ -0,0 +1,60 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
use core::task;
use std::io;
use alloy_json_rpc::{RequestPacket, ResponsePacket};
use alloy_transport::{TransportError, TransportErrorKind, TransportFut};
use simple_request::{hyper, Request, Client};
use tower::Service;
#[derive(Clone, Debug)]
pub struct SimpleRequest {
client: Client,
url: String,
}
impl SimpleRequest {
pub fn new(url: String) -> Self {
Self { client: Client::with_connection_pool(), url }
}
}
impl Service<RequestPacket> for SimpleRequest {
type Response = ResponsePacket;
type Error = TransportError;
type Future = TransportFut<'static>;
#[inline]
fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
task::Poll::Ready(Ok(()))
}
#[inline]
fn call(&mut self, req: RequestPacket) -> Self::Future {
let inner = self.clone();
Box::pin(async move {
let packet = req.serialize().map_err(TransportError::SerError)?;
let request = Request::from(
hyper::Request::post(&inner.url)
.header("Content-Type", "application/json")
.body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
.unwrap(),
);
let mut res = inner
.client
.request(request)
.await
.map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
.body()
.await
.map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;
serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
})
}
}

View File

@@ -1,7 +1,5 @@
use std::process::Command;
use ethers_contract::Abigen;
fn main() {
println!("cargo:rerun-if-changed=contracts/*");
println!("cargo:rerun-if-changed=artifacts/*");
@@ -21,22 +19,23 @@ fn main() {
"--base-path", ".",
"-o", "./artifacts", "--overwrite",
"--bin", "--abi",
"--optimize",
"./contracts/Schnorr.sol", "./contracts/Router.sol",
"--via-ir", "--optimize",
"./contracts/IERC20.sol",
"./contracts/Schnorr.sol",
"./contracts/Deployer.sol",
"./contracts/Sandbox.sol",
"./contracts/Router.sol",
"./src/tests/contracts/Schnorr.sol",
"./src/tests/contracts/ERC20.sol",
"--no-color",
];
assert!(Command::new("solc").args(args).status().unwrap().success());
Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
.unwrap()
.generate()
.unwrap()
.write_to_file("./src/abi/schnorr.rs")
.unwrap();
Abigen::new("Router", "./artifacts/Router.abi")
.unwrap()
.generate()
.unwrap()
.write_to_file("./src/abi/router.rs")
.unwrap();
let solc = Command::new("solc").args(args).output().unwrap();
assert!(solc.status.success());
for line in String::from_utf8(solc.stderr).unwrap().lines() {
assert!(!line.starts_with("Error:"));
}
}

View File

@@ -0,0 +1,52 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
/*
The expected deployment process of the Router is as follows:
1) A transaction deploying Deployer is made. Then, a deterministic signature is
created such that an account with an unknown private key is the creator of
the contract. Anyone can fund this address, and once anyone does, the
transaction deploying Deployer can be published by anyone. No other
transaction may be made from that account.
2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
While such attacks would still be feasible if the Deployer's address was
controllable, the usage of a deterministic signature with a NUMS method
prevents that.
This doesn't have any denial-of-service risks and will resolve once anyone steps
forward as deployer. This does fail to guarantee an identical address across
every chain, though it lets anyone efficiently ask the Deployer for the address
(with the Deployer having an identical address on every chain).
Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the
Deployer contract to use a consistent salt for the Router, yet the Router must
be deployed with a specific public key for Serai. Since Ethereum isn't able to
determine a valid public key (one which is the result of a Serai DKG) from a dishonest
public key, we have to allow multiple deployments with Serai being the one to
determine which to use.
The alternative would be to have a council publish the Serai key on-Ethereum,
with Serai verifying the published result. This would introduce a DoS risk in
the council not publishing the correct key/not publishing any key.
*/
contract Deployer {
event Deployment(bytes32 indexed init_code_hash, address created);
error DeploymentFailed();
function deploy(bytes memory init_code) external {
address created;
assembly {
created := create(0, add(init_code, 0x20), mload(init_code))
}
if (created == address(0)) {
revert DeploymentFailed();
}
// These may be emitted out of order upon re-entrancy
emit Deployment(keccak256(init_code), created);
}
}
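The deployment flow the comment above specifies, sketched with the ethereum-serai functions appearing later in this diff (the module path and error handling are assumptions):

use ethereum_serai::deployer::Deployer; // path assumed

fn deployment_flow() {
  // 1) A deterministically-signed transaction from an account with no known
  //    private key; anyone may fund that account, then anyone may publish it
  let tx = Deployer::deployment_tx();
  let account_to_fund = tx.recover_signer().expect("deterministic signature was invalid");
  // The Deployer's address is a pure function of that signer and nonce 0,
  // hence identical on every chain
  let deployer_address = Deployer::address();
  // 2) Anyone then deploys the Router through the Deployer, whose sequential
  //    nonce prevents the 2**80 meet-in-the-middle attacks described above
  let _ = (account_to_fund, deployer_address);
}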

View File

@@ -0,0 +1,20 @@
// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;
interface IERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
event Approval(address indexed owner, address indexed spender, uint256 value);
function name() external view returns (string memory);
function symbol() external view returns (string memory);
function decimals() external view returns (uint8);
function totalSupply() external view returns (uint256);
function balanceOf(address owner) external view returns (uint256);
function transfer(address to, uint256 value) external returns (bool);
function transferFrom(address from, address to, uint256 value) external returns (bool);
function approve(address spender, uint256 value) external returns (bool);
function allowance(address owner, address spender) external view returns (uint256);
}

View File

@@ -1,27 +1,24 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "./IERC20.sol";
import "./Schnorr.sol";
import "./Sandbox.sol";
contract Router is Schnorr {
// Contract initializer
// TODO: Replace with a MuSig of the genesis validators
address public initializer;
// Nonce is incremented for each batch of transactions executed
contract Router {
// Nonce is incremented for each batch of transactions executed/key update
uint256 public nonce;
// fixed parity for the public keys used in this contract
uint8 constant public KEY_PARITY = 27;
// current public key's x-coordinate
// note: this key must always use the fixed parity defined above
// Current public key's x-coordinate
// This key must always have the parity defined within the Schnorr contract
bytes32 public seraiKey;
struct OutInstruction {
address to;
Call[] calls;
uint256 value;
bytes data;
}
struct Signature {
@@ -29,62 +26,197 @@ contract Router is Schnorr {
bytes32 s;
}
event SeraiKeyUpdated(
uint256 indexed nonce,
bytes32 indexed key,
Signature signature
);
event InInstruction(
address indexed from,
address indexed coin,
uint256 amount,
bytes instruction
);
// success is a uint256 representing a bitfield of transaction successes
event Executed(uint256 nonce, bytes32 batch, uint256 success);
event Executed(
uint256 indexed nonce,
bytes32 indexed batch,
uint256 success,
Signature signature
);
// error types
error NotInitializer();
error AlreadyInitialized();
error InvalidKey();
error InvalidSignature();
error InvalidAmount();
error FailedTransfer();
error TooManyTransactions();
constructor() {
initializer = msg.sender;
modifier _updateSeraiKeyAtEndOfFn(
uint256 _nonce,
bytes32 key,
Signature memory sig
) {
if (
(key == bytes32(0)) ||
((bytes32(uint256(key) % Schnorr.Q)) != key)
) {
revert InvalidKey();
}
_;
seraiKey = key;
emit SeraiKeyUpdated(_nonce, key, sig);
}
// initSeraiKey can be called by the contract initializer to set the first
// public key, only if the public key has yet to be set.
function initSeraiKey(bytes32 _seraiKey) external {
if (msg.sender != initializer) revert NotInitializer();
if (seraiKey != 0) revert AlreadyInitialized();
if (_seraiKey == bytes32(0)) revert InvalidKey();
seraiKey = _seraiKey;
constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
0,
_seraiKey,
Signature({ c: bytes32(0), s: bytes32(0) })
) {
nonce = 1;
}
// updateSeraiKey validates the given Schnorr signature against the current public key,
// and if successful, updates the contract's public key to the given one.
// updateSeraiKey validates the given Schnorr signature against the current
// public key, and if successful, updates the contract's public key to the
// given one.
function updateSeraiKey(
bytes32 _seraiKey,
Signature memory sig
) public {
if (_seraiKey == bytes32(0)) revert InvalidKey();
bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
seraiKey = _seraiKey;
Signature calldata sig
) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
bytes memory message =
abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
nonce++;
if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
revert InvalidSignature();
}
}
// execute accepts a list of transactions to execute as well as a Schnorr signature.
function inInstruction(
address coin,
uint256 amount,
bytes memory instruction
) external payable {
if (coin == address(0)) {
if (amount != msg.value) {
revert InvalidAmount();
}
} else {
(bool success, bytes memory res) =
address(coin).call(
abi.encodeWithSelector(
IERC20.transferFrom.selector,
msg.sender,
address(this),
amount
)
);
// Require there was nothing returned, which is done by some non-standard
// tokens, or that the ERC20 contract did in fact return true
bool nonStandardResOrTrue =
(res.length == 0) || abi.decode(res, (bool));
if (!(success && nonStandardResOrTrue)) {
revert FailedTransfer();
}
}
/*
Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
The amount instructed to transfer may not actually be the amount
transferred.
If we add nonReentrant to every single function which can affect the
balance, we can check the amount exactly matches. This prevents transfers of
less value than expected occurring, at least, not without an additional
transfer to top up the difference (which isn't routed through this contract
and accordingly isn't trying to artificially create events).
If we don't add nonReentrant, a transfer can be started, and then a new
transfer for the difference can follow it up (again and again until a
rounding error is reached). This contract would believe all transfers were
done in full, despite each only being done in part (except for the last
one).
Given that fee-on-transfer tokens aren't intended to be supported, that the
only token planned to be supported is Dai (which doesn't have any
fee-on-transfer logic), and that fee-on-transfer tokens aren't even able to
be supported at this time, we simply classify this entire class of tokens as
non-standard implementations which induce undefined behavior. It is the Serai
network's role not to add support for any non-standard implementations.
*/
emit InInstruction(msg.sender, coin, amount, instruction);
}
// execute accepts a list of transactions to execute as well as a signature.
// if signature verification passes, the given transactions are executed.
// if signature verification fails, this function will revert.
function execute(
OutInstruction[] calldata transactions,
Signature memory sig
) public {
if (transactions.length > 256) revert TooManyTransactions();
Signature calldata sig
) external {
if (transactions.length > 256) {
revert TooManyTransactions();
}
bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
bytes memory message =
abi.encode("execute", block.chainid, nonce, transactions);
uint256 executed_with_nonce = nonce;
// This prevents re-entrancy from causing double spends yet does allow
// out-of-order execution via re-entrancy
nonce++;
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
revert InvalidSignature();
}
uint256 successes;
for(uint256 i = 0; i < transactions.length; i++) {
(bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
for (uint256 i = 0; i < transactions.length; i++) {
bool success;
// If there are no calls, send to `to` the value
if (transactions[i].calls.length == 0) {
(success, ) = transactions[i].to.call{
value: transactions[i].value,
gas: 5_000
}("");
} else {
// If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
// calls through that
//
// We could use a single sandbox in order to reduce gas costs, yet that
// risks one person creating an approval that's hooked before another
// user's intended action executes, in order to drain their coins
//
// While technically, that would be a flaw in the sandboxed flow, this
// is robust and prevents such flaws from being possible
//
// We also don't want people to set state via the Sandbox and expect it to
// be available in the future, when anyone else could set a distinct value
Sandbox sandbox = new Sandbox();
(success, ) = address(sandbox).call{
value: transactions[i].value,
// TODO: Have the Call specify the gas up front
gas: 350_000
}(
abi.encodeWithSelector(
Sandbox.sandbox.selector,
transactions[i].calls
)
);
}
assembly {
successes := or(successes, shl(i, success))
}
}
emit Executed(nonce, message, successes);
emit Executed(
executed_with_nonce,
keccak256(message),
successes,
sig
);
}
}
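Given the shl/or assembly above, bit i of the emitted bitfield corresponds to OutInstruction i. A sketch of reading it from the event's uint256, taken as 32 big-endian bytes (helper is illustrative):

// Returns whether OutInstruction `i` succeeded, per the Executed event's
// `success` bitfield (a uint256 serialized as 32 big-endian bytes)
fn instruction_succeeded(successes: [u8; 32], i: usize) -> bool {
  assert!(i < 256);
  ((successes[31 - (i / 8)] >> (i % 8)) & 1) == 1
}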

View File

@@ -0,0 +1,48 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;
struct Call {
address to;
uint256 value;
bytes data;
}
// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
error AlreadyCalled();
error CallsFailed();
function sandbox(Call[] calldata calls) external payable {
// Prevent re-entrancy due to this executing arbitrary calls from anyone
// and anywhere
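// tload/tstore are the transient storage opcodes from EIP-1153, cleared at
// the end of the transaction, hence this file's pragma of ^0.8.24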
bool called;
assembly { called := tload(0) }
if (called) {
revert AlreadyCalled();
}
assembly { tstore(0, 1) }
// Execute the calls, starting from 1
for (uint256 i = 1; i < calls.length; i++) {
(bool success, ) =
calls[i].to.call{ value: calls[i].value }(calls[i].data);
// If this call failed, execute the fallback (call 0)
if (!success) {
(success, ) =
calls[0].to.call{ value: address(this).balance }(calls[0].data);
// If this call also failed, revert entirely
if (!success) {
revert CallsFailed();
}
return;
}
}
// We don't clear the re-entrancy guard as this contract should never be
// called again, so there's no reason to spend the effort
}
}

View File

@@ -2,38 +2,43 @@
pragma solidity ^0.8.0;
// see https://github.com/noot/schnorr-verify for implementation details
contract Schnorr {
library Schnorr {
// secp256k1 group order
uint256 constant public Q =
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
error InvalidSOrA();
error InvalidSignature();
// Fixed parity for the public keys used in this contract
// This avoids spending a word passing the parity in a similar style to
// Bitcoin's Taproot
uint8 constant public KEY_PARITY = 27;
// parity := public key y-coord parity (27 or 28)
// px := public key x-coord
error InvalidSOrA();
error MalformedSignature();
// px := public key x-coord, where the public key has a parity of KEY_PARITY
// message := 32-byte hash of the message
// c := schnorr signature challenge
// s := schnorr signature
function verify(
uint8 parity,
bytes32 px,
bytes32 message,
bytes memory message,
bytes32 c,
bytes32 s
) public view returns (bool) {
// ecrecover = (m, v, r, s);
) internal pure returns (bool) {
// ecrecover = (m, v, r, s) -> key
// We instead pass the following to obtain the nonce (not the key)
// Then we hash it and verify it matches the challenge
bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
// For safety, we want each input to ecrecover to be non-zero (sa, px, ca)
// The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
// That leaves us to check `sa` is non-zero
if (sa == 0) revert InvalidSOrA();
// the ecrecover precompile implementation checks that the `r` and `s`
// inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
// check if they're zero.
address R = ecrecover(sa, parity, px, ca);
if (R == address(0)) revert InvalidSignature();
return c == keccak256(
abi.encodePacked(R, uint8(parity), px, block.chainid, message)
);
address R = ecrecover(sa, KEY_PARITY, px, ca);
if (R == address(0)) revert MalformedSignature();
// Check the signature is correct by rebuilding the challenge
return c == keccak256(abi.encodePacked(R, px, message));
}
}
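The algebra behind this use of ecrecover, sketched for reference (a standard derivation, not part of the diff). ecrecover(h, v, r, s') returns the address of the point r^{-1}(s'R - hG), where R is the point with x-coordinate r and parity v. Passing r = p_x (so R = A, the signer's key), h = -sp_x, and s' = -cp_x recovers the nonce commitment:

\begin{align*}
  \mathrm{ecrecover}(-s\,p_x,\ v,\ p_x,\ -c\,p_x)
    &= \mathrm{addr}\!\left(p_x^{-1}\big((-c\,p_x)A - (-s\,p_x)G\big)\right) \\
    &= \mathrm{addr}(sG - cA) = \mathrm{addr}(R_{\mathrm{nonce}}) \\
  \text{accept iff}\quad c &= \mathrm{keccak256}(\mathrm{addr}(R_{\mathrm{nonce}}) \parallel p_x \parallel m)
\end{align*}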

View File

@@ -1,6 +1,37 @@
use alloy_sol_types::sol;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
pub(crate) mod schnorr;
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
use super::*;
sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
pub(crate) mod router;
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
use super::*;
sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
use super::*;
sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;

File diff suppressed because it is too large

View File

@@ -0,0 +1,410 @@
pub use schnorr::*;
/// This module was auto-generated with ethers-rs Abigen.
/// More information at: <https://github.com/gakonst/ethers-rs>
#[allow(
clippy::enum_variant_names,
clippy::too_many_arguments,
clippy::upper_case_acronyms,
clippy::type_complexity,
dead_code,
non_camel_case_types,
)]
pub mod schnorr {
#[allow(deprecated)]
fn __abi() -> ::ethers_core::abi::Abi {
::ethers_core::abi::ethabi::Contract {
constructor: ::core::option::Option::None,
functions: ::core::convert::From::from([
(
::std::borrow::ToOwned::to_owned("Q"),
::std::vec![
::ethers_core::abi::ethabi::Function {
name: ::std::borrow::ToOwned::to_owned("Q"),
inputs: ::std::vec![],
outputs: ::std::vec![
::ethers_core::abi::ethabi::Param {
name: ::std::string::String::new(),
kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("uint256"),
),
},
],
constant: ::core::option::Option::None,
state_mutability: ::ethers_core::abi::ethabi::StateMutability::View,
},
],
),
(
::std::borrow::ToOwned::to_owned("verify"),
::std::vec![
::ethers_core::abi::ethabi::Function {
name: ::std::borrow::ToOwned::to_owned("verify"),
inputs: ::std::vec![
::ethers_core::abi::ethabi::Param {
name: ::std::borrow::ToOwned::to_owned("parity"),
kind: ::ethers_core::abi::ethabi::ParamType::Uint(8usize),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("uint8"),
),
},
::ethers_core::abi::ethabi::Param {
name: ::std::borrow::ToOwned::to_owned("px"),
kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
32usize,
),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("bytes32"),
),
},
::ethers_core::abi::ethabi::Param {
name: ::std::borrow::ToOwned::to_owned("message"),
kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
32usize,
),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("bytes32"),
),
},
::ethers_core::abi::ethabi::Param {
name: ::std::borrow::ToOwned::to_owned("c"),
kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
32usize,
),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("bytes32"),
),
},
::ethers_core::abi::ethabi::Param {
name: ::std::borrow::ToOwned::to_owned("s"),
kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes(
32usize,
),
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("bytes32"),
),
},
],
outputs: ::std::vec![
::ethers_core::abi::ethabi::Param {
name: ::std::string::String::new(),
kind: ::ethers_core::abi::ethabi::ParamType::Bool,
internal_type: ::core::option::Option::Some(
::std::borrow::ToOwned::to_owned("bool"),
),
},
],
constant: ::core::option::Option::None,
state_mutability: ::ethers_core::abi::ethabi::StateMutability::View,
},
],
),
]),
events: ::std::collections::BTreeMap::new(),
errors: ::core::convert::From::from([
(
::std::borrow::ToOwned::to_owned("InvalidSOrA"),
::std::vec![
::ethers_core::abi::ethabi::AbiError {
name: ::std::borrow::ToOwned::to_owned("InvalidSOrA"),
inputs: ::std::vec![],
},
],
),
(
::std::borrow::ToOwned::to_owned("InvalidSignature"),
::std::vec![
::ethers_core::abi::ethabi::AbiError {
name: ::std::borrow::ToOwned::to_owned("InvalidSignature"),
inputs: ::std::vec![],
},
],
),
]),
receive: false,
fallback: false,
}
}
///The parsed JSON ABI of the contract.
pub static SCHNORR_ABI: ::ethers_contract::Lazy<::ethers_core::abi::Abi> = ::ethers_contract::Lazy::new(
__abi,
);
pub struct Schnorr<M>(::ethers_contract::Contract<M>);
impl<M> ::core::clone::Clone for Schnorr<M> {
fn clone(&self) -> Self {
Self(::core::clone::Clone::clone(&self.0))
}
}
impl<M> ::core::ops::Deref for Schnorr<M> {
type Target = ::ethers_contract::Contract<M>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<M> ::core::ops::DerefMut for Schnorr<M> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl<M> ::core::fmt::Debug for Schnorr<M> {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
f.debug_tuple(::core::stringify!(Schnorr)).field(&self.address()).finish()
}
}
impl<M: ::ethers_providers::Middleware> Schnorr<M> {
/// Creates a new contract instance with the specified `ethers` client at
/// `address`. The contract derefs to a `ethers::Contract` object.
pub fn new<T: Into<::ethers_core::types::Address>>(
address: T,
client: ::std::sync::Arc<M>,
) -> Self {
Self(
::ethers_contract::Contract::new(
address.into(),
SCHNORR_ABI.clone(),
client,
),
)
}
///Calls the contract's `Q` (0xe493ef8c) function
pub fn q(
&self,
) -> ::ethers_contract::builders::ContractCall<M, ::ethers_core::types::U256> {
self.0
.method_hash([228, 147, 239, 140], ())
.expect("method not found (this should never happen)")
}
///Calls the contract's `verify` (0x9186da4c) function
pub fn verify(
&self,
parity: u8,
px: [u8; 32],
message: [u8; 32],
c: [u8; 32],
s: [u8; 32],
) -> ::ethers_contract::builders::ContractCall<M, bool> {
self.0
.method_hash([145, 134, 218, 76], (parity, px, message, c, s))
.expect("method not found (this should never happen)")
}
}
impl<M: ::ethers_providers::Middleware> From<::ethers_contract::Contract<M>>
for Schnorr<M> {
fn from(contract: ::ethers_contract::Contract<M>) -> Self {
Self::new(contract.address(), contract.client())
}
}
///Custom Error type `InvalidSOrA` with signature `InvalidSOrA()` and selector `0x4e99a12e`
#[derive(
Clone,
::ethers_contract::EthError,
::ethers_contract::EthDisplay,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
#[etherror(name = "InvalidSOrA", abi = "InvalidSOrA()")]
pub struct InvalidSOrA;
///Custom Error type `InvalidSignature` with signature `InvalidSignature()` and selector `0x8baa579f`
#[derive(
Clone,
::ethers_contract::EthError,
::ethers_contract::EthDisplay,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
#[etherror(name = "InvalidSignature", abi = "InvalidSignature()")]
pub struct InvalidSignature;
///Container type for all of the contract's custom errors
#[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)]
pub enum SchnorrErrors {
InvalidSOrA(InvalidSOrA),
InvalidSignature(InvalidSignature),
/// The standard solidity revert string, with selector
/// Error(string) -- 0x08c379a0
RevertString(::std::string::String),
}
impl ::ethers_core::abi::AbiDecode for SchnorrErrors {
fn decode(
data: impl AsRef<[u8]>,
) -> ::core::result::Result<Self, ::ethers_core::abi::AbiError> {
let data = data.as_ref();
if let Ok(decoded) = <::std::string::String as ::ethers_core::abi::AbiDecode>::decode(
data,
) {
return Ok(Self::RevertString(decoded));
}
if let Ok(decoded) = <InvalidSOrA as ::ethers_core::abi::AbiDecode>::decode(
data,
) {
return Ok(Self::InvalidSOrA(decoded));
}
if let Ok(decoded) = <InvalidSignature as ::ethers_core::abi::AbiDecode>::decode(
data,
) {
return Ok(Self::InvalidSignature(decoded));
}
Err(::ethers_core::abi::Error::InvalidData.into())
}
}
impl ::ethers_core::abi::AbiEncode for SchnorrErrors {
fn encode(self) -> ::std::vec::Vec<u8> {
match self {
Self::InvalidSOrA(element) => {
::ethers_core::abi::AbiEncode::encode(element)
}
Self::InvalidSignature(element) => {
::ethers_core::abi::AbiEncode::encode(element)
}
Self::RevertString(s) => ::ethers_core::abi::AbiEncode::encode(s),
}
}
}
impl ::ethers_contract::ContractRevert for SchnorrErrors {
fn valid_selector(selector: [u8; 4]) -> bool {
match selector {
[0x08, 0xc3, 0x79, 0xa0] => true,
_ if selector
== <InvalidSOrA as ::ethers_contract::EthError>::selector() => true,
_ if selector
== <InvalidSignature as ::ethers_contract::EthError>::selector() => {
true
}
_ => false,
}
}
}
impl ::core::fmt::Display for SchnorrErrors {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
match self {
Self::InvalidSOrA(element) => ::core::fmt::Display::fmt(element, f),
Self::InvalidSignature(element) => ::core::fmt::Display::fmt(element, f),
Self::RevertString(s) => ::core::fmt::Display::fmt(s, f),
}
}
}
impl ::core::convert::From<::std::string::String> for SchnorrErrors {
fn from(value: String) -> Self {
Self::RevertString(value)
}
}
impl ::core::convert::From<InvalidSOrA> for SchnorrErrors {
fn from(value: InvalidSOrA) -> Self {
Self::InvalidSOrA(value)
}
}
impl ::core::convert::From<InvalidSignature> for SchnorrErrors {
fn from(value: InvalidSignature) -> Self {
Self::InvalidSignature(value)
}
}
///Container type for all input parameters for the `Q` function with signature `Q()` and selector `0xe493ef8c`
#[derive(
Clone,
::ethers_contract::EthCall,
::ethers_contract::EthDisplay,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
#[ethcall(name = "Q", abi = "Q()")]
pub struct QCall;
///Container type for all input parameters for the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c`
#[derive(
Clone,
::ethers_contract::EthCall,
::ethers_contract::EthDisplay,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
#[ethcall(name = "verify", abi = "verify(uint8,bytes32,bytes32,bytes32,bytes32)")]
pub struct VerifyCall {
pub parity: u8,
pub px: [u8; 32],
pub message: [u8; 32],
pub c: [u8; 32],
pub s: [u8; 32],
}
///Container type for all of the contract's call
#[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)]
pub enum SchnorrCalls {
Q(QCall),
Verify(VerifyCall),
}
impl ::ethers_core::abi::AbiDecode for SchnorrCalls {
fn decode(
data: impl AsRef<[u8]>,
) -> ::core::result::Result<Self, ::ethers_core::abi::AbiError> {
let data = data.as_ref();
if let Ok(decoded) = <QCall as ::ethers_core::abi::AbiDecode>::decode(data) {
return Ok(Self::Q(decoded));
}
if let Ok(decoded) = <VerifyCall as ::ethers_core::abi::AbiDecode>::decode(
data,
) {
return Ok(Self::Verify(decoded));
}
Err(::ethers_core::abi::Error::InvalidData.into())
}
}
impl ::ethers_core::abi::AbiEncode for SchnorrCalls {
fn encode(self) -> Vec<u8> {
match self {
Self::Q(element) => ::ethers_core::abi::AbiEncode::encode(element),
Self::Verify(element) => ::ethers_core::abi::AbiEncode::encode(element),
}
}
}
impl ::core::fmt::Display for SchnorrCalls {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
match self {
Self::Q(element) => ::core::fmt::Display::fmt(element, f),
Self::Verify(element) => ::core::fmt::Display::fmt(element, f),
}
}
}
impl ::core::convert::From<QCall> for SchnorrCalls {
fn from(value: QCall) -> Self {
Self::Q(value)
}
}
impl ::core::convert::From<VerifyCall> for SchnorrCalls {
fn from(value: VerifyCall) -> Self {
Self::Verify(value)
}
}
///Container type for all return fields from the `Q` function with signature `Q()` and selector `0xe493ef8c`
#[derive(
Clone,
::ethers_contract::EthAbiType,
::ethers_contract::EthAbiCodec,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
pub struct QReturn(pub ::ethers_core::types::U256);
///Container type for all return fields from the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c`
#[derive(
Clone,
::ethers_contract::EthAbiType,
::ethers_contract::EthAbiCodec,
Default,
Debug,
PartialEq,
Eq,
Hash
)]
pub struct VerifyReturn(pub bool);
}

View File

@@ -1,91 +1,185 @@
use sha3::{Digest, Keccak256};
use group::ff::PrimeField;
use k256::{
elliptic_curve::{
bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
},
ProjectivePoint, Scalar, U256,
elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
ProjectivePoint, Scalar, U256 as KU256,
};
#[cfg(test)]
use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};
use frost::{
algorithm::{Hram, SchnorrSignature},
curve::Secp256k1,
curve::{Ciphersuite, Secp256k1},
};
use alloy_core::primitives::{Parity, Signature as AlloySignature};
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
use crate::abi::router::{Signature as AbiSignature};
pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
Keccak256::digest(data).into()
alloy_core::primitives::keccak256(data).into()
}
pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
<Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
}
pub fn address(point: &ProjectivePoint) -> [u8; 20] {
let encoded_point = point.to_encoded_point(false);
// Last 20 bytes of the hash of the concatenated x and y coordinates
// We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}
pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
assert!(
tx.chain_id.is_none(),
"chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
);
let sig_hash = tx.signature_hash().0;
let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
loop {
let r_bytes: [u8; 32] = r.to_repr().into();
let s_bytes: [u8; 32] = s.to_repr().into();
let v = Parity::NonEip155(false);
let signature =
AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
let tx = tx.clone().into_signed(signature);
if tx.recover_signer().is_ok() {
return tx;
}
// Re-hash until valid
r = hash_to_scalar(r_bytes.as_ref());
s = hash_to_scalar(s_bytes.as_ref());
}
}
/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
pub A: ProjectivePoint,
pub px: Scalar,
pub parity: u8,
pub(crate) A: ProjectivePoint,
pub(crate) px: Scalar,
}
impl PublicKey {
/// Construct a new `PublicKey`.
///
/// This will return None if the provided point isn't eligible to be a public key (due to
/// bounds such as parity).
#[allow(non_snake_case)]
pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
let affine = A.to_affine();
let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
if parity != 27 {
// Only allow even keys to save a word within Ethereum
let is_odd = bool::from(affine.y_is_odd());
if is_odd {
None?;
}
let x_coord = affine.x();
let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
// Return None if a reduction would occur
// Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
// headache/concern to have
// This does ban a trivial amount of public keys
if x_coord_scalar.to_repr() != x_coord {
None?;
}
Some(PublicKey { A, px: x_coord_scalar, parity })
Some(PublicKey { A, px: x_coord_scalar })
}
pub fn point(&self) -> ProjectivePoint {
self.A
}
pub(crate) fn eth_repr(&self) -> [u8; 32] {
self.px.to_repr().into()
}
#[cfg(test)]
pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
#[allow(non_snake_case)]
let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
}
}
/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
#[allow(non_snake_case)]
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
let a_encoded_point = A.to_encoded_point(true);
let mut a_encoded = a_encoded_point.as_ref().to_owned();
a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
let x_coord = A.to_affine().x();
let mut data = address(R).to_vec();
data.append(&mut a_encoded);
data.extend(x_coord.as_slice());
data.extend(m);
Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
<Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
}
}
/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
pub(crate) c: Scalar,
pub(crate) s: Scalar,
}
impl Signature {
pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
#[allow(non_snake_case)]
let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
EthereumHram::hram(&R, &public_key.A, message) == self.c
}
/// Construct a new `Signature`.
///
/// This will return None if the signature is invalid.
pub fn new(
public_key: &PublicKey,
chain_id: U256,
m: &[u8],
message: &[u8],
signature: SchnorrSignature<Secp256k1>,
) -> Option<Signature> {
let c = EthereumHram::hram(
&signature.R,
&public_key.A,
&[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
);
let c = EthereumHram::hram(&signature.R, &public_key.A, message);
if !signature.verify(public_key.A, c) {
None?;
}
let res = Signature { c, s: signature.s };
assert!(res.verify(public_key, message));
Some(res)
}
pub fn c(&self) -> Scalar {
self.c
}
pub fn s(&self) -> Scalar {
self.s
}
pub fn to_bytes(&self) -> [u8; 64] {
let mut res = [0; 64];
res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
res
}
pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
let mut reader = bytes.as_slice();
let c = Secp256k1::read_F(&mut reader)?;
let s = Secp256k1::read_F(&mut reader)?;
Ok(Signature { c, s })
}
}
impl From<&Signature> for AbiSignature {
fn from(sig: &Signature) -> AbiSignature {
let c: [u8; 32] = sig.c.to_repr().into();
let s: [u8; 32] = sig.s.to_repr().into();
AbiSignature { c: c.into(), s: s.into() }
}
}
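A minimal sketch of how these pieces compose (using the FROST test helpers exercised later in this PR; the message literal is illustrative):

let (keys, public_key) = key_gen();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let frost_sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), b"msg");
let sig = Signature::new(&public_key, b"msg", frost_sig).expect("signature didn't verify");
assert!(sig.verify(&public_key, b"msg"));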


@@ -0,0 +1,120 @@
use std::sync::Arc;
use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};
use alloy_sol_types::{SolCall, SolEvent};
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::{
Error,
crypto::{self, keccak256, PublicKey},
router::Router,
};
pub use crate::abi::deployer as abi;
/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
/// Obtain the transaction to deploy this contract, already signed.
///
/// The account this transaction is sent from (which is populated in `from`) must be sufficiently
/// funded for this transaction to be submitted. No one knows this account's private key, so ETH
/// sent to it can be neither misappropriated nor returned.
pub fn deployment_tx() -> Signed<TxLegacy> {
let bytecode = include_str!("../artifacts/Deployer.bin");
let bytecode =
Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");
let tx = TxLegacy {
chain_id: None,
nonce: 0,
gas_price: 100_000_000_000u128,
// TODO: Use a more accurate gas limit
gas_limit: 1_000_000u128,
to: TxKind::Create,
value: U256::ZERO,
input: bytecode,
};
crypto::deterministically_sign(&tx)
}
/// Obtain the deterministic address for this contract.
pub fn address() -> [u8; 20] {
let deployer_deployer =
Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
**Address::create(&deployer_deployer, 0)
}
/// Construct a new view of the `Deployer`.
pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
let address = Self::address();
#[cfg(not(test))]
let required_block = BlockNumberOrTag::Finalized;
#[cfg(test)]
let required_block = BlockNumberOrTag::Latest;
let code = provider
.get_code_at(address.into(), required_block.into())
.await
.map_err(|_| Error::ConnectionError)?;
// Contract has yet to be deployed
if code.is_empty() {
return Ok(None);
}
Ok(Some(Self))
}
/// Yield the `ContractCall` necessary to deploy the Router.
pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
TxLegacy {
to: TxKind::Call(Self::address().into()),
input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
gas_limit: 1_000_000,
..Default::default()
}
}
/// Find the first Router deployed with the specified key as its first key.
///
/// This is the Router Serai will use, and is the only way to construct a `Router`.
pub async fn find_router(
&self,
provider: Arc<RootProvider<SimpleRequest>>,
key: &PublicKey,
) -> Result<Option<Router>, Error> {
let init_code = Router::init_code(key);
let init_code_hash = keccak256(&init_code);
#[cfg(not(test))]
let to_block = BlockNumberOrTag::Finalized;
#[cfg(test)]
let to_block = BlockNumberOrTag::Latest;
// Find the first log using this init code (where the init code is binding to the key)
// TODO: Make an abstraction for event filtering (de-duplicating common code)
let filter =
Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
let filter = filter.topic1(B256::from(init_code_hash));
let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let Some(first_log) = logs.first() else { return Ok(None) };
let router = first_log
.log_decode::<abi::Deployment>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.created;
Ok(Some(Router::new(provider, router)))
}
}
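A hedged sketch of the full deployment flow (fund_account and send are the test helpers later in this PR; the funding amount mirrors deployment_tx's gas parameters):

let tx = Deployer::deployment_tx();
// Fund the transaction's deterministic signer with exactly its maximum gas cost
let funding = U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price);
fund_account(&provider, &wallet, tx.recover_signer().unwrap(), funding).await.unwrap();
// ... broadcast the pre-signed `tx`, then:
let deployer = Deployer::new(provider.clone()).await.unwrap().expect("Deployer not deployed");
let receipt = send(&provider, &wallet, deployer.deploy_router(&public_key)).await.unwrap();
assert!(receipt.status());
let router = deployer.find_router(provider.clone(), &public_key).await.unwrap().unwrap();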

coins/ethereum/src/erc20.rs Normal file

@@ -0,0 +1,118 @@
use std::{sync::Arc, collections::HashSet};
use alloy_core::primitives::{Address, B256, U256};
use alloy_sol_types::{SolInterface, SolEvent};
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};
#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
pub id: [u8; 32],
pub from: [u8; 20],
pub amount: U256,
pub data: Vec<u8>,
}
/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
/// Construct a new view of the specified ERC20 contract.
///
/// This checks a contract is deployed at that address yet does not check the contract is
/// actually an ERC20.
pub async fn new(
provider: Arc<RootProvider<SimpleRequest>>,
address: [u8; 20],
) -> Result<Option<Self>, Error> {
let code = provider
.get_code_at(address.into(), BlockNumberOrTag::Finalized.into())
.await
.map_err(|_| Error::ConnectionError)?;
// Contract has yet to be deployed
if code.is_empty() {
return Ok(None);
}
Ok(Some(Self(provider.clone(), Address::from(&address))))
}
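/// Fetch all top-level transfers to the specified address made within the specified block.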
pub async fn top_level_transfers(
&self,
block: u64,
to: [u8; 20],
) -> Result<Vec<TopLevelErc20Transfer>, Error> {
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
let mut to_topic = [0; 32];
to_topic[12 ..].copy_from_slice(&to);
let filter = filter.topic2(B256::from(to_topic));
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let mut handled = HashSet::new();
let mut top_level_transfers = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?;
// If this is a top-level call...
if tx.to == Some(self.1) {
// And we recognize the call...
// Don't validate the encoding as this can't be re-encoded to an identical bytestring due
// to the InInstruction appended
if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
// Extract the top-level call's from/to/value
let (from, call_to, value) = match call {
IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
(from, call_to, value)
}
// Treat any other function selectors as unrecognized
_ => continue,
};
let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;
// Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
// internal transfer
if (log.from != from) || (call_to != to) || (value != log.value) {
continue;
}
// Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
// the only log we handle
if handled.contains(&tx_id) {
continue;
}
handled.insert(tx_id);
// Read the data appended after
let encoded = call.abi_encode();
let data = tx.input.as_ref()[encoded.len() ..].to_vec();
// Push the transfer
top_level_transfers.push(TopLevelErc20Transfer {
// Since we'll only handle one log for this TX, set the ID to the TX ID
id: *tx_id,
from: *log.from.0,
amount: log.value,
data,
});
}
}
}
Ok(top_level_transfers)
}
}
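A short usage sketch (the provider, token address, block number, and recipient are illustrative):

let erc20 = Erc20::new(provider.clone(), token_address).await.unwrap().expect("not deployed");
let transfers = erc20.top_level_transfers(block_number, recipient).await.unwrap();
for transfer in transfers {
// `id` is the transaction hash, as only one transfer per transaction is handled
println!("{:?} sent {} with {} bytes of data", transfer.from, transfer.amount, transfer.data.len());
}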


@@ -1,16 +1,30 @@
use thiserror::Error;
pub use alloy_core;
pub use alloy_consensus;
pub use alloy_rpc_types;
pub use alloy_simple_request_transport;
pub use alloy_rpc_client;
pub use alloy_provider;
pub mod crypto;
pub(crate) mod abi;
pub mod erc20;
pub mod deployer;
pub mod router;
pub mod machine;
#[cfg(test)]
mod tests;
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
#[error("failed to verify Schnorr signature")]
InvalidSignature,
#[error("couldn't make call/send TX")]
ConnectionError,
}


@@ -0,0 +1,414 @@
use std::{
io::{self, Read},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use transcript::{Transcript, RecommendedTranscript};
use group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Secp256k1},
Participant, ThresholdKeys, FrostError,
algorithm::Schnorr,
sign::*,
};
use alloy_core::primitives::U256;
use crate::{
crypto::{PublicKey, EthereumHram, Signature},
router::{
abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
Router,
},
};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
pub to: [u8; 20],
pub value: U256,
pub data: Vec<u8>,
}
impl Call {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut to = [0; 20];
reader.read_exact(&mut to)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
let mut data_len = {
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
};
// A valid DoS would be to claim 4 GB of data is present while only sending 4 bytes
// We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
let mut data = vec![];
while data_len > 0 {
let chunk_len = data_len.min(1024);
let mut chunk = vec![0; chunk_len];
reader.read_exact(&mut chunk)?;
data.extend(&chunk);
data_len -= chunk_len;
}
Ok(Call { to, value, data })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.to)?;
writer.write_all(&self.value.as_le_bytes())?;
let data_len = u32::try_from(self.data.len())
.map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
writer.write_all(&data_len.to_le_bytes())?;
writer.write_all(&self.data)
}
}
impl From<Call> for AbiCall {
fn from(call: Call) -> AbiCall {
AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
Direct([u8; 20]),
Calls(Vec<Call>),
}
impl OutInstructionTarget {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut addr = [0; 20];
reader.read_exact(&mut addr)?;
Ok(OutInstructionTarget::Direct(addr))
}
1 => {
let mut calls_len = [0; 4];
reader.read_exact(&mut calls_len)?;
let calls_len = u32::from_le_bytes(calls_len);
let mut calls = vec![];
for _ in 0 .. calls_len {
calls.push(Call::read(reader)?);
}
Ok(OutInstructionTarget::Calls(calls))
}
_ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
}
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
OutInstructionTarget::Direct(addr) => {
writer.write_all(&[0])?;
writer.write_all(addr)?;
}
OutInstructionTarget::Calls(calls) => {
writer.write_all(&[1])?;
let call_len = u32::try_from(calls.len())
.map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
writer.write_all(&call_len.to_le_bytes())?;
for call in calls {
call.write(writer)?;
}
}
}
Ok(())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
pub target: OutInstructionTarget,
pub value: U256,
}
impl OutInstruction {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let target = OutInstructionTarget::read(reader)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
Ok(OutInstruction { target, value })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.target.write(writer)?;
writer.write_all(&self.value.as_le_bytes())
}
}
impl From<OutInstruction> for AbiOutInstruction {
fn from(instruction: OutInstruction) -> AbiOutInstruction {
match instruction.target {
OutInstructionTarget::Direct(addr) => {
AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
}
OutInstructionTarget::Calls(calls) => AbiOutInstruction {
to: [0; 20].into(),
calls: calls.into_iter().map(Into::into).collect(),
value: instruction.value,
},
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}
impl RouterCommand {
pub fn msg(&self) -> Vec<u8> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
Router::update_serai_key_message(*chain_id, *nonce, key)
}
RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
*chain_id,
*nonce,
outs.iter().map(|out| out.clone().into()).collect(),
),
}
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let key = PublicKey::new(Secp256k1::read_G(reader)?)
.ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
Ok(RouterCommand::UpdateSeraiKey {
chain_id: U256::from_le_slice(&chain_id),
nonce: U256::from_le_slice(&nonce),
key,
})
}
1 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let chain_id = U256::from_le_slice(&chain_id);
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let nonce = U256::from_le_slice(&nonce);
let mut outs_len = [0; 4];
reader.read_exact(&mut outs_len)?;
let outs_len = u32::from_le_bytes(outs_len);
let mut outs = vec![];
for _ in 0 .. outs_len {
outs.push(OutInstruction::read(reader)?);
}
Ok(RouterCommand::Execute { chain_id, nonce, outs })
}
_ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
}
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
writer.write_all(&[0])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&key.A.to_bytes())
}
RouterCommand::Execute { chain_id, nonce, outs } => {
writer.write_all(&[1])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
for out in outs {
out.write(writer)?;
}
Ok(())
}
}
}
pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![];
self.write(&mut res).unwrap();
res
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
command: RouterCommand,
signature: Signature,
}
impl SignedRouterCommand {
pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
let signature = Signature { c, s };
if !signature.verify(key, &command.msg()) {
None?
}
Some(SignedRouterCommand { command, signature })
}
pub fn command(&self) -> &RouterCommand {
&self.command
}
pub fn signature(&self) -> &Signature {
&self.signature
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let command = RouterCommand::read(reader)?;
let mut sig = [0; 64];
reader.read_exact(&mut sig)?;
let signature = Signature::from_bytes(sig)?;
Ok(SignedRouterCommand { command, signature })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.command.write(writer)?;
writer.write_all(&self.signature.to_bytes())
}
}
pub struct RouterCommandMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl RouterCommandMachine {
pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
// The Schnorr algorithm should be fine without this transcript, even when using the IETF variant
// Still, a more comprehensive transcript is cheap insurance, even if it isn't strictly necessary
let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
let key = keys.group_key();
transcript.append_message(b"key", key.to_bytes());
transcript.append_message(b"command", command.serialize());
Some(Self {
key: PublicKey::new(key)?,
command,
machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
})
}
}
impl PreprocessMachine for RouterCommandMachine {
type Preprocess = Preprocess<Secp256k1, ()>;
type Signature = SignedRouterCommand;
type SignMachine = RouterCommandSignMachine;
fn preprocess<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (Self::SignMachine, Self::Preprocess) {
let (machine, preprocess) = self.machine.preprocess(rng);
(RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
}
}
pub struct RouterCommandSignMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
type Params = ();
type Keys = ThresholdKeys<Secp256k1>;
type Preprocess = Preprocess<Secp256k1, ()>;
type SignatureShare = SignatureShare<Secp256k1>;
type SignatureMachine = RouterCommandSignatureMachine;
fn cache(self) -> CachedPreprocess {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn from_cache(
(): (),
_: ThresholdKeys<Secp256k1>,
_: CachedPreprocess,
) -> (Self, Self::Preprocess) {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
self.machine.read_preprocess(reader)
}
fn sign(
self,
commitments: HashMap<Participant, Self::Preprocess>,
msg: &[u8],
) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() {
panic!("message was passed to a RouterCommand machine when it generates its own");
}
let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;
Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
}
}
pub struct RouterCommandSignatureMachine {
key: PublicKey,
command: RouterCommand,
machine:
AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
type SignatureShare = SignatureShare<Secp256k1>;
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
self.machine.read_share(reader)
}
fn complete(
self,
shares: HashMap<Participant, Self::SignatureShare>,
) -> Result<SignedRouterCommand, FrostError> {
let sig = self.machine.complete(shares)?;
let signature = Signature::new(&self.key, &self.command.msg(), sig)
.expect("machine produced an invalid signature");
Ok(SignedRouterCommand { command: self.command, signature })
}
}
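A sketch of one signing session across these machines (a real deployment exchanges the preprocesses and shares over the network; `keys`, `command`, and the collected maps are assumed):

let machine = RouterCommandMachine::new(keys, command).expect("key had no eth representation");
let (machine, preprocess) = machine.preprocess(&mut OsRng);
// ... broadcast `preprocess`, collect the other participants' into `preprocesses` ...
// The message must be empty, as the machine derives it from the command itself
let (machine, share) = machine.sign(preprocesses, &[]).unwrap();
// ... broadcast `share`, collect the other participants' into `shares` ...
let signed: SignedRouterCommand = machine.complete(shares).unwrap();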


@@ -1,30 +1,428 @@
use std::{sync::Arc, io, collections::HashSet};
use k256::{
elliptic_curve::{group::GroupEncoding, sec1},
ProjectivePoint,
};
use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;
use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};
use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
pub use crate::{
Error,
crypto::{PublicKey, Signature},
abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
Ether,
Erc20([u8; 20]),
}
impl Coin {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
Ok(match kind[0] {
0 => Coin::Ether,
1 => {
let mut address = [0; 20];
reader.read_exact(&mut address)?;
Coin::Erc20(address)
}
_ => Err(io::Error::other("unrecognized Coin type"))?,
})
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Coin::Ether => writer.write_all(&[0]),
Coin::Erc20(token) => {
writer.write_all(&[1])?;
writer.write_all(token)
}
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
pub id: ([u8; 32], u64),
pub from: [u8; 20],
pub coin: Coin,
pub amount: U256,
pub data: Vec<u8>,
pub key_at_end_of_block: ProjectivePoint,
}
impl InInstruction {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let id = {
let mut id_hash = [0; 32];
reader.read_exact(&mut id_hash)?;
let mut id_pos = [0; 8];
reader.read_exact(&mut id_pos)?;
let id_pos = u64::from_le_bytes(id_pos);
(id_hash, id_pos)
};
let mut from = [0; 20];
reader.read_exact(&mut from)?;
let coin = Coin::read(reader)?;
let mut amount = [0; 32];
reader.read_exact(&mut amount)?;
let amount = U256::from_le_slice(&amount);
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
let data_len = usize::try_from(u32::from_le_bytes(data_len))
.map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
let mut data = vec![0; data_len];
reader.read_exact(&mut data)?;
let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
reader.read_exact(&mut key_at_end_of_block)?;
let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
.ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;
Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.id.0)?;
writer.write_all(&self.id.1.to_le_bytes())?;
writer.write_all(&self.from)?;
self.coin.write(writer)?;
writer.write_all(&self.amount.as_le_bytes())?;
writer.write_all(
&u32::try_from(self.data.len())
.map_err(|_| {
io::Error::other("InInstruction being written had data exceeding 2**32 in length")
})?
.to_le_bytes(),
)?;
writer.write_all(&self.data)?;
writer.write_all(&self.key_at_end_of_block.to_bytes())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
pub tx_id: [u8; 32],
pub nonce: u64,
pub signature: [u8; 64],
}
/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
pub(crate) fn code() -> Vec<u8> {
let bytecode = include_str!("../artifacts/Router.bin");
Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
}
pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
let mut bytecode = Self::code();
// Append the constructor arguments
bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
bytecode
}
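// `Deployer::find_router` later locates the deployment by filtering `Deployment` events on
// keccak256(init_code), so this init code (and the key embedded within it) is what binds a
// Router to its first key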
// This isn't pub in order to force users to use `Deployer::find_router`.
pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
Self(provider, address)
}
pub fn address(&self) -> [u8; 20] {
**self.1
}
/// Get the key for Serai at the specified block.
#[cfg(test)]
pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
let call = TransactionRequest::default()
.to(Some(self.1))
.input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call, Some(BlockId::Hash(B256::from(at).into())))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
}
/// Get the message to be signed in order to update the key for Serai.
pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
let mut buffer = b"updateSeraiKey".to_vec();
buffer.extend(&chain_id.to_be_bytes::<32>());
buffer.extend(&nonce.to_be_bytes::<32>());
buffer.extend(&key.eth_repr());
buffer
}
/// Update the key representing Serai.
pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
// TODO: Set a more accurate gas
TxLegacy {
to: TxKind::Call(self.1),
input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
.abi_encode()
.into(),
gas_limit: 100_000,
..Default::default()
}
}
/// Get the current nonce for the published batches.
#[cfg(test)]
pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
let call = TransactionRequest::default()
.to(Some(self.1))
.input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call, Some(BlockId::Hash(B256::from(at).into())))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
Ok(res._0)
}
/// Get the message to be signed in order to execute a batch of `OutInstruction`s.
///
/// The chain ID and nonce are bound into the message, preventing cross-chain and replayed use.
pub(crate) fn execute_message(
chain_id: U256,
nonce: U256,
outs: Vec<abi::OutInstruction>,
) -> Vec<u8> {
("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
}
/// Execute a batch of `OutInstruction`s.
pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
TxLegacy {
to: TxKind::Call(self.1),
input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
// TODO
gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
..Default::default()
}
}
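/// Get the key Serai was using as of the end of the specified block, per the most recent
/// `SeraiKeyUpdated` event.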
pub async fn key_at_end_of_block(&self, block: u64) -> Result<ProjectivePoint, Error> {
let filter = Filter::new().from_block(0).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
let last_key_x_coordinate = last_key_x_coordinate_log
.log_decode::<SeraiKeyUpdated>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.key;
let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());
Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)
}
pub async fn in_instructions(
&self,
block: u64,
allowed_tokens: &HashSet<[u8; 20]>,
) -> Result<Vec<InInstruction>, Error> {
let key_at_end_of_block = self.key_at_end_of_block(block).await?;
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let mut transfer_check = HashSet::new();
let mut in_instructions = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let id = (
log.block_hash.ok_or(Error::ConnectionError)?.into(),
log.log_index.ok_or(Error::ConnectionError)?,
);
let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?;
let log =
log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let coin = if log.coin.0 == [0; 20] {
Coin::Ether
} else {
let token = *log.coin.0;
if !allowed_tokens.contains(&token) {
continue;
}
// If this also counts as a top-level transfer via the token, drop it
//
// Necessary in order to handle a potential edge case with some theoretical token
// implementations
//
// This will either let it be handled by the top-level transfer hook or will drop it
// entirely on the side of caution
if tx.to == Some(token.into()) {
continue;
}
// Get all logs for this TX
let receipt = self
.0
.get_transaction_receipt(tx_hash)
.await
.map_err(|_| Error::ConnectionError)?
.ok_or(Error::ConnectionError)?;
let tx_logs = receipt.inner.logs();
// Find a matching transfer log
let mut found_transfer = false;
for tx_log in tx_logs {
let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
// Ensure we didn't already use this transfer to check a distinct InInstruction event
if transfer_check.contains(&log_index) {
continue;
}
// Check if this log is from the token we expected to be transferred
if tx_log.address().0 != token {
continue;
}
// Check if this is a transfer log
// https://github.com/alloy-rs/core/issues/589
if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
continue;
}
let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
// Check if this is a transfer to us for the expected amount
if (transfer.to == self.1) && (transfer.value == log.amount) {
transfer_check.insert(log_index);
found_transfer = true;
break;
}
}
if !found_transfer {
// This shouldn't be a ConnectionError
// This is an exploit, a non-conforming ERC20, or an invalid connection
// This halts the process, which is sufficient, yet doing so is sub-optimal
// TODO
Err(Error::ConnectionError)?;
}
Coin::Erc20(token)
};
in_instructions.push(InInstruction {
id,
from: *log.from.0,
coin,
amount: log.amount,
data: log.instruction.as_ref().to_vec(),
key_at_end_of_block,
});
}
Ok(in_instructions)
}
pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
let mut res = vec![];
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log =
log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
Ok(res)
}
#[cfg(feature = "tests")]
pub fn key_updated_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
}
#[cfg(feature = "tests")]
pub fn executed_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
}
}


@@ -1,34 +0,0 @@
use eyre::{eyre, Result};
use group::ff::PrimeField;
use ethers_providers::{Provider, Http};
use crate::{
Error,
crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<()> {
if contract
.verify(
public_key.parity,
public_key.px.to_repr().into(),
keccak256(message),
signature.c.to_repr().into(),
signature.s.to_repr().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(Error::InvalidSignature))
}
}


@@ -0,0 +1,13 @@
use alloy_sol_types::sol;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
use super::*;
sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;


@@ -0,0 +1,51 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
contract TestERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
event Approval(address indexed owner, address indexed spender, uint256 value);
function name() public pure returns (string memory) {
return "Test ERC20";
}
function symbol() public pure returns (string memory) {
return "TEST";
}
function decimals() public pure returns (uint8) {
return 18;
}
function totalSupply() public pure returns (uint256) {
return 1_000_000 * 10e18;
}
mapping(address => uint256) balances;
mapping(address => mapping(address => uint256)) allowances;
constructor() {
balances[msg.sender] = totalSupply();
}
function balanceOf(address owner) public view returns (uint256) {
return balances[owner];
}
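// Solidity ^0.8 uses checked arithmetic, so the transfers below revert on insufficient
// balance/allowance instead of silently underflowing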
function transfer(address to, uint256 value) public returns (bool) {
balances[msg.sender] -= value;
balances[to] += value;
return true;
}
function transferFrom(address from, address to, uint256 value) public returns (bool) {
allowances[from][msg.sender] -= value;
balances[from] -= value;
balances[to] += value;
return true;
}
function approve(address spender, uint256 value) public returns (bool) {
allowances[msg.sender][spender] = value;
return true;
}
function allowance(address owner, address spender) public view returns (uint256) {
return allowances[owner][spender];
}
}


@@ -0,0 +1,15 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "../../../contracts/Schnorr.sol";
contract TestSchnorr {
function verify(
bytes32 px,
bytes calldata message,
bytes32 c,
bytes32 s
) external pure returns (bool) {
return Schnorr.verify(px, message, c, s);
}
}


@@ -1,49 +1,33 @@
use rand_core::OsRng;
use group::ff::{Field, PrimeField};
use k256::{
ecdsa::{
self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
},
Scalar, ProjectivePoint,
};
use frost::{
curve::{Ciphersuite, Secp256k1},
algorithm::{Hram, IetfSchnorr},
tests::{algorithm_machines, sign},
};
use crate::{crypto::*, tests::key_gen};
// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
let message: [u8; 32] = message.to_repr().into();
alloy_core::primitives::Signature::from_signature_and_parity(
sig,
alloy_core::primitives::Parity::Parity(odd_y),
)
.ok()?
.recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
.ok()
.map(Into::into)
}
#[test]
@@ -55,20 +39,23 @@ fn test_ecrecover() {
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed(
<Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
&keccak256(MESSAGE).into(),
)
.unwrap();
// Sanity check the signature verifies
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
}
// Perform the ecrecover
assert_eq!(
ecrecover(
hash_to_scalar(MESSAGE),
u8::from(recovery_id.unwrap().is_y_odd()) == 1,
*sig.r(),
*sig.s()
)
@@ -93,18 +80,13 @@ fn test_signing() {
pub fn preprocess_signature_for_ecrecover(
R: ProjectivePoint,
public_key: &PublicKey,
m: &[u8],
s: Scalar,
) -> (Scalar, Scalar) {
let c = EthereumHram::hram(&R, &public_key.A, m);
let sa = -(s * public_key.px);
let ca = -(c * public_key.px);
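// ecrecover(e, v, r, s) returns address(r^-1 (s R' - e G)), where R' is the point with
// x-coordinate r and the y parity encoded by v. With e = -(s px), r = px (so R' = A, whose
// x-coordinate is px and whose y is even), and -(c px) passed in the s slot, it returns
// address(px^-1 ((-c px) A + (s px) G)) = address(s G - c A) = address(R),
// letting the EVM's ecrecover precompile recompute the Schnorr nonce commitment R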
(sa, ca)
}
#[test]
@@ -112,21 +94,12 @@ fn test_ecrecover_hack() {
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
let q = ecrecover(sa, false, public_key.px, ca).unwrap();
assert_eq!(q, address(&sig.R));
}


@@ -1,21 +1,25 @@
use std::{sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::ff::PrimeField;
use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
use alloy_core::{
primitives::{Address, U256, Bytes, TxKind},
hex::FromHex,
};
use alloy_consensus::{SignableTransaction, TxLegacy};
use alloy_rpc_types::TransactionReceipt;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::crypto::{address, deterministically_sign, PublicKey};
mod crypto;
mod abi;
mod schnorr;
mod router;
@@ -36,57 +40,88 @@ pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey)
(keys, public_key)
}
// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
// TODO: Deterministic deployments across chains
// TODO: Use a proper error here
pub async fn send(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
mut tx: TxLegacy,
) -> Option<TransactionReceipt> {
let verifying_key = *wallet.verifying_key().as_affine();
let address = Address::from(address(&verifying_key.into()));
// https://github.com/alloy-rs/alloy/issues/539
// let chain_id = provider.get_chain_id().await.unwrap();
// tx.chain_id = Some(chain_id);
tx.chain_id = None;
tx.nonce = provider.get_transaction_count(address, None).await.unwrap();
// 100 gwei
tx.gas_price = 100_000_000_000u128;
let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
assert!(
provider.get_balance(address, None).await.unwrap() >
((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
);
let mut bytes = vec![];
tx.encode_with_signature_fields(&sig.into(), &mut bytes);
let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
pending_tx.get_receipt().await.ok()
}
pub async fn fund_account(
provider: &RootProvider<SimpleRequest>,
wallet: &k256::ecdsa::SigningKey,
to_fund: Address,
value: U256,
) -> Option<()> {
let funding_tx =
TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
assert!(send(provider, wallet, funding_tx).await.unwrap().status());
Some(())
}
// TODO: Use a proper error here
pub async fn deploy_contract(
client: Arc<RootProvider<SimpleRequest>>,
wallet: &k256::ecdsa::SigningKey,
name: &str,
) -> Option<Address> {
let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = Bytes::from_hex(hex_bin).unwrap();
let deployment_tx = TxLegacy {
chain_id: None,
nonce: 0,
// 100 gwei
gas_price: 100_000_000_000u128,
gas_limit: 1_000_000,
to: TxKind::Create,
value: U256::ZERO,
input: bin,
};
let deployment_tx = deterministically_sign(&deployment_tx);
// Fund the deployer address
fund_account(
&client,
wallet,
deployment_tx.recover_signer().unwrap(),
U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
)
.await?;
let (deployment_tx, sig, _) = deployment_tx.into_parts();
let mut bytes = vec![];
deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
let receipt = pending_tx.get_receipt().await.ok()?;
assert!(receipt.status());
Some(receipt.contract_address.unwrap())
}


@@ -2,7 +2,8 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::Group;
use k256::ProjectivePoint;
use frost::{
curve::Secp256k1,
Participant, ThresholdKeys,
@@ -10,100 +11,173 @@ use frost::{
tests::{algorithm_machines, sign},
};
use alloy_core::primitives::{Address, U256};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use crate::{
crypto::*,
deployer::Deployer,
router::{Router, abi as router},
tests::{key_gen, send, fund_account},
};
async fn setup_test() -> (
AnvilInstance,
Arc<RootProvider<SimpleRequest>>,
u64,
Router,
HashMap<Participant, ThresholdKeys<Secp256k1>>,
PublicKey,
) {
let anvil = Anvil::new().spawn();
let provider = RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
);
let chain_id = provider.get_chain_id().await.unwrap();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
// Make sure the Deployer constructor returns None, as it doesn't exist yet
assert!(Deployer::new(client.clone()).await.unwrap().is_none());
// Deploy the Deployer
let tx = Deployer::deployment_tx();
fund_account(
&client,
&wallet,
tx.recover_signer().unwrap(),
U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
)
.await
.unwrap();
let (tx, sig, _) = tx.into_parts();
let mut bytes = vec![];
tx.encode_with_signature_fields(&sig, &mut bytes);
let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
let receipt = pending_tx.get_receipt().await.unwrap();
assert!(receipt.status());
let deployer =
Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");
let (keys, public_key) = key_gen();
// Verify the Router constructor returns None, as it doesn't exist yet
assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());
// Deploy the router
let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
.await
.unwrap();
assert!(receipt.status());
let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();
(anvil, client, chain_id, contract, keys, public_key)
}
async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
client
.get_block(client.get_block_number().await.unwrap().into(), false)
.await
.unwrap()
.unwrap()
.header
.hash
.unwrap()
.0
}
#[tokio::test]
async fn test_deploy_contract() {
let (_anvil, client, _, router, _, public_key) = setup_test().await;
let block_hash = latest_block_hash(&client).await;
assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
}
pub fn hash_and_sign(
keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
public_key: &PublicKey,
message: &[u8],
) -> Signature {
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);
Signature::new(public_key, message, sig).unwrap()
}
#[tokio::test]
async fn test_router_update_serai_key() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let next_key = loop {
let point = ProjectivePoint::random(&mut OsRng);
let Some(next_key) = PublicKey::new(point) else { continue };
break next_key;
};
let message = Router::update_serai_key_message(
U256::try_from(chain_id).unwrap(),
U256::try_from(1u64).unwrap(),
&next_key,
);
let sig = hash_and_sign(&keys, &public_key, &message);
let first_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
.await
.unwrap();
assert!(receipt.status());
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
// Check this does still offer the historical state
assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}
#[tokio::test]
async fn test_router_execute() {
let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
let to = Address::from([0; 20]);
let value = U256::ZERO;
let tx = router::OutInstruction { to, value, calls: vec![] };
let txs = vec![tx];
let first_block_hash = latest_block_hash(&client).await;
let nonce = contract.nonce(first_block_hash).await.unwrap();
assert_eq!(nonce, U256::try_from(1u64).unwrap());
let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
let sig = hash_and_sign(&keys, &public_key, &message);
let receipt =
send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
assert!(receipt.status());
println!("gas used: {:?}", receipt.cumulative_gas_used);
println!("logs: {:?}", receipt.logs);
let second_block_hash = latest_block_hash(&client).await;
assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
// Check this does still offer the historical state
assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
// TODO: Check logs
println!("gas used: {:?}", receipt.gas_used);
// println!("logs: {:?}", receipt.logs);
}


@@ -1,11 +1,9 @@
use std::sync::Arc;
use rand_core::OsRng;
use group::ff::PrimeField;
use k256::Scalar;
use frost::{
curve::Secp256k1,
@@ -13,24 +11,34 @@ use frost::{
tests::{algorithm_machines, sign},
};
use alloy_core::primitives::Address;
use alloy_sol_types::SolCall;
use alloy_rpc_types::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};
use alloy_node_bindings::{Anvil, AnvilInstance};
use crate::{
Error,
crypto::*,
tests::{key_gen, deploy_contract, abi::schnorr as abi},
};
async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
let anvil = Anvil::new().spawn();
let provider = RootProvider::new(
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
);
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
(anvil, client, address)
}
#[tokio::test]
@@ -38,30 +46,48 @@ async fn test_deploy_contract() {
setup_test().await;
}
pub async fn call_verify(
provider: &RootProvider<SimpleRequest>,
contract: Address,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<(), Error> {
let px: [u8; 32] = public_key.px.to_repr().into();
let c_bytes: [u8; 32] = signature.c.to_repr().into();
let s_bytes: [u8; 32] = signature.s.to_repr().into();
let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new(
abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
.abi_encode()
.into(),
));
let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?;
let res =
abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
if res._0 {
Ok(())
} else {
Err(Error::InvalidSignature)
}
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (_anvil, client, contract) = setup_test().await;
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();
call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
// Test an invalid signature fails
let mut sig = sig;
sig.s += Scalar::ONE;
assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}


@@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =
# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }
monero-generators = { path = "generators", version = "0.4", default-features = false }
@@ -91,7 +90,6 @@ std = [
"multiexp/std",
"transcript/std",
"dleq/std",
"monero-generators/std",
@@ -106,7 +104,7 @@ std = [
cache-distribution = ["async-lock"]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "dleq", "std"]
multisig = ["transcript", "frost", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
experimental = []


@@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop};
use sha3::{Digest, Keccak256};
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
use curve25519_dalek::{
constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
scalar::Scalar,
edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
traits::VartimePrecomputedMultiscalarMul,
};
pub use monero_generators::{H, decompress_point};
@@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar {
*INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}
static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
BASEPOINT_PRECOMP_CELL
.get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
}
/// Monero protocol version.
///
/// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the

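A minimal usage sketch of the precomputation cached above, assuming curve25519-dalek's `VartimePrecomputedMultiscalarMul` trait; the `main` wrapper is purely illustrative:

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT,
  edwards::VartimeEdwardsPrecomputation,
  scalar::Scalar,
  traits::VartimePrecomputedMultiscalarMul,
};

fn main() {
  // Build the table once; the code above caches it in a OnceLock so every
  // later verification reuses it
  let precomp = VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]);
  let s = Scalar::from(42u64);
  // The table agrees with plain scalar multiplication by the basepoint
  assert_eq!(precomp.vartime_multiscalar_mul([s]), ED25519_BASEPOINT_POINT * s);
}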

@@ -91,7 +91,7 @@ impl Bulletproofs {
Bulletproofs::Plus(
AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
.unwrap()
.prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
.prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap()))
.unwrap(),
)
})


@@ -24,7 +24,7 @@ use crate::{
},
};
// Figure 3
// Figure 3 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct AggregateRangeStatement {
generators: Generators,
@@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement {
}
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
pub(crate) struct AggregateRangeWitness {
values: Vec<u64>,
gammas: Vec<Scalar>,
}
pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
impl AggregateRangeWitness {
pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
if commitments.is_empty() || (commitments.len() > MAX_M) {
return None;
}
let mut values = Vec::with_capacity(commitments.len());
let mut gammas = Vec::with_capacity(commitments.len());
for commitment in commitments {
values.push(commitment.amount);
gammas.push(Scalar(commitment.mask));
}
Some(AggregateRangeWitness { values, gammas })
Some(AggregateRangeWitness(commitments))
}
}
@@ -162,13 +153,11 @@ impl AggregateRangeStatement {
witness: &AggregateRangeWitness,
) -> Option<AggregateRangeProof> {
// Check for consistency with the witness
if self.V.len() != witness.values.len() {
if self.V.len() != witness.0.len() {
return None;
}
for (commitment, (value, gamma)) in
self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
{
if Commitment::new(**gamma, *value).calculate() != **commitment {
for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
if witness.calculate() != **commitment {
return None;
}
}
@@ -196,7 +185,13 @@ impl AggregateRangeStatement {
let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
for j in 1 ..= V.len() {
d_js.push(Self::d_j(j, V.len()));
a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
#[allow(clippy::map_unwrap_or)]
a_l.0.append(
&mut u64_decompose(
*witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
)
.0,
);
}
let a_r = a_l.clone() - Scalar::ONE;
@@ -223,8 +218,8 @@ impl AggregateRangeStatement {
let a_l = a_l - z;
let a_r = a_r + &d_descending_y_plus_z;
let mut alpha = alpha;
for j in 1 ..= witness.gammas.len() {
alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
for j in 1 ..= witness.0.len() {
alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
}
Some(AggregateRangeProof {

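The witness consistency check above relies on the Pedersen relation a commitment opens to. A sketch of that relation, with `H` taken as a parameter rather than monero-generators' static; `pedersen_commit` is a hypothetical name:

use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::EdwardsPoint, scalar::Scalar};

// commitment = mask * G + amount * H, as Commitment::calculate computes
fn pedersen_commit(mask: Scalar, amount: u64, h: EdwardsPoint) -> EdwardsPoint {
  (ED25519_BASEPOINT_POINT * mask) + (h * Scalar::from(amount))
}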

@@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
};
// Figure 1
// Figure 1 of the Bulletproofs+ paper
#[derive(Clone, Debug)]
pub(crate) struct WipStatement {
generators: Generators,


@@ -9,17 +9,17 @@ use std_shims::{
use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, Choice, CtOption};
use subtle::{ConstantTimeEq, ConditionallySelectable};
use curve25519_dalek::{
constants::ED25519_BASEPOINT_TABLE,
constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
scalar::Scalar,
traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
};
use crate::{
INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
ringct::hash_to_point, serialize::*,
};
@@ -27,8 +27,6 @@ use crate::{
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
#[cfg(feature = "multisig")]
pub(crate) use multisig::add_key_image_share;
/// Errors returned when CLSAG signing fails.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -100,8 +98,11 @@ fn core(
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
let n = ring.len();
let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
let D = D * INV_EIGHT();
let images_precomp = match A_c1 {
Mode::Sign(..) => None,
Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
};
let D_INV_EIGHT = D * INV_EIGHT();
// Generate the transcript
// Instead of generating multiple, a single transcript is created and then edited as needed
@@ -130,7 +131,7 @@ fn core(
}
to_hash.extend(I.compress().to_bytes());
to_hash.extend(D.compress().to_bytes());
to_hash.extend(D_INV_EIGHT.compress().to_bytes());
to_hash.extend(pseudo_out.compress().to_bytes());
// mu_P with agg_0
let mu_P = hash_to_scalar(&to_hash);
@@ -169,29 +170,44 @@ fn core(
}
// Perform the core loop
let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
let mut c1 = c;
for i in (start .. end).map(|i| i % n) {
// This will only execute once and shouldn't need to be constant time. Making it constant time
// removes the risk of branch prediction creating timing differences depending on ring index
// however
c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));
let c_p = mu_P * c;
let c_c = mu_C * c;
let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
// (s_i * G) + (c_p * P_i) + (c_c * C_i)
let L = match A_c1 {
Mode::Sign(..) => {
EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
}
Mode::Verify(..) => {
BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
}
};
let PH = hash_to_point(&P[i]);
// Shouldn't be an issue as all of the variables in this vartime statement are public
let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);
// (c_p * I) + (c_c * D) + (s_i * PH)
let R = match A_c1 {
Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
Mode::Verify(..) => {
images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
}
};
to_hash.truncate(((2 * n) + 3) * 32);
to_hash.extend(L.compress().to_bytes());
to_hash.extend(R.compress().to_bytes());
c = hash_to_scalar(&to_hash);
// This will only execute once and shouldn't need to be constant time. Making it constant time
// removes the risk of branch prediction creating timing differences depending on ring index
// however
c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
}
// The first tuple is needed to continue signing; the latter is the c to be tested/worked with
((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
}
/// CLSAG signature, as used in Monero.
@@ -261,8 +277,10 @@ impl Clsag {
nonce.deref() *
hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
);
clsag.s[usize::from(inputs[i].2.decoys.i)] =
(-((p * inputs[i].0.deref()) + c)) + nonce.deref();
// Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
// member's commitment and our input commitment (which will only have a known discrete log
// over G if the amounts cancel out)
clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c);
inputs[i].0.zeroize();
nonce.zeroize();

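A sketch of the sign-path computation of `L` above via curve25519-dalek's `MultiscalarMul`; the verify path instead uses the vartime basepoint precomputation, which is safe there since all of its inputs are public. `ring_l` is a hypothetical name:

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT,
  edwards::EdwardsPoint,
  scalar::Scalar,
  traits::MultiscalarMul,
};

// L = (s_i * G) + (c_p * P_i) + (c_c * C_i), evaluated as one multiexp
fn ring_l(s_i: Scalar, c_p: Scalar, c_c: Scalar, p_i: EdwardsPoint, c_i: EdwardsPoint) -> EdwardsPoint {
  EdwardsPoint::multiscalar_mul([s_i, c_p, c_c], [ED25519_BASEPOINT_POINT, p_i, c_i])
}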

@@ -1,5 +1,8 @@
use core::{ops::Deref, fmt::Debug};
use std_shims::io::{self, Read, Write};
use std_shims::{
io::{self, Read, Write},
collections::HashMap,
};
use std::sync::{Arc, RwLock};
use rand_core::{RngCore, CryptoRng, SeedableRng};
@@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
use group::{ff::Field, Group, GroupEncoding};
use group::{
ff::{Field, PrimeField},
Group, GroupEncoding,
};
use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;
use frost::{
dkg::lagrange,
curve::Ed25519,
@@ -26,10 +31,6 @@ use crate::ringct::{
clsag::{ClsagInput, Clsag},
};
fn dleq_transcript() -> RecommendedTranscript {
RecommendedTranscript::new(b"monero_key_image_dleq")
}
impl ClsagInput {
fn transcript<T: Transcript>(&self, transcript: &mut T) {
// Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -43,6 +44,7 @@ impl ClsagInput {
// They're just an unreliable reference to this data which will be included in the message
// if in use
transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
// This also transcripts the key image generator since it's derived from this key
transcript.append_message(b"key", pair[0].compress().to_bytes());
transcript.append_message(b"commitment", pair[1].compress().to_bytes())
}
@@ -70,13 +72,11 @@ impl ClsagDetails {
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
pub struct ClsagAddendum {
pub(crate) key_image: dfg::EdwardsPoint,
dleq: DLEqProof<dfg::EdwardsPoint>,
}
impl WriteAddendum for ClsagAddendum {
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
self.dleq.write(writer)
writer.write_all(self.key_image.compress().to_bytes().as_ref())
}
}
@@ -97,9 +97,8 @@ pub struct ClsagMultisig {
transcript: RecommendedTranscript,
pub(crate) H: EdwardsPoint,
// Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
// an extra round
image: EdwardsPoint,
key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
image: Option<dfg::EdwardsPoint>,
details: Arc<RwLock<Option<ClsagDetails>>>,
@@ -117,7 +116,8 @@ impl ClsagMultisig {
transcript,
H: hash_to_point(&output_key),
image: EdwardsPoint::identity(),
key_image_shares: HashMap::new(),
image: None,
details,
@@ -135,20 +135,6 @@ impl ClsagMultisig {
}
}
pub(crate) fn add_key_image_share(
image: &mut EdwardsPoint,
generator: EdwardsPoint,
offset: Scalar,
included: &[Participant],
participant: Participant,
share: EdwardsPoint,
) {
if image.is_identity().into() {
*image = generator * offset;
}
*image += share * lagrange::<dfg::Scalar>(participant, included).0;
}
impl Algorithm<Ed25519> for ClsagMultisig {
type Transcript = RecommendedTranscript;
type Addendum = ClsagAddendum;
@@ -160,23 +146,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
_rng: &mut R,
keys: &ThresholdKeys<Ed25519>,
) -> ClsagAddendum {
ClsagAddendum {
key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
dleq: DLEqProof::prove(
rng,
// Doesn't take in a larger transcript object due to the usage of this
// Every prover would immediately write their own DLEq proof, when they can only do so in
// the proper order if they want to reach consensus
// It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
// try to merge later in some form, when it should instead just merge xH (as it does)
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
keys.secret_share(),
),
}
ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
}
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -190,7 +163,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
Err(io::Error::other("non-canonical key image"))?;
}
Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
Ok(ClsagAddendum { key_image: xH })
}
fn process_addendum(
@@ -199,33 +172,29 @@ impl Algorithm<Ed25519> for ClsagMultisig {
l: Participant,
addendum: ClsagAddendum,
) -> Result<(), FrostError> {
// TODO: This check is faulty if two shares are additive inverses of each other
if self.image.is_identity().into() {
if self.image.is_none() {
self.transcript.domain_separate(b"CLSAG");
// Transcript the ring
self.input().transcript(&mut self.transcript);
// Transcript the mask
self.transcript.append_message(b"mask", self.mask().to_bytes());
// Init the image to the offset
self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
}
// Transcript this participant's contribution
self.transcript.append_message(b"participant", l.to_bytes());
addendum
.dleq
.verify(
&mut dleq_transcript(),
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
&[view.original_verification_share(l), addendum.key_image],
)
.map_err(|_| FrostError::InvalidPreprocess(l))?;
self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
add_key_image_share(
&mut self.image,
self.H,
view.offset().0,
view.included(),
l,
addendum.key_image.0,
);
// Accumulate the interpolated share
let interpolated_key_image_share =
addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
*self.image.as_mut().unwrap() += interpolated_key_image_share;
self
.key_image_shares
.insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);
Ok(())
}
@@ -253,7 +222,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
#[allow(non_snake_case)]
let (clsag, pseudo_out, p, c) = Clsag::sign_core(
&mut rng,
&self.image,
&self.image.expect("verifying a share despite never processing any addendums").0,
&self.input(),
self.mask(),
self.msg.as_ref().unwrap(),
@@ -262,7 +231,8 @@ impl Algorithm<Ed25519> for ClsagMultisig {
);
self.interim = Some(Interim { p, c, clsag, pseudo_out });
(-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
// r - p x, where p is the challenge for the keys
*nonces[0] - dfg::Scalar(p) * view.secret_share().deref()
}
#[must_use]
@@ -274,11 +244,13 @@ impl Algorithm<Ed25519> for ClsagMultisig {
) -> Option<Self::Signature> {
let interim = self.interim.as_ref().unwrap();
let mut clsag = interim.clsag.clone();
// We produced shares as `r - p x`, yet the signature is `r - p x - c x`
// Subtract `c x` (saved as `c`) now
clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
if clsag
.verify(
&self.input().decoys.ring,
&self.image,
&self.image.expect("verifying a signature despite never processing any addendums").0,
&interim.pseudo_out,
self.msg.as_ref().unwrap(),
)
@@ -296,10 +268,61 @@ impl Algorithm<Ed25519> for ClsagMultisig {
share: dfg::Scalar,
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
let interim = self.interim.as_ref().unwrap();
Ok(vec![
// For a share `r - p x`, the following two equalities should hold:
// - `(r - p x)G == R.0 - pV`, where `V = xG`
// - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
//
// This is effectively a discrete log equality proof for:
// V, K over G, H
// with nonces
// R.0, R.1
// and solution
// s
//
// Which is a batch-verifiable rewrite of the traditional CP93 proof
// (and also writable as Generalized Schnorr Protocol)
//
// That means that, given a proper challenge, this alone can certainly be argued to prove the
// key image share is well-formed, with the provided signature proving as much.
// This is a bit funky, as it doesn't prove the nonces are well-formed. They're part of
// the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
// is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
// extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).
let key_image_share = self.key_image_shares[&verification_share.to_bytes()];
// Hash every variable relevant here, using the hash output as the random weight
let mut weight_transcript =
RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
weight_transcript.append_message(b"H", self.H.to_bytes());
weight_transcript.append_message(b"xG", verification_share.to_bytes());
weight_transcript.append_message(b"xH", key_image_share.to_bytes());
weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
weight_transcript.append_message(b"s", share.to_repr());
let weight = weight_transcript.challenge(b"weight");
let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));
let part_one = vec![
(share, dfg::EdwardsPoint::generator()),
(dfg::Scalar(interim.p), verification_share),
// -(R.0 - pV) == -R.0 + pV
(-dfg::Scalar::ONE, nonces[0][0]),
])
(dfg::Scalar(interim.p), verification_share),
];
let mut part_two = vec![
(weight * share, dfg::EdwardsPoint(self.H)),
// -(R.1 - pK) == -R.1 + pK
(-weight, nonces[0][1]),
(weight * dfg::Scalar(interim.p), key_image_share),
];
let mut all = part_one;
all.append(&mut part_two);
Ok(all)
}
}

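The weighted pairs returned above feed a batch verifier which checks their sum is the identity. A compact sketch of the two relations and their random linear combination, over an abstract group; all names here are hypothetical:

use group::Group;

// For a share s = r - p x, both of these must hold:
//   s G + p V - R0 == 0   (V = x G is the verification share, R0 the nonce commitment over G)
//   s H + p K - R1 == 0   (K = x H is the key image share, R1 the nonce commitment over H)
// A transcript-derived weight w batches them into one check, sound except with
// negligible probability over the choice of w.
fn share_valid<G: Group>(
  s: G::Scalar, p: G::Scalar, w: G::Scalar,
  g: G, h: G, v: G, k: G, r0: G, r1: G,
) -> bool {
  let over_g = (g * s) + (v * p) - r0;
  let over_h = (h * s) + (k * p) - r1;
  (over_g + (over_h * w)) == G::identity()
}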

@@ -21,7 +21,7 @@ fn test_aggregate_range_proof() {
}
let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
let statement = AggregateRangeStatement::new(commitment_points).unwrap();
let witness = AggregateRangeWitness::new(&commitments).unwrap();
let witness = AggregateRangeWitness::new(commitments).unwrap();
let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
statement.verify(&mut OsRng, &mut verifier, (), proof);


@@ -57,7 +57,7 @@ fn clsag() {
}
let image = generate_key_image(&secrets.0);
let (clsag, pseudo_out) = Clsag::sign(
let (mut clsag, pseudo_out) = Clsag::sign(
&mut OsRng,
vec![(
secrets.0,
@@ -76,7 +76,12 @@ fn clsag() {
msg,
)
.swap_remove(0);
clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
// Make sure verification fails if we throw a random `c1` at it.
clsag.c1 = random_scalar(&mut OsRng);
assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
}
}


@@ -1,5 +1,5 @@
use core::{marker::PhantomData, fmt::Debug};
use std_shims::string::{String, ToString};
use core::{marker::PhantomData, fmt};
use std_shims::string::ToString;
use zeroize::Zeroize;
@@ -81,7 +81,7 @@ impl AddressType {
}
/// A type which returns the byte for a given address.
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug {
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug {
fn network_bytes(network: Network) -> (u8, u8, u8, u8);
}
@@ -191,8 +191,8 @@ pub struct Address<B: AddressBytes> {
pub view: EdwardsPoint,
}
impl<B: AddressBytes> core::fmt::Debug for Address<B> {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
impl<B: AddressBytes> fmt::Debug for Address<B> {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
fmt
.debug_struct("Address")
.field("meta", &self.meta)
@@ -212,8 +212,8 @@ impl<B: AddressBytes> Zeroize for Address<B> {
}
}
impl<B: AddressBytes> ToString for Address<B> {
fn to_string(&self) -> String {
impl<B: AddressBytes> fmt::Display for Address<B> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let mut data = vec![self.meta.to_byte()];
data.extend(self.spend.compress().to_bytes());
data.extend(self.view.compress().to_bytes());
@@ -226,7 +226,7 @@ impl<B: AddressBytes> ToString for Address<B> {
if let Some(id) = self.meta.kind.payment_id() {
data.extend(id);
}
encode_check(&data).unwrap()
write!(f, "{}", encode_check(&data).unwrap())
}
}

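Implementing `Display` is the more idiomatic choice since the standard library's blanket `impl<T: Display> ToString for T` then provides `to_string` for free, so existing callers keep working. A minimal sketch of the pattern:

use core::fmt;

struct Wrapper([u8; 4]);

impl fmt::Display for Wrapper {
  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
    write!(f, "{:02x?}", self.0)
  }
}

fn main() {
  // to_string comes from the blanket impl over Display
  assert_eq!(Wrapper([0; 4]).to_string(), format!("{}", Wrapper([0; 4])));
}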

@@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript};
use frost::{
curve::Ed25519,
Participant, FrostError, ThresholdKeys,
dkg::lagrange,
sign::{
Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -27,7 +28,7 @@ use frost::{
use crate::{
random_scalar,
ringct::{
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
RctPrunable,
},
transaction::{Input, Transaction},
@@ -261,8 +262,13 @@ impl SignMachine<Transaction> for TransactionSignMachine {
included.push(self.i);
included.sort_unstable();
// Convert the unified commitments to a Vec of the individual commitments
// Start calculating the key images, as needed on the TX level
let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
*image = generator * offset;
}
// Convert the serialized nonces commitments to a parallelized Vec
let mut commitments = (0 .. self.clsags.len())
.map(|c| {
included
@@ -291,14 +297,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
// provides the easiest API overall, as this is where the TX is (which needs the key
// images in its message), along with where the outputs are determined (where our
// outputs may need these in order to guarantee uniqueness)
add_key_image_share(
&mut images[c],
self.key_images[c].0,
self.key_images[c].1,
&included,
*l,
preprocess.addendum.key_image.0,
);
images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;
Ok((*l, preprocess))
})


@@ -11,7 +11,7 @@ impl Get for Transaction<'_> {
let mut res = self.0.get(&key);
for change in &self.1 {
if change.1 == key.as_ref() {
res = change.2.clone();
res.clone_from(&change.2);
}
}
res

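`clone_from` is what clippy's `assigning_clones` lint suggests: it lets the destination reuse its existing allocation where possible, instead of dropping it and allocating anew. A small sketch:

fn main() {
  let mut res: Option<Vec<u8>> = Some(vec![0; 32]);
  let change: Option<Vec<u8>> = Some(vec![1; 32]);
  // Equivalent to `res = change.clone()`, yet may reuse res's existing buffer
  res.clone_from(&change);
  assert_eq!(res, change);
}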

@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }


@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg;
use crate::{
p2p::{CosignedBlock, P2pMessageKind, P2p},
p2p::{CosignedBlock, GossipMessageKind, P2p},
substrate::LatestCosignedBlock,
};
@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
for cosign in cosigns {
let mut buf = vec![];
cosign.serialize(&mut buf).unwrap();
P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
}
sleep(Duration::from_secs(60)).await;
}


@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
cosign_channel.send(cosigned_block).unwrap();
let mut buf = vec![];
cosigned_block.serialize(&mut buf).unwrap();
P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
None
}
// This causes an action on Substrate yet not on any Tributary

File diff suppressed because it is too large


@@ -14,7 +14,7 @@ use tokio::sync::RwLock;
use crate::{
processors::{Message, Processors},
TributaryP2p, P2pMessageKind, P2p,
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
};
pub mod tributary;
@@ -45,7 +45,10 @@ impl Processors for MemProcessors {
#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
pub struct LocalP2p(
usize,
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);
impl LocalP2p {
pub fn new(validators: usize) -> Vec<LocalP2p> {
@@ -65,11 +68,13 @@ impl P2p for LocalP2p {
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.1.write().await.1[to].push_back((self.0, msg));
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice();
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
}
async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
// Content-based deduplication
let mut lock = self.1.write().await;
{
@@ -81,19 +86,26 @@ impl P2p for LocalP2p {
}
let queues = &mut lock.1;
let kind_len = (match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
})
.len();
let msg = msg[kind_len ..].to_vec();
for (i, msg_queue) in queues.iter_mut().enumerate() {
if i == self.0 {
continue;
}
msg_queue.push_back((self.0, msg.clone()));
msg_queue.push_back((self.0, kind, msg.clone()));
}
}
async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
async fn receive(&self) -> P2pMessage<Self> {
// This is a cursed way to implement an async read from a Vec
loop {
if let Some(res) = self.1.write().await.1[self.0].pop_front() {
return res;
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
return P2pMessage { sender, kind, msg };
}
tokio::time::sleep(std::time::Duration::from_millis(100)).await;
}
@@ -103,6 +115,11 @@ impl P2p for LocalP2p {
#[async_trait]
impl TributaryP2p for LocalP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
<Self as P2p>::broadcast(
self,
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
msg,
)
.await
}
}

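A sketch of the content-based deduplication `broadcast_raw` performs above, where a message is only fanned out the first time its exact bytes are seen; this standalone helper is hypothetical:

use std::collections::HashSet;

// Returns true only on first sight of these exact bytes
fn first_sight(seen: &mut HashSet<Vec<u8>>, msg: &[u8]) -> bool {
  seen.insert(msg.to_vec())
}

fn main() {
  let mut seen = HashSet::new();
  assert!(first_sight(&mut seen, b"cosigned block"));
  assert!(!first_sight(&mut seen, b"cosigned block"));
}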

@@ -26,7 +26,7 @@ use serai_db::MemDb;
use tributary::Tributary;
use crate::{
P2pMessageKind, P2p,
GossipMessageKind, P2pMessageKind, P2p,
tributary::{Transaction, TributarySpec},
tests::LocalP2p,
};
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
if tributary.handle_message(&msg.msg).await {
p2p.broadcast(msg.kind, msg.msg).await;
@@ -173,7 +173,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}
@@ -199,7 +199,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind {
P2pMessageKind::Tributary(genesis) => {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await;
}


@@ -116,8 +116,8 @@ async fn sync_test() {
.map_err(|_| "failed to send ActiveTributary to heartbeat")
.unwrap();
// The heartbeat is once every 10 blocks
sleep(Duration::from_secs(10 * block_time)).await;
// The heartbeat is once every 10 blocks, with some limitations
sleep(Duration::from_secs(20 * block_time)).await;
assert!(syncer_tributary.tip().await != spec.genesis());
// Verify it synced to the tip


@@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const BLOCK_MESSAGE: u8 = 1;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -336,9 +335,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
// Return true if the message should be rebroadcasted.
pub async fn handle_message(&self, msg: &[u8]) -> bool {
// Acquire the lock now to prevent sync_block from being run at the same time
let mut sync_block = self.synced_block_result.write().await;
match msg.first() {
Some(&TRANSACTION_MESSAGE) => {
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
@@ -370,19 +366,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
false
}
Some(&BLOCK_MESSAGE) => {
let mut msg_ref = &msg[1 ..];
let Ok(block) = Block::<T>::read(&mut msg_ref) else {
log::error!("received invalid block message");
return false;
};
let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
if self.sync_block_internal(block, commit, &mut sync_block).await {
log::debug!("synced block over p2p net instead of building the commit ourselves");
}
false
}
_ => false,
}
}


@@ -74,7 +74,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
panic!("provided transaction saved to disk wasn't provided");
};
if res.transactions.get(order).is_none() {
if !res.transactions.contains_key(order) {
res.transactions.insert(order, VecDeque::new());
}
res.transactions.get_mut(order).unwrap().push_back(tx);
@@ -135,7 +135,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
txn.put(current_provided_key, currently_provided);
txn.commit();
if self.transactions.get(order).is_none() {
if !self.transactions.contains_key(order) {
self.transactions.insert(order, VecDeque::new());
}
self.transactions.get_mut(order).unwrap().push_back(tx);

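The `contains_key`/`insert`/`get_mut` sequence above could equivalently use the entry API, which performs a single lookup. A sketch, assuming the map is keyed by a `&'static str` order as elsewhere in this file:

use std::collections::{HashMap, VecDeque};

fn push_tx<T>(map: &mut HashMap<&'static str, VecDeque<T>>, order: &'static str, tx: T) {
  map.entry(order).or_insert_with(VecDeque::new).push_back(tx);
}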

@@ -41,9 +41,8 @@ use tendermint::{
use tokio::sync::RwLock;
use crate::{
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
Blockchain, P2p,
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,
Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,
};
pub mod tx;
@@ -414,12 +413,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
);
match block_res {
Ok(()) => {
// If we successfully added this block, broadcast it
// TODO: Move this under the coordinator once we set up on new block notifications?
let mut msg = serialized_block.0;
msg.insert(0, BLOCK_MESSAGE);
msg.extend(encoded_commit);
self.p2p.broadcast(self.genesis, msg).await;
// If we successfully added this block, break
break;
}
Err(BlockError::NonLocalProvided(hash)) => {
@@ -428,6 +422,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
hex::encode(hash),
hex::encode(self.genesis)
);
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}
_ => return invalid_block(),
}


@@ -139,10 +139,8 @@ impl<N: Network> BlockData<N> {
// 27, 33, 41, 46, 60, 64
self.round_mut().step = data.step();
// Only return a message if we're actually a current validator and haven't prior posted a
// message
// Only return a message if we're actually a current validator
let round_number = self.round().number;
let step = data.step();
let res = self.validator_id.map(|validator_id| Message {
sender: validator_id,
block: self.number,
@@ -150,21 +148,59 @@ impl<N: Network> BlockData<N> {
data,
});
if res.is_some() {
if let Some(res) = res.as_ref() {
const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
const PREVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";
let genesis = self.genesis;
let key = |prefix: &[u8]| [prefix, &genesis].concat();
let mut txn = self.db.txn();
let key = [
b"tendermint-machine_already_sent_message".as_ref(),
&self.genesis,
&self.number.0.to_le_bytes(),
&round_number.0.to_le_bytes(),
&step.encode(),
]
.concat();
// If we've already sent a message, return
if txn.get(&key).is_some() {
// Ensure we haven't prior sent a message for a future block/round
let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
let key = key(prefix);
let latest =
u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
if latest > current {
None?;
}
if current > latest {
txn.put(&key, current.to_le_bytes());
return Some(true);
}
Some(false)
};
let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
if new_block {
// Delete the latest round key
txn.del(&key(LATEST_ROUND_KEY));
}
let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
if new_block || new_round {
// Delete the messages for the old round
txn.del(&key(PROPOSE_KEY));
txn.del(&key(PREVOTE_KEY));
txn.del(&key(PRECOMMIT_KEY));
}
// Check we haven't sent this message within this round
let msg_key = key(match res.data.step() {
Step::Propose => PROPOSE_KEY,
Step::Prevote => PREVOTE_KEY,
Step::Precommit => PRECOMMIT_KEY,
});
if txn.get(&msg_key).is_some() {
assert!(!new_block);
assert!(!new_round);
None?;
}
txn.put(&key, []);
// Put this message to the DB
txn.put(&msg_key, res.encode());
txn.commit();
}

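A sketch of the `last_block_or_round` monotonicity guard introduced above, over an in-memory map instead of a DB transaction (the storage here is hypothetical). `None` signals a regression, so no message is sent; `Some(true)` means we advanced and persisted the new value; `Some(false)` means we're still at the same block/round:

use std::collections::HashMap;

fn last_block_or_round(
  store: &mut HashMap<Vec<u8>, [u8; 8]>,
  key: Vec<u8>,
  current: u64,
) -> Option<bool> {
  let latest = store.get(&key).map(|bytes| u64::from_le_bytes(*bytes)).unwrap_or(0);
  // Never move backwards
  if latest > current {
    return None;
  }
  if current > latest {
    store.insert(key, current.to_le_bytes());
    return Some(true);
  }
  Some(false)
}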

@@ -313,11 +313,16 @@ impl<N: Network + 'static> TendermintMachine<N> {
let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
if time_until_round_end == Duration::ZERO {
log::trace!(
target: "tendermint",
"resetting when prior round ended {}ms ago",
Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
);
}
log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis());
log::trace!(
target: "tendermint",
"sleeping until round ends in {}ms",
time_until_round_end.as_millis(),
);
sleep(time_until_round_end).await;
// Clear our outbound message queue
@@ -509,7 +514,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
match step {
Step::Propose => {
// Slash the validator for not proposing when they should've
log::debug!(target: "tendermint", "Validator didn't propose when they should have");
log::debug!(target: "tendermint", "validator didn't propose when they should have");
// this slash will be voted on.
self.slash(
self.weights.proposer(self.block.number, self.block.round().number),
@@ -598,7 +603,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
);
let id = block.id();
let proposal = self.network.add_block(block, commit).await;
log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
log::trace!(
target: "tendermint",
"added block {} (produced by machine)",
hex::encode(id.as_ref()),
);
self.reset(msg.round, proposal).await;
}
Err(TendermintError::Malicious(sender, evidence)) => {
@@ -692,7 +701,12 @@ impl<N: Network + 'static> TendermintMachine<N> {
(msg.round == self.block.round().number) &&
(msg.data.step() == Step::Propose)
{
log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0);
log::trace!(
target: "tendermint",
"received Propose for block {}, round {}",
msg.block.0,
msg.round.0,
);
}
// If this is a precommit, verify its signature
@@ -710,7 +724,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
if !self.block.log.log(signed.clone())? {
return Err(TendermintError::AlreadyHandled);
}
log::debug!(target: "tendermint", "received new tendermint message");
log::trace!(
target: "tendermint",
"received new tendermint message (block: {}, round: {}, step: {:?})",
msg.block.0,
msg.round.0,
msg.data.step(),
);
// All functions, except for the finalizer and the jump, are locked to the current round
@@ -757,6 +777,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
// 55-56
// Jump, enabling processing by the below code
if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
log::debug!(
target: "tendermint",
"jumping from round {} to round {}",
self.block.round().number.0,
msg.round.0,
);
// Jump to the new round.
let proposer = self.round(msg.round, None);
@@ -814,13 +841,26 @@ impl<N: Network + 'static> TendermintMachine<N> {
if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
let (participation, weight) =
self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
let threshold_weight = self.weights.threshold();
if participation < threshold_weight {
log::trace!(
target: "tendermint",
"progress toward setting the prevote timeout, participation: {}, needed: {}",
participation,
threshold_weight,
);
}
// 34-35
if participation >= self.weights.threshold() {
if participation >= threshold_weight {
log::trace!(
target: "tendermint",
"setting timeout for prevote due to sufficient participation",
);
self.block.round_mut().set_timeout(Step::Prevote);
}
// 44-46
if weight >= self.weights.threshold() {
if weight >= threshold_weight {
self.broadcast(Data::Precommit(None));
return Ok(None);
}
@@ -830,6 +870,10 @@ impl<N: Network + 'static> TendermintMachine<N> {
if matches!(msg.data, Data::Precommit(_)) &&
self.block.log.has_participation(self.block.round().number, Step::Precommit)
{
log::trace!(
target: "tendermint",
"setting timeout for precommit due to sufficient participation",
);
self.block.round_mut().set_timeout(Step::Precommit);
}


@@ -1,6 +1,5 @@
use std::{sync::Arc, collections::HashMap};
use log::debug;
use parity_scale_codec::Encode;
use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
@@ -27,7 +26,7 @@ impl<N: Network> MessageLog<N> {
let step = msg.data.step();
if let Some(existing) = msgs.get(&step) {
if existing.msg.data != msg.data {
debug!(
log::debug!(
target: "tendermint",
"Validator sent multiple messages for the same block + round + step"
);


@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021"
rust-version = "1.65"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true


@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021"
rust-version = "1.70"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true


@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.73"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true


@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ed448", "ff", "group"]
edition = "2021"
rust-version = "1.65"
rust-version = "1.66"
[package.metadata.docs.rs]
all-features = true


@@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features =
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }


@@ -39,6 +39,13 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {
/// Obtain the list of nonces to generate, as specified by the generators to create commitments
/// against per-nonce.
///
/// The Algorithm is responsible for all transcripting of these nonce specifications/generators.
///
/// The prover will be passed the commitments, and the commitments will be sent to all other
/// participants. No guarantees are made that the commitments are internally consistent (have the
/// same discrete logarithm across generators). Any Algorithm which specifies multiple generators
/// for a single nonce must handle that itself.
fn nonces(&self) -> Vec<Vec<C::G>>;
/// Generate an addendum to FROST's preprocessing stage.


@@ -1,13 +1,9 @@
// FROST defines its nonce as sum(Di, Ei * bi)
// Monero needs not just the nonce over G however, yet also over H
// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
//
// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
// of nonces, each against an arbitrary list of generators
// In order for this library to be robust, it supports generating an arbitrary amount of nonces,
// each against an arbitrary list of generators
//
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
// When representations across multiple generators are provided, a DLEq proof is also provided to
// confirm their integrity
use core::ops::Deref;
use std::{
@@ -24,32 +20,8 @@ use transcript::Transcript;
use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
use multiexp::multiexp_vartime;
use dleq::MultiDLEqProof;
use crate::{curve::Curve, Participant};
// Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
let mut transcript = T::new(b"FROST DLEq Aggregation v0.5");
transcript.append_message(b"context", context);
transcript
}
// Every participant proves for their commitments at the start of the protocol
// These proofs are verified sequentially, requiring independent transcripts
// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is
// challenged in order to create a commitment to it, carried in each independent transcript
// (effectively forking the original transcript)
//
// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be
// constructed). For higher level protocols, the transcript may have contextual info these proofs
// will then be bound to
fn dleq_transcript<T: Transcript>(context: &[u8]) -> T {
let mut transcript = T::new(b"FROST Commitments DLEq v0.5");
transcript.append_message(b"context", context);
transcript
}
// Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
// This is considered a single nonce as r = d + be
#[derive(Clone, Zeroize)]
@@ -69,7 +41,7 @@ impl<C: Curve> GeneratorCommitments<C> {
}
}
// A single nonce's commitments and relevant proofs
// A single nonce's commitments
#[derive(Clone, PartialEq, Eq)]
pub(crate) struct NonceCommitments<C: Curve> {
// Called generators as these commitments are indexed by generator later on
@@ -121,12 +93,6 @@ impl<C: Curve> NonceCommitments<C> {
t.append_message(b"commitment_E", commitments.0[1].to_bytes());
}
}
fn aggregation_factor<T: Transcript>(&self, context: &[u8]) -> C::F {
let mut transcript = aggregation_transcript::<T>(context);
self.transcript(&mut transcript);
<C as Curve>::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref())
}
}
/// Commitments for all the nonces across all their generators.
@@ -135,51 +101,26 @@ pub(crate) struct Commitments<C: Curve> {
// Called nonces as these commitments are indexed by nonce
// So to get the commitments for the first nonce, it'd be commitments.nonces[0]
pub(crate) nonces: Vec<NonceCommitments<C>>,
// DLEq Proof proving that each set of commitments were generated using a single pair of discrete
// logarithms
pub(crate) dleq: Option<MultiDLEqProof<C::G>>,
}
impl<C: Curve> Commitments<C> {
pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
pub(crate) fn new<R: RngCore + CryptoRng>(
rng: &mut R,
secret_share: &Zeroizing<C::F>,
planned_nonces: &[Vec<C::G>],
context: &[u8],
) -> (Vec<Nonce<C>>, Commitments<C>) {
let mut nonces = vec![];
let mut commitments = vec![];
let mut dleq_generators = vec![];
let mut dleq_nonces = vec![];
for generators in planned_nonces {
let (nonce, these_commitments): (Nonce<C>, _) =
NonceCommitments::new(&mut *rng, secret_share, generators);
if generators.len() > 1 {
dleq_generators.push(generators.clone());
dleq_nonces.push(Zeroizing::new(
(these_commitments.aggregation_factor::<T>(context) * nonce.0[1].deref()) +
nonce.0[0].deref(),
));
}
nonces.push(nonce);
commitments.push(these_commitments);
}
let dleq = if !dleq_generators.is_empty() {
Some(MultiDLEqProof::prove(
rng,
&mut dleq_transcript::<T>(context),
&dleq_generators,
&dleq_nonces,
))
} else {
None
};
(nonces, Commitments { nonces: commitments, dleq })
(nonces, Commitments { nonces: commitments })
}
pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
@@ -187,58 +128,20 @@ impl<C: Curve> Commitments<C> {
for nonce in &self.nonces {
nonce.transcript(t);
}
// Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in
// an exact order
// This means it shouldn't be possible for variadic generators to cause conflicts
if let Some(dleq) = &self.dleq {
t.append_message(b"dleq", dleq.serialize());
}
}
pub(crate) fn read<R: Read, T: Transcript>(
reader: &mut R,
generators: &[Vec<C::G>],
context: &[u8],
) -> io::Result<Self> {
pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {
let nonces = (0 .. generators.len())
.map(|i| NonceCommitments::read(reader, &generators[i]))
.collect::<Result<Vec<NonceCommitments<C>>, _>>()?;
let mut dleq_generators = vec![];
let mut dleq_nonces = vec![];
for (generators, nonce) in generators.iter().cloned().zip(&nonces) {
if generators.len() > 1 {
let binding = nonce.aggregation_factor::<T>(context);
let mut aggregated = vec![];
for commitments in &nonce.generators {
aggregated.push(commitments.0[0] + (commitments.0[1] * binding));
}
dleq_generators.push(generators);
dleq_nonces.push(aggregated);
}
}
let dleq = if !dleq_generators.is_empty() {
let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?;
dleq
.verify(&mut dleq_transcript::<T>(context), &dleq_generators, &dleq_nonces)
.map_err(|_| io::Error::other("invalid DLEq proof"))?;
Some(dleq)
} else {
None
};
Ok(Commitments { nonces, dleq })
Ok(Commitments { nonces })
}
pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
for nonce in &self.nonces {
nonce.write(writer)?;
}
if let Some(dleq) = &self.dleq {
dleq.write(writer)?;
}
Ok(())
}
}

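A sketch of the binomial nonce described at the top of this file: each nonce is a pair (d, e), bound into the effective nonce r = d + b e by a transcript-derived binding factor b. `bind_nonce` is a hypothetical name:

use group::Group;

// Returns the bound nonce r = d + (b * e) and its commitment r G
fn bind_nonce<G: Group>(d: G::Scalar, e: G::Scalar, b: G::Scalar) -> (G::Scalar, G) {
  let r = d + (b * e);
  (r, G::generator() * r)
}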

@@ -125,14 +125,8 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
let mut params = self.params;
let mut rng = ChaCha20Rng::from_seed(*seed.0);
// Get a challenge to the existing transcript for use when proving for the commitments
let commitments_challenge = params.algorithm.transcript().challenge(b"commitments");
let (nonces, commitments) = Commitments::new::<_, A::Transcript>(
&mut rng,
params.keys.secret_share(),
&params.algorithm.nonces(),
commitments_challenge.as_ref(),
);
let (nonces, commitments) =
Commitments::new::<_>(&mut rng, params.keys.secret_share(), &params.algorithm.nonces());
let addendum = params.algorithm.preprocess_addendum(&mut rng, &params.keys);
let preprocess = Preprocess { commitments, addendum };
@@ -141,27 +135,18 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
let mut blame_entropy = [0; 32];
rng.fill_bytes(&mut blame_entropy);
(
AlgorithmSignMachine {
params,
seed,
commitments_challenge,
nonces,
preprocess: preprocess.clone(),
blame_entropy,
},
AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy },
preprocess,
)
}
#[cfg(any(test, feature = "tests"))]
pub(crate) fn unsafe_override_preprocess(
mut self,
self,
nonces: Vec<Nonce<C>>,
preprocess: Preprocess<C, A::Addendum>,
) -> AlgorithmSignMachine<C, A> {
AlgorithmSignMachine {
commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"),
params: self.params,
seed: CachedPreprocess(Zeroizing::new([0; 32])),
@@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
params: Params<C, A>,
seed: CachedPreprocess,
#[zeroize(skip)]
commitments_challenge: <A::Transcript as Transcript>::Challenge,
pub(crate) nonces: Vec<Nonce<C>>,
// Skips the preprocess due to being too large a bound to feasibly enforce on users
#[zeroize(skip)]
@@ -285,11 +268,7 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
Ok(Preprocess {
commitments: Commitments::read::<_, A::Transcript>(
reader,
&self.params.algorithm.nonces(),
self.commitments_challenge.as_ref(),
)?,
commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,
addendum: self.params.algorithm.read_addendum(reader)?,
})
}


@@ -12,7 +12,7 @@ use crate::{
/// Tests for the nonce handling code.
pub mod nonces;
use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof};
use nonces::test_multi_nonce;
/// Vectorized test suite to ensure consistency.
pub mod vectors;
@@ -267,6 +267,4 @@ pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut
test_schnorr_blame::<R, C, H>(rng);
test_multi_nonce::<R, C>(rng);
test_invalid_commitment::<R, C>(rng);
test_invalid_dleq_proof::<R, C>(rng);
}


@@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::{ff::Field, Group, GroupEncoding};
use dleq::MultiDLEqProof;
pub use dkg::tests::{key_gen, recover_key};
use crate::{
Curve, Participant, ThresholdView, ThresholdKeys, FrostError,
algorithm::Algorithm,
sign::{Writable, SignMachine},
tests::{algorithm_machines, preprocess, sign},
tests::{algorithm_machines, sign},
};
#[derive(Clone)]
@@ -157,75 +155,3 @@ pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
sign(&mut *rng, &MultiNonce::<C>::new(), keys.clone(), machines, &[]);
}
/// Test malleating a commitment for a nonce across generators causes the preprocess to error.
pub fn test_invalid_commitment<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<R, C>(&mut *rng);
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
// Select a random participant to give an invalid commitment
let participants = preprocesses.keys().collect::<Vec<_>>();
let faulty = *participants
[usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
// Grab their preprocess
let mut preprocess = preprocesses.remove(&faulty).unwrap();
// Mutate one of the commitments
let nonce =
preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap();
let generators_len = nonce.generators.len();
nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0
[usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng);
// The commitments are validated at time of deserialization (read_preprocess)
// Accordingly, serialize it and read it again to make sure that errors
assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());
}
/// Test malleating the DLEq proof for a preprocess causes it to error.
pub fn test_invalid_dleq_proof<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
let keys = key_gen::<R, C>(&mut *rng);
let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
// Select a random participant to give an invalid DLEq proof
let participants = preprocesses.keys().collect::<Vec<_>>();
let faulty = *participants
[usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
// Invalidate it by replacing it with a completely different proof
let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))];
let mut preprocess = preprocesses.remove(&faulty).unwrap();
preprocess.commitments.dleq = Some(MultiDLEqProof::prove(
&mut *rng,
&mut RecommendedTranscript::new(b"Invalid DLEq Proof"),
&nonces::<C>(),
&dlogs,
));
assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());
// Also test None for a proof will cause an error
preprocess.commitments.dleq = None;
assert!(machines
.iter()
.next()
.unwrap()
.1
.read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
.is_err());
}


@@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding};
use crate::{
curve::Curve,
Participant, ThresholdCore, ThresholdKeys,
algorithm::{IetfTranscript, Hram, IetfSchnorr},
algorithm::{Hram, IetfSchnorr},
sign::{
Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,
PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
@@ -191,7 +191,6 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
nonces: vec![NonceCommitments {
generators: vec![GeneratorCommitments(these_commitments)],
}],
dleq: None,
},
addendum: (),
};
@@ -301,12 +300,8 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
}
// Also test it at the Commitments level
let (generated_nonces, commitments) = Commitments::<C>::new::<_, IetfTranscript>(
&mut TransparentRng(randomness),
&share,
&[vec![C::generator()]],
&[],
);
let (generated_nonces, commitments) =
Commitments::<C>::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]);
assert_eq!(generated_nonces.len(), 1);
assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);


@@ -101,6 +101,7 @@ allow-git = [
"https://github.com/rust-lang-nursery/lazy-static.rs",
"https://github.com/serai-dex/substrate-bip39",
"https://github.com/serai-dex/substrate",
"https://github.com/alloy-rs/alloy",
"https://github.com/monero-rs/base58-monero",
"https://github.com/kayabaNerve/dockertest-rs",
]


@@ -1,6 +1,3 @@
#!/bin/sh
geth --dev --networkid 5208 --datadir "eth-devnet" \
--http --http.api "web3,net,eth,miner" \
--http.addr 0.0.0.0 --http.port 8545 \
--http.vhosts="*" --http.corsdomain "*"
~/.foundry/bin/anvil --no-mining --slots-in-an-epoch 32


@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use crate::{Network, Os, mimalloc, os, write_dockerfile};
@@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) {
const DOWNLOAD_BITCOIN: &str = r#"
FROM alpine:latest as bitcoin
ENV BITCOIN_VERSION=26.0
ENV BITCOIN_VERSION=27.0
RUN apk --no-cache add git gnupg


@@ -1,5 +0,0 @@
use std::path::Path;
pub fn ethereum(_orchestration_path: &Path) {
// TODO
}


@@ -0,0 +1,36 @@
use crate::Network;
pub fn lighthouse(network: Network) -> (String, String, String) {
assert_ne!(network, Network::Dev);
#[rustfmt::skip]
const DOWNLOAD_LIGHTHOUSE: &str = r#"
FROM alpine:latest as lighthouse
ENV LIGHTHOUSE_VERSION=5.1.3
RUN apk --no-cache add git gnupg
# Download lighthouse
RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
# Verify the signature
RUN gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0
RUN gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
# Extract lighthouse
RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
"#;
let run_lighthouse = format!(
r#"
COPY --from=lighthouse --chown=ethereum lighthouse /bin
ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh
"#,
network.label()
);
(DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse)
}

View File

@@ -0,0 +1,6 @@
mod lighthouse;
#[allow(unused)]
pub use lighthouse::lighthouse;
mod nimbus;
pub use nimbus::nimbus;

View File

@@ -0,0 +1,49 @@
use crate::Network;
pub fn nimbus(network: Network) -> (String, String, String) {
assert_ne!(network, Network::Dev);
let platform = match std::env::consts::ARCH {
"x86_64" => "amd64",
"arm" => "arm32v7",
"aarch64" => "arm64v8",
_ => panic!("unsupported platform"),
};
#[rustfmt::skip]
let checksum = match platform {
"amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba",
"arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45",
"arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556",
_ => panic!("unsupported platform"),
};
#[rustfmt::skip]
let download_nimbus = format!(r#"
FROM alpine:latest as nimbus
ENV NIMBUS_VERSION=24.3.0
ENV NIMBUS_COMMIT=dc19b082
# Download nimbus
RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
# Extract nimbus
RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus
# Verify the checksum
RUN sha512sum nimbus | grep {checksum}
"#);
let run_nimbus = format!(
r#"
COPY --from=nimbus --chown=ethereum nimbus /bin
ADD /orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh
"#,
network.label()
);
(download_nimbus, String::new(), run_nimbus)
}
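One subtlety in download_nimbus above: because it's built with format!, {platform} is substituted at orchestration time, while doubled braces escape to a literal ${NIMBUS_VERSION} for Docker to expand at build time. A minimal, self-contained sketch of that escaping (the URL fragment is illustrative only):

// Demonstrates the brace escaping used in `download_nimbus`: `{platform}` is
// filled in by Rust's format!, while `${{NIMBUS_VERSION}}` survives as the
// literal `${NIMBUS_VERSION}` for Docker.
fn main() {
  let platform = "amd64";
  let line = format!("RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}.tar.gz");
  assert_eq!(line, "RUN tar xvf nimbus-eth2_Linux_amd64_${NIMBUS_VERSION}.tar.gz");
}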

View File

@@ -0,0 +1,14 @@
use crate::Network;
pub fn anvil(network: Network) -> (String, String, String) {
assert_eq!(network, Network::Dev);
const ANVIL_SETUP: &str = r#"
RUN curl -L https://foundry.paradigm.xyz | bash || exit 0
RUN ~/.foundry/bin/foundryup
EXPOSE 8545
"#;
(String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string())
}
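Since the dev network's execution layer is just anvil listening on the port exposed above, a liveness probe needs nothing beyond the standard library. A hypothetical check, assuming anvil is reachable at 127.0.0.1:8545:

// Hypothetical std-only probe of the anvil devnet's JSON-RPC endpoint.
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
  let body = r#"{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}"#;
  let mut stream = TcpStream::connect("127.0.0.1:8545")?;
  write!(
    stream,
    "POST / HTTP/1.1\r\nHost: 127.0.0.1\r\nContent-Type: application/json\r\nContent-Length: {}\r\nConnection: close\r\n\r\n{}",
    body.len(),
    body,
  )?;
  let mut response = String::new();
  stream.read_to_string(&mut response)?;
  // With --no-mining, the block number only advances as transactions fill slots.
  println!("{response}");
  Ok(())
}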

View File

@@ -0,0 +1,5 @@
mod reth;
pub use reth::reth;
mod anvil;
pub use anvil::anvil;

View File

@@ -0,0 +1,38 @@
use crate::Network;
pub fn reth(network: Network) -> (String, String, String) {
assert_ne!(network, Network::Dev);
#[rustfmt::skip]
const DOWNLOAD_RETH: &str = r#"
FROM alpine:latest as reth
ENV RETH_VERSION=0.2.0-beta.6
RUN apk --no-cache add git gnupg
# Download reth
RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
# Verify the signature
RUN gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4
RUN gpg --verify reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
# Extract reth
RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
"#;
let run_reth = format!(
r#"
COPY --from=reth --chown=ethereum reth /bin
EXPOSE 30303 9001 8545
ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh
"#,
network.label()
);
(DOWNLOAD_RETH.to_string(), String::new(), run_reth)
}

View File

@@ -0,0 +1,43 @@
use std::path::Path;
use crate::{Network, Os, mimalloc, os, write_dockerfile};
mod execution;
use execution::*;
mod consensus;
use consensus::*;
pub fn ethereum(orchestration_path: &Path, network: Network) {
let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) =
if network == Network::Dev {
(anvil(network), (String::new(), String::new(), String::new()))
} else {
// TODO: Select an EL/CL based off a RNG seeded from the public key
(reth(network), nimbus(network))
};
let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download;
let run = format!(
r#"
ADD /orchestration/{}/coins/ethereum/run.sh /run.sh
CMD ["/run.sh"]
"#,
network.label()
);
let run = mimalloc(Os::Debian).to_string() +
&os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") +
&el_run +
&cl_run +
&run;
let res = download + &run;
let mut ethereum_path = orchestration_path.to_path_buf();
ethereum_path.push("coins");
ethereum_path.push("ethereum");
ethereum_path.push("Dockerfile");
write_dockerfile(ethereum_path, &res);
}
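The TODO above calls for choosing the EL/CL pair from an RNG seeded by the public key. A hypothetical sketch of that selection (the helper name and the hashing are assumptions, not the repository's code):

// Hypothetical: derive a deterministic, per-validator client pairing from a
// public key, so client diversity emerges across the network without config.
fn select_clients(public_key: &[u8]) -> (&'static str, &'static str) {
  // Any stable hash works as a seed; a trivial byte fold keeps this self-contained.
  let seed = public_key.iter().fold(0u8, |acc, byte| acc ^ byte);
  // Only one execution layer is packaged so far; the consensus layer can
  // already alternate between the two helpers defined in this diff.
  let el = "reth";
  let cl = if (seed & 1) == 0 { "nimbus" } else { "lighthouse" };
  (el, cl)
}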

View File

@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use crate::{Network, Os, mimalloc, write_dockerfile};

View File

@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use zeroize::Zeroizing;

View File

@@ -266,7 +266,7 @@ fn dockerfiles(network: Network) {
let orchestration_path = orchestration_path(network);
bitcoin(&orchestration_path, network);
ethereum(&orchestration_path);
ethereum(&orchestration_path, network);
monero(&orchestration_path, network);
if network == Network::Dev {
monero_wallet_rpc(&orchestration_path);

View File

@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

View File

@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use zeroize::Zeroizing;

View File

@@ -1,4 +1,4 @@
use std::{path::Path};
use std::path::Path;
use zeroize::Zeroizing;
use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};

View File

@@ -0,0 +1,3 @@
#!/bin/sh
RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt

View File

@@ -0,0 +1,3 @@
#!/bin/sh
exit 1

View File

@@ -0,0 +1,8 @@
#!/bin/sh
#geth --dev --networkid 5208 \
# --http --http.api "web3,net,eth,miner" \
# --http.addr 0.0.0.0 --http.port 8545 \
# --http.vhosts="*" --http.corsdomain "*"
exit 1

Some files were not shown because too many files have changed in this diff.