31 Commits

Author SHA1 Message Date
Luke Parker
2ba6d77ee7 Reduce target peers a bit 2024-04-23 12:59:34 -04:00
Luke Parker
67a0ff825b Correct recv to try_recv when exhausting channel 2024-04-23 12:41:10 -04:00
Luke Parker
6518379981 Correct selection of to-try peers to prevent infinite loops when to-try < target 2024-04-23 12:41:00 -04:00
Luke Parker
0c6ab50e35 Use a constant for the target amount of peers 2024-04-23 12:40:50 -04:00
Luke Parker
f73ce37e18 Use a HashSet for which networks to try peer finding for
Prevents a flood of retries from individually failed attempts within a batch of
peer connection attempts.
2024-04-23 12:40:41 -04:00
Luke Parker
973dcf065e Correct port forward in orchestration 2024-04-23 09:47:11 -04:00
Luke Parker
8f5aaa8492 New coordinator port, genesis 2024-04-23 09:32:44 -04:00
Luke Parker
93ba8d840a Remove cbor 2024-04-23 09:31:33 -04:00
Luke Parker
485e454680 Inline broadcast_raw now that it doesn't have multiple callers 2024-04-23 09:31:17 -04:00
Luke Parker
c3b6abf020 Properly diversify ReqResMessageKind/GossipMessageKind 2024-04-23 09:31:09 -04:00
Luke Parker
f3ccf1cab0 Move keep alive, heartbeat, block to request/response 2024-04-23 09:30:58 -04:00
Luke Parker
0deee0ec6b Line for prior commit 2024-04-21 08:55:50 -04:00
Luke Parker
6b428948d4 Comment the insanely aggressive timeout future trace log 2024-04-21 08:55:32 -04:00
Luke Parker
6986257d4f Add missing continue to prevent dialing a node we're connected to 2024-04-21 08:37:06 -04:00
Luke Parker
a3c37cba21 Replace expect with debug log 2024-04-21 08:03:01 -04:00
Luke Parker
b5f2ff1397 Correct boolean NOT on is_fresh_dial 2024-04-21 07:30:18 -04:00
Luke Parker
c84931c6ae Retry if initial dials fail, not just upon disconnect 2024-04-21 07:26:29 -04:00
Luke Parker
63abf2d022 Restart coordinator peer finding upon disconnections 2024-04-21 07:03:03 -04:00
Luke Parker
a62d2d05ad Correct log which didn't work as intended 2024-04-20 19:55:17 -04:00
Luke Parker
967cc16748 Correct log targets in tendermint-machine 2024-04-20 19:55:06 -04:00
Luke Parker
ab4b8cc2d5 Better logs in tendermint-machine 2024-04-20 18:13:57 -04:00
Luke Parker
387ccbad3a Extend time in sync test 2024-04-18 16:39:16 -04:00
Luke Parker
26cdfdd824 fmt 2024-04-18 16:39:03 -04:00
Luke Parker
68e77384ac Don't broadcast added blocks
Online validators should inherently have them. Offline validators will receive
from the sync protocol.

This does somewhat eliminate the class of nodes who would follow the blockchain
(without validating it), yet that's fine for the performance benefit.
2024-04-18 16:38:52 -04:00
Luke Parker
68da88c1f3 Only reply to heartbeats after a certain distance 2024-04-18 16:38:43 -04:00
Luke Parker
2b481ab71e Ensure we don't reply to stale heartbeats 2024-04-18 16:38:21 -04:00
Luke Parker
05e6d81948 Only have some nodes respond to latent heartbeats
Also only respond if they're more than 2 blocks behind to minimize redundant
sending of blocks.
2024-04-18 16:38:16 -04:00
Luke Parker
e426cd00bd Correct protocol ID -> ID 2024-04-11 23:11:23 -04:00
Luke Parker
09e3881b7d Add worksmarter at the last minute 2024-04-11 16:08:14 -04:00
Luke Parker
10124ac4a8 Add Testnet 2 Config
Starts Tuesday, April 16th, with confirmed keys/boot nodes.
2024-04-11 15:49:32 -04:00
Luke Parker
1987983f88 Add bootnode code prior used in testnet-internal
Also performs the devnet/testnet differentiation done since the testnet branch.
2024-04-11 14:37:09 -04:00
182 changed files with 2949 additions and 8223 deletions

View File

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "27.0"
+    default: 24.0.1
 runs:
   using: "composite"

View File

@@ -10,7 +10,7 @@ inputs:
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
    required: false
-    default: "27.0"
+    default: 24.0.1
 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies
     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+      uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
       with:
-        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
+        version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
         cache: false
     - name: Run a Monero Regtest Node

View File

@@ -1 +1 @@
-nightly-2024-06-01
+nightly-2024-02-07

View File

@@ -30,7 +30,6 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p bitcoin-serai \
-            -p alloy-simple-request-transport \
             -p ethereum-serai \
            -p monero-generators \
            -p monero-serai

View File

@@ -28,5 +28,4 @@ jobs:
            -p std-shims \
            -p zalloc \
            -p serai-db \
-            -p serai-env \
-            -p simple-request
+            -p serai-env

View File

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies
      - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test

View File

@@ -19,4 +19,4 @@ jobs:
        uses: ./.github/actions/build-dependencies
      - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test

View File

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies
      - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test

View File

@@ -37,4 +37,4 @@ jobs:
        uses: ./.github/actions/build-dependencies
      - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test

View File

@@ -33,4 +33,4 @@ jobs:
        uses: ./.github/actions/build-dependencies
      - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test

View File

@@ -43,7 +43,6 @@ jobs:
            -p tendermint-machine \
            -p tributary-chain \
            -p serai-coordinator \
-            -p serai-orchestrator \
            -p serai-docker-tests
  test-substrate:
@@ -65,9 +64,7 @@ jobs:
            -p serai-validator-sets-pallet \
            -p serai-in-instructions-primitives \
            -p serai-in-instructions-pallet \
-            -p serai-signals-primitives \
            -p serai-signals-pallet \
-            -p serai-abi \
            -p serai-runtime \
            -p serai-node

Cargo.lock (generated; 2197 lines changed)

File diff suppressed because it is too large.

View File

@@ -2,8 +2,6 @@
 resolver = "2"
 members = [
   # Version patches
-  "patches/parking_lot_core",
-  "patches/parking_lot",
   "patches/zstd",
   "patches/rocksdb",
   "patches/proc-macro-crate",
@@ -38,11 +36,7 @@ members = [
   "crypto/schnorrkel",
   "coins/bitcoin",
-  "coins/ethereum/alloy-simple-request-transport",
   "coins/ethereum",
-  "coins/ethereum/relayer",
   "coins/monero/generators",
   "coins/monero",
@@ -115,10 +109,8 @@ panic = "unwind"
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
 # Needed due to dockertest's usage of `Rc`s when we need `Arc`s
-dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }
-parking_lot_core = { path = "patches/parking_lot_core" }
-parking_lot = { path = "patches/parking_lot" }
+dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }
 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
 # Needed for WAL compression

View File

@@ -23,7 +23,7 @@ thiserror = { version = "1", default-features = false, optional = true }
 zeroize = { version = "^1.5", default-features = false }
 rand_core = { version = "0.6", default-features = false }
-bitcoin = { version = "0.32", default-features = false }
+bitcoin = { version = "0.31", default-features = false, features = ["no-std"] }
 k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] }
@@ -36,7 +36,7 @@ serde_json = { version = "1", default-features = false, optional = true }
 simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true }
 [dev-dependencies]
-secp256k1 = { version = "0.29", default-features = false, features = ["std"] }
+secp256k1 = { version = "0.28", default-features = false, features = ["std"] }
 frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }

View File

@@ -195,13 +195,13 @@ impl Rpc {
         // If this was already successfully published, consider this having succeeded
         if let RpcError::RequestError(Error { code, .. }) = e {
           if code == RPC_VERIFY_ALREADY_IN_CHAIN {
-            return Ok(tx.compute_txid());
+            return Ok(tx.txid());
           }
         }
         Err(e)?
       }
     };
-    if txid != tx.compute_txid() {
+    if txid != tx.txid() {
       Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?;
     }
     Ok(txid)
@@ -215,7 +215,7 @@ impl Rpc {
     let tx: Transaction = encode::deserialize(&bytes)
       .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?;
-    let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array();
+    let mut tx_hash = *tx.txid().as_raw_hash().as_byte_array();
     tx_hash.reverse();
     if hash != &tx_hash {
       Err(RpcError::InvalidResponse("node replied with a different transaction"))?;
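
Both hunks pair the txid call with a byte reversal because the bitcoin crate returns the raw double-SHA256, while txids are conventionally displayed, and returned by RPCs, in reversed byte order. A minimal sketch of that conversion, assembled from the calls in this hunk (the helper name is ours, not a library API):

use bitcoin::{hashes::Hash, Transaction};

// Sketch: produce the display-order txid bytes compared against RPC output.
// On bitcoin 0.32 this is `compute_txid()`; on 0.31, `txid()`.
fn display_txid(tx: &Transaction) -> [u8; 32] {
  let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();
  hash.reverse(); // the raw hash is little-endian; display order is the reverse
  hash
}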

View File

@@ -39,7 +39,7 @@ fn test_algorithm() {
     .verify_schnorr(
       &Signature::from_slice(&sig)
         .expect("couldn't convert produced signature to secp256k1::Signature"),
-      &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(),
+      &Message::from(Hash::hash(MESSAGE)),
       &x_only(&keys[&Participant::new(1).unwrap()].group_key()),
     )
     .unwrap()

View File

@@ -4,7 +4,7 @@ use std_shims::{
   io::{self, Write},
 };
 #[cfg(feature = "std")]
-use std::io::{Read, BufReader};
+use std_shims::io::Read;
 use k256::{
   elliptic_curve::sec1::{Tag, ToEncodedPoint},
@@ -18,8 +18,8 @@ use frost::{
 };
 use bitcoin::{
-  consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction,
-  Block,
+  consensus::encode::serialize, key::TweakedPublicKey, address::Payload, OutPoint, ScriptBuf,
+  TxOut, Transaction, Block,
 };
 #[cfg(feature = "std")]
 use bitcoin::consensus::encode::Decodable;
@@ -46,12 +46,12 @@ pub fn tweak_keys(keys: &ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {
 /// Return the Taproot address payload for a public key.
 ///
 /// If the key is odd, this will return None.
-pub fn p2tr_script_buf(key: ProjectivePoint) -> Option<ScriptBuf> {
+pub fn address_payload(key: ProjectivePoint) -> Option<Payload> {
   if key.to_encoded_point(true).tag() != Tag::CompressedEvenY {
     return None;
   }
-  Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
+  Some(Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key))))
 }
 /// A spendable output.
@@ -89,17 +89,11 @@ impl ReceivedOutput {
   /// Read a ReceivedOutput from a generic satisfying Read.
   #[cfg(feature = "std")]
   pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
-    let offset = Secp256k1::read_F(r)?;
-    let output;
-    let outpoint;
-    {
-      let mut buf_r = BufReader::with_capacity(0, r);
-      output =
-        TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?;
-      outpoint =
-        OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?;
-    }
-    Ok(ReceivedOutput { offset, output, outpoint })
+    Ok(ReceivedOutput {
+      offset: Secp256k1::read_F(r)?,
+      output: TxOut::consensus_decode(r).map_err(|_| io::Error::other("invalid TxOut"))?,
+      outpoint: OutPoint::consensus_decode(r).map_err(|_| io::Error::other("invalid OutPoint"))?,
+    })
   }
   /// Write a ReceivedOutput to a generic satisfying Write.
@@ -130,7 +124,7 @@ impl Scanner {
   /// Returns None if this key can't be scanned for.
   pub fn new(key: ProjectivePoint) -> Option<Scanner> {
     let mut scripts = HashMap::new();
-    scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO);
+    scripts.insert(address_payload(key)?.script_pubkey(), Scalar::ZERO);
     Some(Scanner { key, scripts })
   }
@@ -147,8 +141,9 @@ impl Scanner {
     // chance of being even
     // That means this should terminate within a very small amount of iterations
     loop {
-      match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) {
-        Some(script) => {
+      match address_payload(self.key + (ProjectivePoint::GENERATOR * offset)) {
+        Some(address) => {
+          let script = address.script_pubkey();
           if self.scripts.contains_key(&script) {
             None?;
           }
@@ -171,7 +166,7 @@ impl Scanner {
         res.push(ReceivedOutput {
           offset: *offset,
           output: output.clone(),
-          outpoint: OutPoint::new(tx.compute_txid(), vout),
+          outpoint: OutPoint::new(tx.txid(), vout),
         });
       }
     }
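
Both names of this API (p2tr_script_buf and address_payload) share the precondition discussed above: an x-only Taproot key must have an even y-coordinate. A small sketch of the parity check both versions build on, assembled from the calls in this hunk:

use k256::{
  elliptic_curve::sec1::{Tag, ToEncodedPoint},
  ProjectivePoint,
};

// True if the point's y-coordinate is even, the precondition enforced
// before building a Taproot output for the key
fn has_even_y(key: &ProjectivePoint) -> bool {
  key.to_encoded_point(true).tag() == Tag::CompressedEvenY
}

Since a uniformly distributed key is even with probability one half, the offset loop above expects to find a usable key within roughly two iterations, which is what its "should terminate within a very small amount of iterations" comment relies on.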

View File

@@ -18,12 +18,12 @@ use bitcoin::{
   absolute::LockTime,
   script::{PushBytesBuf, ScriptBuf},
   transaction::{Version, Transaction},
-  OutPoint, Sequence, Witness, TxIn, Amount, TxOut,
+  OutPoint, Sequence, Witness, TxIn, Amount, TxOut, Address,
 };
 use crate::{
   crypto::Schnorr,
-  wallet::{ReceivedOutput, p2tr_script_buf},
+  wallet::{ReceivedOutput, address_payload},
 };
 #[rustfmt::skip]
@@ -61,11 +61,7 @@ pub struct SignableTransaction {
 }
 impl SignableTransaction {
-  fn calculate_weight(
-    inputs: usize,
-    payments: &[(ScriptBuf, u64)],
-    change: Option<&ScriptBuf>,
-  ) -> u64 {
+  fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 {
     // Expand this to a full transaction in order to use the bitcoin library's weight function
     let mut tx = Transaction {
       version: Version(2),
@@ -90,14 +86,14 @@ impl SignableTransaction {
       // The script pub key is not of a fixed size and does have to be used here
       .map(|payment| TxOut {
         value: Amount::from_sat(payment.1),
-        script_pubkey: payment.0.clone(),
+        script_pubkey: payment.0.script_pubkey(),
       })
       .collect(),
     };
     if let Some(change) = change {
       // Use a 0 value since we're currently unsure what the change amount will be, and since
       // the value is fixed size (so any value could be used here)
-      tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() });
+      tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() });
     }
     u64::from(tx.weight())
   }
@@ -125,8 +121,8 @@ impl SignableTransaction {
   /// If data is specified, an OP_RETURN output will be added with it.
   pub fn new(
     mut inputs: Vec<ReceivedOutput>,
-    payments: &[(ScriptBuf, u64)],
-    change: Option<ScriptBuf>,
+    payments: &[(Address, u64)],
+    change: Option<&Address>,
     data: Option<Vec<u8>>,
     fee_per_weight: u64,
   ) -> Result<SignableTransaction, TransactionError> {
@@ -163,7 +159,10 @@ impl SignableTransaction {
     let payment_sat = payments.iter().map(|payment| payment.1).sum::<u64>();
     let mut tx_outs = payments
       .iter()
-      .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() })
+      .map(|payment| TxOut {
+        value: Amount::from_sat(payment.1),
+        script_pubkey: payment.0.script_pubkey(),
+      })
       .collect::<Vec<_>>();
     // Add the OP_RETURN output
@@ -214,11 +213,12 @@ impl SignableTransaction {
     // If there's a change address, check if there's change to give it
     if let Some(change) = change {
-      let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(&change));
+      let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change));
       let fee_with_change = fee_per_weight * weight_with_change;
       if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) {
         if value >= DUST {
-          tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change });
+          tx_outs
+            .push(TxOut { value: Amount::from_sat(value), script_pubkey: change.script_pubkey() });
           weight = weight_with_change;
           needed_fee = fee_with_change;
         }
@@ -248,7 +248,7 @@ impl SignableTransaction {
   /// Returns the TX ID of the transaction this will create.
   pub fn txid(&self) -> [u8; 32] {
-    let mut res = self.tx.compute_txid().to_byte_array();
+    let mut res = self.tx.txid().to_byte_array();
     res.reverse();
     res
   }
@@ -288,7 +288,7 @@ impl SignableTransaction {
       transcript.append_message(b"signing_input", u32::try_from(i).unwrap().to_le_bytes());
       let offset = keys.clone().offset(self.offsets[i]);
-      if p2tr_script_buf(offset.group_key())? != self.prevouts[i].script_pubkey {
+      if address_payload(offset.group_key())?.script_pubkey() != self.prevouts[i].script_pubkey {
        None?;
      }
@@ -375,7 +375,7 @@ impl SignMachine<Transaction> for TransactionSignMachine {
    msg: &[u8],
  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
-      panic!("message was passed to the TransactionSignMachine when it generates its own");
+      panic!("message was passed to the TransactionMachine when it generates its own");
    }
    let commitments = (0 .. self.sigs.len())

View File

@@ -22,10 +22,11 @@ use bitcoin_serai::{
    hashes::Hash as HashTrait,
    blockdata::opcodes::all::OP_RETURN,
    script::{PushBytesBuf, Instruction, Instructions, Script},
+    address::NetworkChecked,
    OutPoint, Amount, TxOut, Transaction, Network, Address,
  },
  wallet::{
-    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
+    tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, SignableTransaction,
  },
  rpc::Rpc,
 };
@@ -47,7 +48,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
      "generatetoaddress",
      serde_json::json!([
        1,
-        Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()
+        Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())
      ]),
    )
    .await
@@ -68,7 +69,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint)
  assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0]));
  assert_eq!(outputs.len(), 1);
-  assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0));
+  assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].txid(), 0));
  assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat());
  assert_eq!(
@@ -192,7 +193,7 @@ async_sequential! {
    assert_eq!(output.offset(), Scalar::ZERO);
    let inputs = vec![output];
-    let addr = || p2tr_script_buf(key).unwrap();
+    let addr = || Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap());
    let payments = vec![(addr(), 1000)];
    assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok());
@@ -205,7 +206,7 @@
    // No change
    assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok());
    // Consolidation TX
-    assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok());
+    assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok());
    // Data
    assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok());
    // No outputs
@@ -228,7 +229,7 @@
    );
    assert_eq!(
-      SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0),
+      SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0),
      Err(TransactionError::TooLowFee),
    );
@@ -260,19 +261,20 @@
    // Declare payments, change, fee
    let payments = [
-      (p2tr_script_buf(key).unwrap(), 1005),
-      (p2tr_script_buf(offset_key).unwrap(), 1007)
+      (Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap()), 1005),
+      (Address::<NetworkChecked>::new(Network::Regtest, address_payload(offset_key).unwrap()), 1007)
    ];
    let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap();
    let change_key = key + (ProjectivePoint::GENERATOR * change_offset);
-    let change_addr = p2tr_script_buf(change_key).unwrap();
+    let change_addr =
+      Address::<NetworkChecked>::new(Network::Regtest, address_payload(change_key).unwrap());
    // Create and sign the TX
    let tx = SignableTransaction::new(
      vec![output.clone(), offset_output.clone()],
      &payments,
-      Some(change_addr.clone()),
+      Some(&change_addr),
      None,
      FEE
    ).unwrap();
@@ -285,7 +287,7 @@
    // Ensure we can scan it
    let outputs = scanner.scan_transaction(&tx);
    for (o, output) in outputs.iter().enumerate() {
-      assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap()));
+      assert_eq!(output.outpoint(), &OutPoint::new(tx.txid(), u32::try_from(o).unwrap()));
      assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output);
    }
@@ -297,7 +299,7 @@
    for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) {
      assert_eq!(
        output,
-        &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) },
+        &TxOut { script_pubkey: payment.0.script_pubkey(), value: Amount::from_sat(payment.1) },
      );
      assert_eq!(scanned.value(), payment.1);
    }
@@ -312,13 +314,13 @@
      input_value - payments.iter().map(|payment| payment.1).sum::<u64>() - needed_fee;
    assert_eq!(
      tx.output[2],
-      TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) },
+      TxOut { script_pubkey: change_addr.script_pubkey(), value: Amount::from_sat(change_amount) },
    );
    // This also tests send_raw_transaction and get_transaction, which the RPC test can't
    // effectively test
    rpc.send_raw_transaction(&tx).await.unwrap();
-    let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array();
+    let mut hash = *tx.txid().as_raw_hash().as_byte_array();
    hash.reverse();
    assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap());
    assert_eq!(expected_id, hash);
@@ -342,7 +344,7 @@
    &SignableTransaction::new(
      vec![output],
      &[],
-      Some(p2tr_script_buf(key).unwrap()),
+      Some(&Address::<NetworkChecked>::new(Network::Regtest, address_payload(key).unwrap())),
      Some(data.clone()),
      FEE
    ).unwrap()

View File

@@ -1,3 +1,7 @@
 # Solidity build outputs
 cache
 artifacts
+
+# Auto-generated ABI files
+src/abi/schnorr.rs
+src/abi/router.rs

View File

@@ -18,32 +18,28 @@ workspace = true
 [dependencies]
 thiserror = { version = "1", default-features = false }
-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
+eyre = { version = "0.6", default-features = false }
+sha3 = { version = "0.10", default-features = false, features = ["std"] }
 group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
+k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
+frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
-alloy-core = { version = "0.7", default-features = false }
-alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
-alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, features = ["k256"] }
-alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
-alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, optional = true }
+ethers-core = { version = "2", default-features = false }
+ethers-providers = { version = "2", default-features = false }
+ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
+[build-dependencies]
+ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
 [dev-dependencies]
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+hex = { version = "0.4", default-features = false, features = ["std"] }
+serde = { version = "1", default-features = false, features = ["std"] }
+serde_json = { version = "1", default-features = false, features = ["std"] }
+sha2 = { version = "0.10", default-features = false, features = ["std"] }
 tokio = { version = "1", features = ["macros"] }
-alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-[features]
-tests = ["alloy-node-bindings", "frost/tests"]

View File

@@ -3,12 +3,6 @@
 This package contains Ethereum-related functionality, specifically deploying and
 interacting with Serai contracts.
-While `monero-serai` and `bitcoin-serai` are general purpose libraries,
-`ethereum-serai` is Serai specific. If any of the utilities are generally
-desired, please fork and maintain your own copy to ensure the desired
-functionality is preserved, or open an issue to request we make this library
-general purpose.
 ### Dependencies
 - solc

View File

@@ -1,29 +0,0 @@
-[package]
-name = "alloy-simple-request-transport"
-version = "0.1.0"
-description = "A transport for alloy based off simple-request"
-license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport"
-authors = ["Luke Parker <lukeparker5132@gmail.com>"]
-edition = "2021"
-rust-version = "1.74"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-tower = "0.4"
-serde_json = { version = "1", default-features = false }
-simple-request = { path = "../../../common/request", default-features = false }
-alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false }
-
-[features]
-default = ["tls"]
-tls = ["simple-request/tls"]

View File

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2024 Luke Parker
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,4 +0,0 @@
-# Alloy Simple Request Transport
-
-A transport for alloy based on simple-request, a small HTTP client built around
-hyper.

View File

@@ -1,60 +0,0 @@
-#![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![doc = include_str!("../README.md")]
-
-use core::task;
-use std::io;
-
-use alloy_json_rpc::{RequestPacket, ResponsePacket};
-use alloy_transport::{TransportError, TransportErrorKind, TransportFut};
-
-use simple_request::{hyper, Request, Client};
-
-use tower::Service;
-
-#[derive(Clone, Debug)]
-pub struct SimpleRequest {
-  client: Client,
-  url: String,
-}
-
-impl SimpleRequest {
-  pub fn new(url: String) -> Self {
-    Self { client: Client::with_connection_pool(), url }
-  }
-}
-
-impl Service<RequestPacket> for SimpleRequest {
-  type Response = ResponsePacket;
-  type Error = TransportError;
-  type Future = TransportFut<'static>;
-
-  #[inline]
-  fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll<Result<(), Self::Error>> {
-    task::Poll::Ready(Ok(()))
-  }
-
-  #[inline]
-  fn call(&mut self, req: RequestPacket) -> Self::Future {
-    let inner = self.clone();
-    Box::pin(async move {
-      let packet = req.serialize().map_err(TransportError::SerError)?;
-      let request = Request::from(
-        hyper::Request::post(&inner.url)
-          .header("Content-Type", "application/json")
-          .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into())
-          .unwrap(),
-      );
-
-      let mut res = inner
-        .client
-        .request(request)
-        .await
-        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?
-        .body()
-        .await
-        .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?;
-
-      serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, ""))
-    })
-  }
-}

View File

@@ -1,5 +1,7 @@
 use std::process::Command;
+
+use ethers_contract::Abigen;
 fn main() {
   println!("cargo:rerun-if-changed=contracts/*");
   println!("cargo:rerun-if-changed=artifacts/*");
@@ -19,23 +21,22 @@ fn main() {
     "--base-path", ".",
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
-    "--via-ir", "--optimize",
-    "./contracts/IERC20.sol",
-    "./contracts/Schnorr.sol",
-    "./contracts/Deployer.sol",
-    "./contracts/Sandbox.sol",
-    "./contracts/Router.sol",
-    "./src/tests/contracts/Schnorr.sol",
-    "./src/tests/contracts/ERC20.sol",
-    "--no-color",
+    "--optimize",
+    "./contracts/Schnorr.sol", "./contracts/Router.sol",
   ];
-  let solc = Command::new("solc").args(args).output().unwrap();
-  assert!(solc.status.success());
-  for line in String::from_utf8(solc.stderr).unwrap().lines() {
-    assert!(!line.starts_with("Error:"));
-  }
+  assert!(Command::new("solc").args(args).status().unwrap().success());
+
+  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/schnorr.rs")
+    .unwrap();
+  Abigen::new("Router", "./artifacts/Router.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/router.rs")
+    .unwrap();
 }

View File

@@ -1,52 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
/*
The expected deployment process of the Router is as follows:
1) A transaction deploying Deployer is made. Then, a deterministic signature is
created such that an account with an unknown private key is the creator of
the contract. Anyone can fund this address, and once anyone does, the
transaction deploying Deployer can be published by anyone. No other
transaction may be made from that account.
2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
While such attacks would still be feasible if the Deployer's address was
controllable, the usage of a deterministic signature with a NUMS method
prevents that.
This doesn't have any denial-of-service risks and will resolve once anyone steps
forward as deployer. This does fail to guarantee an identical address across
every chain, though it enables letting anyone efficiently ask the Deployer for
the address (with the Deployer having an identical address on every chain).
Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
Deployer contract to use a consistent salt for the Router, yet the Router must
be deployed with a specific public key for Serai. Since Ethereum isn't able to
determine a valid public key (one the result of a Serai DKG) from a dishonest
public key, we have to allow multiple deployments with Serai being the one to
determine which to use.
The alternative would be to have a council publish the Serai key on-Ethereum,
with Serai verifying the published result. This would introduce a DoS risk in
the council not publishing the correct key/not publishing any key.
*/
contract Deployer {
event Deployment(bytes32 indexed init_code_hash, address created);
error DeploymentFailed();
function deploy(bytes memory init_code) external {
address created;
assembly {
created := create(0, add(init_code, 0x20), mload(init_code))
}
if (created == address(0)) {
revert DeploymentFailed();
}
// These may be emitted out of order upon re-entrancy
emit Deployment(keccak256(init_code), created);
}
}
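
For context on the deployment scheme described in the comment above: with plain CREATE, a contract's address depends only on its creator and that creator's nonce (the standard Ethereum formula; the notation is ours, not from this diff):

$$\mathrm{address} = \mathrm{keccak256}\big(\mathrm{RLP}([\,\mathrm{sender},\ \mathrm{nonce}\,])\big)[12..32]$$

Fixing the sender via the deterministic, NUMS-style signature therefore pins the Deployer's address on every chain, and the Deployer's sequential nonce pins each address it creates; the 2**80 complexity cited above is the birthday bound on 160-bit addresses.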

View File

@@ -1,20 +0,0 @@
// SPDX-License-Identifier: CC0
pragma solidity ^0.8.0;
interface IERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
event Approval(address indexed owner, address indexed spender, uint256 value);
function name() external view returns (string memory);
function symbol() external view returns (string memory);
function decimals() external view returns (uint8);
function totalSupply() external view returns (uint256);
function balanceOf(address owner) external view returns (uint256);
function transfer(address to, uint256 value) external returns (bool);
function transferFrom(address from, address to, uint256 value) external returns (bool);
function approve(address spender, uint256 value) external returns (bool);
function allowance(address owner, address spender) external view returns (uint256);
}

View File

@@ -1,24 +1,27 @@
 // SPDX-License-Identifier: AGPLv3
 pragma solidity ^0.8.0;
-import "./IERC20.sol";
 import "./Schnorr.sol";
-import "./Sandbox.sol";
-contract Router {
-  // Nonce is incremented for each batch of transactions executed/key update
+contract Router is Schnorr {
+  // Contract initializer
+  // TODO: Replace with a MuSig of the genesis validators
+  address public initializer;
+  // Nonce is incremented for each batch of transactions executed
   uint256 public nonce;
-  // Current public key's x-coordinate
-  // This key must always have the parity defined within the Schnorr contract
+  // fixed parity for the public keys used in this contract
+  uint8 constant public KEY_PARITY = 27;
+  // current public key's x-coordinate
+  // note: this key must always use the fixed parity defined above
   bytes32 public seraiKey;
   struct OutInstruction {
     address to;
-    Call[] calls;
     uint256 value;
+    bytes data;
   }
   struct Signature {
@@ -26,197 +29,62 @@
     bytes32 s;
   }
-  event SeraiKeyUpdated(
-    uint256 indexed nonce,
-    bytes32 indexed key,
-    Signature signature
-  );
-  event InInstruction(
-    address indexed from,
-    address indexed coin,
-    uint256 amount,
-    bytes instruction
-  );
   // success is a uint256 representing a bitfield of transaction successes
-  event Executed(
-    uint256 indexed nonce,
-    bytes32 indexed batch,
-    uint256 success,
-    Signature signature
-  );
+  event Executed(uint256 nonce, bytes32 batch, uint256 success);
   // error types
+  error NotInitializer();
+  error AlreadyInitialized();
   error InvalidKey();
-  error InvalidSignature();
-  error InvalidAmount();
-  error FailedTransfer();
   error TooManyTransactions();
-  modifier _updateSeraiKeyAtEndOfFn(
-    uint256 _nonce,
-    bytes32 key,
-    Signature memory sig
-  ) {
-    if (
-      (key == bytes32(0)) ||
-      ((bytes32(uint256(key) % Schnorr.Q)) != key)
-    ) {
-      revert InvalidKey();
-    }
-    _;
-    seraiKey = key;
-    emit SeraiKeyUpdated(_nonce, key, sig);
-  }
-  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
-    0,
-    _seraiKey,
-    Signature({ c: bytes32(0), s: bytes32(0) })
-  ) {
-    nonce = 1;
-  }
-  // updateSeraiKey validates the given Schnorr signature against the current
-  // public key, and if successful, updates the contract's public key to the
-  // given one.
+  constructor() {
+    initializer = msg.sender;
+  }
+  // initSeraiKey can be called by the contract initializer to set the first
+  // public key, only if the public key has yet to be set.
+  function initSeraiKey(bytes32 _seraiKey) external {
+    if (msg.sender != initializer) revert NotInitializer();
+    if (seraiKey != 0) revert AlreadyInitialized();
+    if (_seraiKey == bytes32(0)) revert InvalidKey();
+    seraiKey = _seraiKey;
+  }
+  // updateSeraiKey validates the given Schnorr signature against the current public key,
+  // and if successful, updates the contract's public key to the given one.
   function updateSeraiKey(
     bytes32 _seraiKey,
-    Signature calldata sig
-  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
-    bytes memory message =
-      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
-    nonce++;
-    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
-      revert InvalidSignature();
-    }
+    Signature memory sig
+  ) public {
+    if (_seraiKey == bytes32(0)) revert InvalidKey();
+    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
+    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
+    seraiKey = _seraiKey;
   }
-  function inInstruction(
-    address coin,
-    uint256 amount,
-    bytes memory instruction
-  ) external payable {
-    if (coin == address(0)) {
-      if (amount != msg.value) {
-        revert InvalidAmount();
-      }
-    } else {
-      (bool success, bytes memory res) =
-        address(coin).call(
-          abi.encodeWithSelector(
-            IERC20.transferFrom.selector,
-            msg.sender,
-            address(this),
-            amount
-          )
-        );
-      // Require there was nothing returned, which is done by some non-standard
-      // tokens, or that the ERC20 contract did in fact return true
-      bool nonStandardResOrTrue =
-        (res.length == 0) || abi.decode(res, (bool));
-      if (!(success && nonStandardResOrTrue)) {
-        revert FailedTransfer();
-      }
-    }
-    /*
-      Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
-      The amount instructed to transfer may not actually be the amount
-      transferred.
-      If we add nonReentrant to every single function which can effect the
-      balance, we can check the amount exactly matches. This prevents transfers of
-      less value than expected occurring, at least, not without an additional
-      transfer to top up the difference (which isn't routed through this contract
-      and accordingly isn't trying to artificially create events).
-      If we don't add nonReentrant, a transfer can be started, and then a new
-      transfer for the difference can follow it up (again and again until a
-      rounding error is reached). This contract would believe all transfers were
-      done in full, despite each only being done in part (except for the last
-      one).
-      Given fee-on-transfer tokens aren't intended to be supported, the only
-      token planned to be supported is Dai and it doesn't have any fee-on-transfer
-      logic, fee-on-transfer tokens aren't even able to be supported at this time,
-      we simply classify this entire class of tokens as non-standard
-      implementations which induce undefined behavior. It is the Serai network's
-      role not to add support for any non-standard implementations.
-    */
-    emit InInstruction(msg.sender, coin, amount, instruction);
-  }
-  // execute accepts a list of transactions to execute as well as a signature.
+  // execute accepts a list of transactions to execute as well as a Schnorr signature.
   // if signature verification passes, the given transactions are executed.
   // if signature verification fails, this function will revert.
   function execute(
     OutInstruction[] calldata transactions,
-    Signature calldata sig
-  ) external {
-    if (transactions.length > 256) {
-      revert TooManyTransactions();
-    }
-    bytes memory message =
-      abi.encode("execute", block.chainid, nonce, transactions);
-    uint256 executed_with_nonce = nonce;
+    Signature memory sig
+  ) public {
+    if (transactions.length > 256) revert TooManyTransactions();
+    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
     // This prevents re-entrancy from causing double spends yet does allow
     // out-of-order execution via re-entrancy
     nonce++;
-    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
-      revert InvalidSignature();
-    }
+    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
     uint256 successes;
     for(uint256 i = 0; i < transactions.length; i++) {
-      bool success;
-      // If there are no calls, send to `to` the value
-      if (transactions[i].calls.length == 0) {
-        (success, ) = transactions[i].to.call{
-          value: transactions[i].value,
-          gas: 5_000
-        }("");
-      } else {
-        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
-        // calls through that
-        //
-        // We could use a single sandbox in order to reduce gas costs, yet that
-        // risks one person creating an approval that's hooked before another
-        // user's intended action executes, in order to drain their coins
-        //
-        // While technically, that would be a flaw in the sandboxed flow, this
-        // is robust and prevents such flaws from being possible
-        //
-        // We also don't want people to set state via the Sandbox and expect it
-        // future available when anyone else could set a distinct value
-        Sandbox sandbox = new Sandbox();
-        (success, ) = address(sandbox).call{
-          value: transactions[i].value,
-          // TODO: Have the Call specify the gas up front
-          gas: 350_000
-        }(
-          abi.encodeWithSelector(
-            Sandbox.sandbox.selector,
-            transactions[i].calls
-          )
-        );
-      }
+      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
       assembly {
         successes := or(successes, shl(i, success))
       }
     }
-    emit Executed(
-      executed_with_nonce,
-      keccak256(message),
-      successes,
-      sig
-    );
+    emit Executed(nonce, message, successes);
   }
 }

View File

@@ -1,48 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;
struct Call {
address to;
uint256 value;
bytes data;
}
// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
error AlreadyCalled();
error CallsFailed();
function sandbox(Call[] calldata calls) external payable {
// Prevent re-entrancy due to this executing arbitrary calls from anyone
// and anywhere
bool called;
assembly { called := tload(0) }
if (called) {
revert AlreadyCalled();
}
assembly { tstore(0, 1) }
// Execute the calls, starting from 1
for (uint256 i = 1; i < calls.length; i++) {
(bool success, ) =
calls[i].to.call{ value: calls[i].value }(calls[i].data);
// If this call failed, execute the fallback (call 0)
if (!success) {
(success, ) =
calls[0].to.call{ value: address(this).balance }(calls[0].data);
// If this call also failed, revert entirely
if (!success) {
revert CallsFailed();
}
return;
}
}
// We don't clear the re-entrancy guard as this contract should never be
// called again, so there's no reason to spend the effort
}
}

View File

@@ -2,43 +2,38 @@
 pragma solidity ^0.8.0;
 // see https://github.com/noot/schnorr-verify for implementation details
-library Schnorr {
+contract Schnorr {
   // secp256k1 group order
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
-  // Fixed parity for the public keys used in this contract
-  // This avoids spending a word passing the parity in a similar style to
-  // Bitcoin's Taproot
-  uint8 constant public KEY_PARITY = 27;
   error InvalidSOrA();
-  error MalformedSignature();
+  error InvalidSignature();
-  // px := public key x-coord, where the public key has a parity of KEY_PARITY
+  // parity := public key y-coord parity (27 or 28)
+  // px := public key x-coord
   // message := 32-byte hash of the message
   // c := schnorr signature challenge
   // s := schnorr signature
   function verify(
+    uint8 parity,
     bytes32 px,
-    bytes memory message,
+    bytes32 message,
     bytes32 c,
     bytes32 s
-  ) internal pure returns (bool) {
+  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s) -> key
-    // We instead pass the following to obtain the nonce (not the key)
-    // Then we hash it and verify it matches the challenge
+    // ecrecover = (m, v, r, s);
     bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
     bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
-    // For safety, we want each input to ecrecover to not be 0 (sa, px, ca)
-    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
-    // That leaves us to check `sa` is non-zero
     if (sa == 0) revert InvalidSOrA();
-    address R = ecrecover(sa, KEY_PARITY, px, ca);
-    if (R == address(0)) revert MalformedSignature();
-    // Check the signature is correct by rebuilding the challenge
-    return c == keccak256(abi.encodePacked(R, px, message));
+    // the ecrecover precompile implementation checks that the `r` and `s`
+    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
+    // check if they're zero.
+    address R = ecrecover(sa, parity, px, ca);
+    if (R == address(0)) revert InvalidSignature();
+    return c == keccak256(
+      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
+    );
   }
 }
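
For reference, here is why feeding these values to ecrecover verifies a Schnorr signature; this derivation is our own sketch, in our own notation, not text from the repository. ecrecover$(h, v, r, t)$ returns the address of the point $r^{-1}(tV - hG)$, where $V$ is the curve point with x-coordinate $r$ and y-parity $v$. Both versions of `verify` pass $h = sa \equiv -s\,p_x$, $r = p_x$, and $t = ca \equiv -c\,p_x \pmod{Q}$, so $V$ is the public key $P$ and the precompile returns

$$\mathrm{addr}\big(p_x^{-1}(-c\,p_x P + s\,p_x G)\big) = \mathrm{addr}(sG - cP) = \mathrm{addr}(R),$$

the address of the signature's nonce point, since a valid signature satisfies $sG = R + cP$. The final keccak256 comparison rebuilds the challenge from that recovered address and checks it equals $c$; a forgery recovers a different nonce address and fails the check.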

View File

@@ -1,30 +0,0 @@
[package]
name = "serai-ethereum-relayer"
version = "0.1.0"
description = "A relayer for Serai's Ethereum transactions"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/relayer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] }
serai-env = { path = "../../../common/env" }
serai-db = { path = "../../../common/db" }
[features]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]

View File

@@ -1,15 +0,0 @@
AGPL-3.0-only license
Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@@ -1,4 +0,0 @@
-# Ethereum Transaction Relayer
-
-This server collects Ethereum router commands to be published, offering an RPC
-to fetch them.

View File

@@ -1,100 +0,0 @@
pub(crate) use tokio::{
io::{AsyncReadExt, AsyncWriteExt},
net::TcpListener,
};
use serai_db::{Get, DbTxn, Db as DbTrait};
#[tokio::main(flavor = "current_thread")]
async fn main() {
// Override the panic handler with one which will panic if any tokio task panics
{
let existing = std::panic::take_hook();
std::panic::set_hook(Box::new(move |panic| {
existing(panic);
const MSG: &str = "exiting the process due to a task panicking";
println!("{MSG}");
log::error!("{MSG}");
std::process::exit(1);
}));
}
if std::env::var("RUST_LOG").is_err() {
std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
}
env_logger::init();
log::info!("Starting Ethereum relayer server...");
// Open the DB
#[allow(unused_variables, unreachable_code)]
let db = {
#[cfg(all(feature = "parity-db", feature = "rocksdb"))]
panic!("built with parity-db and rocksdb");
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
let db =
serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
#[cfg(feature = "rocksdb")]
let db =
serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
db
};
// Start command recipience server
// This should not be publicly exposed
// TODO: Add auth
tokio::spawn({
let db = db.clone();
async move {
// 5132 ^ ((b'E' << 8) | b'R')
let server = TcpListener::bind("0.0.0.0:20830").await.unwrap();
loop {
let (mut socket, _) = server.accept().await.unwrap();
let db = db.clone();
tokio::spawn(async move {
let mut db = db.clone();
loop {
let Ok(msg_len) = socket.read_u32_le().await else { break };
let mut buf = vec![0; usize::try_from(msg_len).unwrap()];
let Ok(_) = socket.read_exact(&mut buf).await else { break };
if buf.len() < 5 {
break;
}
let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap());
let mut txn = db.txn();
txn.put(nonce.to_le_bytes(), &buf[4 ..]);
txn.commit();
let Ok(()) = socket.write_all(&[1]).await else { break };
log::info!("received signed command #{nonce}");
}
});
}
}
});
// Start command fetch server
// 5132 ^ ((b'E' << 8) | b'R') + 1
let server = TcpListener::bind("0.0.0.0:20831").await.unwrap();
loop {
let (mut socket, _) = server.accept().await.unwrap();
let db = db.clone();
tokio::spawn(async move {
let db = db.clone();
loop {
// Nonce to get the router command for
let mut buf = vec![0; 4];
let Ok(_) = socket.read_exact(&mut buf).await else { break };
let command = db.get(&buf[.. 4]).unwrap_or(vec![]);
let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await
else {
break;
};
let Ok(()) = socket.write_all(&command).await else { break };
}
});
}
}
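For reference, a minimal client for the command-recipience protocol above might look as follows (the address and function name are illustrative, not part of the codebase): each frame is a little-endian u32 length followed by the payload, whose first 4 bytes are the command's nonce, and the server acknowledges persistence with a single `1` byte.

  use tokio::{
    io::{AsyncReadExt, AsyncWriteExt},
    net::TcpStream,
  };

  // Hypothetical client sketch for the length-prefixed protocol served on port 20830
  async fn publish_command(nonce: u32, command: &[u8]) -> std::io::Result<()> {
    let mut socket = TcpStream::connect("127.0.0.1:20830").await?;
    // Payload = 4-byte little-endian nonce, then the signed command
    let mut payload = nonce.to_le_bytes().to_vec();
    payload.extend_from_slice(command);
    socket.write_u32_le(u32::try_from(payload.len()).unwrap()).await?;
    socket.write_all(&payload).await?;
    // The server writes back a single `1` byte once the command is persisted
    assert_eq!(socket.read_u8().await?, 1);
    Ok(())
  }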

@@ -1,37 +1,6 @@
-use alloy_sol_types::sol;
-
 #[rustfmt::skip]
-#[allow(warnings)]
-#[allow(needless_pass_by_value)]
 #[allow(clippy::all)]
-#[allow(clippy::ignored_unit_patterns)]
-#[allow(clippy::redundant_closure_for_method_calls)]
-mod erc20_container {
-  use super::*;
-  sol!("contracts/IERC20.sol");
-}
-pub use erc20_container::IERC20 as erc20;
+pub(crate) mod schnorr;

 #[rustfmt::skip]
-#[allow(warnings)]
-#[allow(needless_pass_by_value)]
 #[allow(clippy::all)]
-#[allow(clippy::ignored_unit_patterns)]
-#[allow(clippy::redundant_closure_for_method_calls)]
-mod deployer_container {
-  use super::*;
-  sol!("contracts/Deployer.sol");
-}
-pub use deployer_container::Deployer as deployer;
-
-#[rustfmt::skip]
-#[allow(warnings)]
-#[allow(needless_pass_by_value)]
-#[allow(clippy::all)]
-#[allow(clippy::ignored_unit_patterns)]
-#[allow(clippy::redundant_closure_for_method_calls)]
-mod router_container {
-  use super::*;
-  sol!(Router, "artifacts/Router.abi");
-}
-pub use router_container::Router as router;
+pub(crate) mod router;

@@ -1,188 +1,91 @@
+use sha3::{Digest, Keccak256};
+
 use group::ff::PrimeField;
 use k256::{
-  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
-  ProjectivePoint, Scalar, U256 as KU256,
+  elliptic_curve::{
+    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
+  },
+  ProjectivePoint, Scalar, U256,
 };
-#[cfg(test)]
-use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

 use frost::{
   algorithm::{Hram, SchnorrSignature},
-  curve::{Ciphersuite, Secp256k1},
+  curve::Secp256k1,
 };

-use alloy_core::primitives::{Parity, Signature as AlloySignature};
-use alloy_consensus::{SignableTransaction, Signed, TxLegacy};
-
-use crate::abi::router::{Signature as AbiSignature};

 pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
-  alloy_core::primitives::keccak256(data).into()
+  Keccak256::digest(data).into()
 }

-pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
-  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
-}
-
-pub fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
   // Last 20 bytes of the hash of the concatenated x and y coordinates
   // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
   keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
 }

-/// Deterministically sign a transaction.
-///
-/// This function panics if passed a transaction with a non-None chain ID.
-pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
-  assert!(
-    tx.chain_id.is_none(),
-    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
-  );
-
-  let sig_hash = tx.signature_hash().0;
-  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
-  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
-  loop {
-    let r_bytes: [u8; 32] = r.to_repr().into();
-    let s_bytes: [u8; 32] = s.to_repr().into();
-    let v = Parity::NonEip155(false);
-    let signature =
-      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
-    let tx = tx.clone().into_signed(signature);
-    if tx.recover_signer().is_ok() {
-      return tx;
-    }
-
-    // Re-hash until valid
-    r = hash_to_scalar(r_bytes.as_ref());
-    s = hash_to_scalar(s_bytes.as_ref());
-  }
-}
-
-/// The public key for a Schnorr-signing account.
 #[allow(non_snake_case)]
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct PublicKey {
-  pub(crate) A: ProjectivePoint,
-  pub(crate) px: Scalar,
+  pub A: ProjectivePoint,
+  pub px: Scalar,
+  pub parity: u8,
 }

 impl PublicKey {
-  /// Construct a new `PublicKey`.
-  ///
-  /// This will return None if the provided point isn't eligible to be a public key (due to
-  /// bounds such as parity).
   #[allow(non_snake_case)]
   pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
     let affine = A.to_affine();
-    // Only allow even keys to save a word within Ethereum
-    let is_odd = bool::from(affine.y_is_odd());
-    if is_odd {
+    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
+    if parity != 27 {
       None?;
     }

     let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
+    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
     // Return None if a reduction would occur
-    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
-    // headache/concern to have
-    // This does ban a trivial amount of public keys
     if x_coord_scalar.to_repr() != x_coord {
       None?;
     }

-    Some(PublicKey { A, px: x_coord_scalar })
-  }
-
-  pub fn point(&self) -> ProjectivePoint {
-    self.A
-  }
-
-  pub(crate) fn eth_repr(&self) -> [u8; 32] {
-    self.px.to_repr().into()
-  }
-
-  #[cfg(test)]
-  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
-    #[allow(non_snake_case)]
-    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
-    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
+    Some(PublicKey { A, px: x_coord_scalar, parity })
   }
 }

-/// The HRAm to use for the Schnorr contract.
 #[derive(Clone, Default)]
 pub struct EthereumHram {}
 impl Hram<Secp256k1> for EthereumHram {
   #[allow(non_snake_case)]
   fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    let x_coord = A.to_affine().x();
+    let a_encoded_point = A.to_encoded_point(true);
+    let mut a_encoded = a_encoded_point.as_ref().to_owned();
+    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
+    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));

     let mut data = address(R).to_vec();
-    data.extend(x_coord.as_slice());
+    data.append(&mut a_encoded);
     data.extend(m);

-    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
+    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
   }
 }

-/// A signature for the Schnorr contract.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
 pub struct Signature {
   pub(crate) c: Scalar,
   pub(crate) s: Scalar,
 }
 impl Signature {
-  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
-    #[allow(non_snake_case)]
-    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
-    EthereumHram::hram(&R, &public_key.A, message) == self.c
-  }
-
-  /// Construct a new `Signature`.
-  ///
-  /// This will return None if the signature is invalid.
   pub fn new(
     public_key: &PublicKey,
-    message: &[u8],
+    chain_id: U256,
+    m: &[u8],
     signature: SchnorrSignature<Secp256k1>,
   ) -> Option<Signature> {
-    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
+    let c = EthereumHram::hram(
+      &signature.R,
+      &public_key.A,
+      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
+    );
     if !signature.verify(public_key.A, c) {
       None?;
     }

-    let res = Signature { c, s: signature.s };
-    assert!(res.verify(public_key, message));
-    Some(res)
-  }
-
-  pub fn c(&self) -> Scalar {
-    self.c
-  }
-  pub fn s(&self) -> Scalar {
-    self.s
-  }
-
-  pub fn to_bytes(&self) -> [u8; 64] {
-    let mut res = [0; 64];
-    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
-    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
-    res
-  }
-
-  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
-    let mut reader = bytes.as_slice();
-    let c = Secp256k1::read_F(&mut reader)?;
-    let s = Secp256k1::read_F(&mut reader)?;
-    Ok(Signature { c, s })
-  }
-}
-
-impl From<&Signature> for AbiSignature {
-  fn from(sig: &Signature) -> AbiSignature {
-    let c: [u8; 32] = sig.c.to_repr().into();
-    let s: [u8; 32] = sig.s.to_repr().into();
-    AbiSignature { c: c.into(), s: s.into() }
+    Some(Signature { c, s: signature.s })
   }
 }
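For clarity, both HRAMs feed the same verification shape: a Signature { c, s } for key A over message m is valid when the challenge rebuilds from the recovered nonce. A free-standing restatement of the removed `verify` method (illustrative only, assuming this module's EthereumHram in scope):

  use k256::{ProjectivePoint, Scalar};
  use frost::{algorithm::Hram, curve::{Ciphersuite, Secp256k1}};

  fn schnorr_verifies(a: ProjectivePoint, c: Scalar, s: Scalar, m: &[u8]) -> bool {
    // Recover the nonce commitment from the verification equation R = sG - cA,
    // then check the challenge recomputes to c
    let r = (Secp256k1::generator() * s) - (a * c);
    EthereumHram::hram(&r, &a, m) == c
  }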

@@ -1,113 +0,0 @@
use std::sync::Arc;
use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};
use alloy_sol_types::{SolCall, SolEvent};
use alloy_rpc_types::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::{
Error,
crypto::{self, keccak256, PublicKey},
router::Router,
};
pub use crate::abi::deployer as abi;
/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
/// Obtain the transaction to deploy this contract, already signed.
///
/// The account this transaction is sent from (which is populated in `from`) must be sufficiently
/// funded for this transaction to be submitted. This account has no known private key to anyone,
/// so ETH sent can be neither misappropriated nor returned.
pub fn deployment_tx() -> Signed<TxLegacy> {
let bytecode = include_str!("../artifacts/Deployer.bin");
let bytecode =
Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");
let tx = TxLegacy {
chain_id: None,
nonce: 0,
gas_price: 100_000_000_000u128,
// TODO: Use a more accurate gas limit
gas_limit: 1_000_000u128,
to: TxKind::Create,
value: U256::ZERO,
input: bytecode,
};
crypto::deterministically_sign(&tx)
}
/// Obtain the deterministic address for this contract.
pub fn address() -> [u8; 20] {
let deployer_deployer =
Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
**Address::create(&deployer_deployer, 0)
}
/// Construct a new view of the `Deployer`.
pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
let address = Self::address();
let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?;
// Contract has yet to be deployed
if code.is_empty() {
return Ok(None);
}
Ok(Some(Self))
}
/// Yield the `ContractCall` necessary to deploy the Router.
pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
TxLegacy {
to: TxKind::Call(Self::address().into()),
input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
gas_limit: 1_000_000,
..Default::default()
}
}
/// Find the first Router deployed with the specified key as its first key.
///
/// This is the Router Serai will use, and is the only way to construct a `Router`.
pub async fn find_router(
&self,
provider: Arc<RootProvider<SimpleRequest>>,
key: &PublicKey,
) -> Result<Option<Router>, Error> {
let init_code = Router::init_code(key);
let init_code_hash = keccak256(&init_code);
#[cfg(not(test))]
let to_block = BlockNumberOrTag::Finalized;
#[cfg(test)]
let to_block = BlockNumberOrTag::Latest;
// Find the first log using this init code (where the init code is binding to the key)
// TODO: Make an abstraction for event filtering (de-duplicating common code)
let filter =
Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
let filter = filter.topic1(B256::from(init_code_hash));
let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let Some(first_log) = logs.first() else { return Ok(None) };
let router = first_log
.log_decode::<abi::Deployment>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.created;
Ok(Some(Router::new(provider, router)))
}
}
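This is Nick's method: `deployment_tx` carries a fixed, publicly-derived signature, so its recovered signer is an address nobody knows a key for. Anyone may fund that signer and broadcast the fixed transaction, and since a CREATE address depends solely on the sender and nonce (keccak256(rlp([sender, 0]))[12 ..], which is what `Address::create(&deployer_deployer, 0)` computes above), the Deployer lands at the same address on every compatible chain.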

@@ -1,105 +0,0 @@
use std::{sync::Arc, collections::HashSet};
use alloy_core::primitives::{Address, B256, U256};
use alloy_sol_types::{SolInterface, SolEvent};
use alloy_rpc_types::Filter;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};
use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};
#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
pub id: [u8; 32],
pub from: [u8; 20],
pub amount: U256,
pub data: Vec<u8>,
}
/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
/// Construct a new view of the specified ERC20 contract.
pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {
Self(provider, Address::from(&address))
}
pub async fn top_level_transfers(
&self,
block: u64,
to: [u8; 20],
) -> Result<Vec<TopLevelErc20Transfer>, Error> {
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
let mut to_topic = [0; 32];
to_topic[12 ..].copy_from_slice(&to);
let filter = filter.topic2(B256::from(to_topic));
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let mut handled = HashSet::new();
let mut top_level_transfers = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx =
self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?;
// If this is a top-level call...
if tx.to == Some(self.1) {
// And we recognize the call...
// Don't validate the encoding as this can't be re-encoded to an identical bytestring due
// to the InInstruction appended
if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
// Extract the top-level call's from/to/value
let (from, call_to, value) = match call {
IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
(from, call_to, value)
}
// Treat any other function selectors as unrecognized
_ => continue,
};
let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;
// Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
// internal transfer
if (log.from != from) || (call_to != to) || (value != log.value) {
continue;
}
// Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
// the only log we handle
if handled.contains(&tx_id) {
continue;
}
handled.insert(tx_id);
// Read the data appended after
let encoded = call.abi_encode();
let data = tx.input.as_ref()[encoded.len() ..].to_vec();
// Push the transfer
top_level_transfers.push(TopLevelErc20Transfer {
// Since we'll only handle one log for this TX, set the ID to the TX ID
id: *tx_id,
from: *log.from.0,
amount: log.value,
data,
});
}
}
}
Ok(top_level_transfers)
}
}
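To illustrate the convention `top_level_transfers` parses: the depositor's instruction bytes ride after the ABI-encoded call, which the EVM ignores as trailing calldata and the code above slices back off with `tx.input.as_ref()[encoded.len() ..]`. A hypothetical construction of such calldata (names are illustrative; `abi` is this module's `crate::abi::erc20` re-export):

  use alloy_core::primitives::{Address, U256};
  use alloy_sol_types::SolCall;

  fn transfer_with_instruction(to: Address, value: U256, instruction: &[u8]) -> Vec<u8> {
    // ABI-encoded transfer(to, value), with the InInstruction appended after it
    let mut calldata = abi::transferCall::new((to, value)).abi_encode();
    calldata.extend_from_slice(instruction);
    calldata
  }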

@@ -1,35 +1,16 @@
 use thiserror::Error;

-pub mod alloy {
-  pub use alloy_core::primitives;
-  pub use alloy_core as core;
-  pub use alloy_sol_types as sol_types;
-  pub use alloy_consensus as consensus;
-  pub use alloy_network as network;
-  pub use alloy_rpc_types as rpc_types;
-  pub use alloy_simple_request_transport as simple_request_transport;
-  pub use alloy_rpc_client as rpc_client;
-  pub use alloy_provider as provider;
-}
-
 pub mod crypto;
 pub(crate) mod abi;
-pub mod erc20;
-pub mod deployer;
+pub mod schnorr;
 pub mod router;
-pub mod machine;

-#[cfg(any(test, feature = "tests"))]
-pub mod tests;
+#[cfg(test)]
+mod tests;

-#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
+#[derive(Error, Debug)]
 pub enum Error {
   #[error("failed to verify Schnorr signature")]
   InvalidSignature,
-  #[error("couldn't make call/send TX")]
-  ConnectionError,
 }

@@ -1,414 +0,0 @@
use std::{
io::{self, Read},
collections::HashMap,
};
use rand_core::{RngCore, CryptoRng};
use transcript::{Transcript, RecommendedTranscript};
use group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Secp256k1},
Participant, ThresholdKeys, FrostError,
algorithm::Schnorr,
sign::*,
};
use alloy_core::primitives::U256;
use crate::{
crypto::{PublicKey, EthereumHram, Signature},
router::{
abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
Router,
},
};
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
pub to: [u8; 20],
pub value: U256,
pub data: Vec<u8>,
}
impl Call {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut to = [0; 20];
reader.read_exact(&mut to)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
let mut data_len = {
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
};
// A valid DoS would be to claim 4 GB of data is present while sending only 4 bytes
// We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
let mut data = vec![];
while data_len > 0 {
let chunk_len = data_len.min(1024);
let mut chunk = vec![0; chunk_len];
reader.read_exact(&mut chunk)?;
data.extend(&chunk);
data_len -= chunk_len;
}
Ok(Call { to, value, data })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.to)?;
writer.write_all(&self.value.as_le_bytes())?;
let data_len = u32::try_from(self.data.len())
.map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
writer.write_all(&data_len.to_le_bytes())?;
writer.write_all(&self.data)
}
}
impl From<Call> for AbiCall {
fn from(call: Call) -> AbiCall {
AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
Direct([u8; 20]),
Calls(Vec<Call>),
}
impl OutInstructionTarget {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut addr = [0; 20];
reader.read_exact(&mut addr)?;
Ok(OutInstructionTarget::Direct(addr))
}
1 => {
let mut calls_len = [0; 4];
reader.read_exact(&mut calls_len)?;
let calls_len = u32::from_le_bytes(calls_len);
let mut calls = vec![];
for _ in 0 .. calls_len {
calls.push(Call::read(reader)?);
}
Ok(OutInstructionTarget::Calls(calls))
}
_ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
}
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
OutInstructionTarget::Direct(addr) => {
writer.write_all(&[0])?;
writer.write_all(addr)?;
}
OutInstructionTarget::Calls(calls) => {
writer.write_all(&[1])?;
let call_len = u32::try_from(calls.len())
.map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
writer.write_all(&call_len.to_le_bytes())?;
for call in calls {
call.write(writer)?;
}
}
}
Ok(())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
pub target: OutInstructionTarget,
pub value: U256,
}
impl OutInstruction {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let target = OutInstructionTarget::read(reader)?;
let value = {
let mut value_bytes = [0; 32];
reader.read_exact(&mut value_bytes)?;
U256::from_le_slice(&value_bytes)
};
Ok(OutInstruction { target, value })
}
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.target.write(writer)?;
writer.write_all(&self.value.as_le_bytes())
}
}
impl From<OutInstruction> for AbiOutInstruction {
fn from(instruction: OutInstruction) -> AbiOutInstruction {
match instruction.target {
OutInstructionTarget::Direct(addr) => {
AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
}
OutInstructionTarget::Calls(calls) => AbiOutInstruction {
to: [0; 20].into(),
calls: calls.into_iter().map(Into::into).collect(),
value: instruction.value,
},
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}
impl RouterCommand {
pub fn msg(&self) -> Vec<u8> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
Router::update_serai_key_message(*chain_id, *nonce, key)
}
RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
*chain_id,
*nonce,
outs.iter().map(|out| out.clone().into()).collect(),
),
}
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0xff];
reader.read_exact(&mut kind)?;
match kind[0] {
0 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let key = PublicKey::new(Secp256k1::read_G(reader)?)
.ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
Ok(RouterCommand::UpdateSeraiKey {
chain_id: U256::from_le_slice(&chain_id),
nonce: U256::from_le_slice(&nonce),
key,
})
}
1 => {
let mut chain_id = [0; 32];
reader.read_exact(&mut chain_id)?;
let chain_id = U256::from_le_slice(&chain_id);
let mut nonce = [0; 32];
reader.read_exact(&mut nonce)?;
let nonce = U256::from_le_slice(&nonce);
let mut outs_len = [0; 4];
reader.read_exact(&mut outs_len)?;
let outs_len = u32::from_le_bytes(outs_len);
let mut outs = vec![];
for _ in 0 .. outs_len {
outs.push(OutInstruction::read(reader)?);
}
Ok(RouterCommand::Execute { chain_id, nonce, outs })
}
_ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
}
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
writer.write_all(&[0])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&key.A.to_bytes())
}
RouterCommand::Execute { chain_id, nonce, outs } => {
writer.write_all(&[1])?;
writer.write_all(&chain_id.as_le_bytes())?;
writer.write_all(&nonce.as_le_bytes())?;
writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
for out in outs {
out.write(writer)?;
}
Ok(())
}
}
}
pub fn serialize(&self) -> Vec<u8> {
let mut res = vec![];
self.write(&mut res).unwrap();
res
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
command: RouterCommand,
signature: Signature,
}
impl SignedRouterCommand {
pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
let signature = Signature { c, s };
if !signature.verify(key, &command.msg()) {
None?
}
Some(SignedRouterCommand { command, signature })
}
pub fn command(&self) -> &RouterCommand {
&self.command
}
pub fn signature(&self) -> &Signature {
&self.signature
}
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let command = RouterCommand::read(reader)?;
let mut sig = [0; 64];
reader.read_exact(&mut sig)?;
let signature = Signature::from_bytes(sig)?;
Ok(SignedRouterCommand { command, signature })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
self.command.write(writer)?;
writer.write_all(&self.signature.to_bytes())
}
}
pub struct RouterCommandMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl RouterCommandMachine {
pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
// The Schnorr algorithm should be fine without this, even when using the IETF variant
// If this is better and more comprehensive, we should do it, even if not necessary
let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
let key = keys.group_key();
transcript.append_message(b"key", key.to_bytes());
transcript.append_message(b"command", command.serialize());
Some(Self {
key: PublicKey::new(key)?,
command,
machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
})
}
}
impl PreprocessMachine for RouterCommandMachine {
type Preprocess = Preprocess<Secp256k1, ()>;
type Signature = SignedRouterCommand;
type SignMachine = RouterCommandSignMachine;
fn preprocess<R: RngCore + CryptoRng>(
self,
rng: &mut R,
) -> (Self::SignMachine, Self::Preprocess) {
let (machine, preprocess) = self.machine.preprocess(rng);
(RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
}
}
pub struct RouterCommandSignMachine {
key: PublicKey,
command: RouterCommand,
machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
type Params = ();
type Keys = ThresholdKeys<Secp256k1>;
type Preprocess = Preprocess<Secp256k1, ()>;
type SignatureShare = SignatureShare<Secp256k1>;
type SignatureMachine = RouterCommandSignatureMachine;
fn cache(self) -> CachedPreprocess {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn from_cache(
(): (),
_: ThresholdKeys<Secp256k1>,
_: CachedPreprocess,
) -> (Self, Self::Preprocess) {
unimplemented!(
"RouterCommand machines don't support caching their preprocesses due to {}",
"being already bound to a specific command"
);
}
fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
self.machine.read_preprocess(reader)
}
fn sign(
self,
commitments: HashMap<Participant, Self::Preprocess>,
msg: &[u8],
) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
if !msg.is_empty() {
panic!("message was passed to a RouterCommand machine when it generates its own");
}
let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;
Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
}
}
pub struct RouterCommandSignatureMachine {
key: PublicKey,
command: RouterCommand,
machine:
AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}
impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
type SignatureShare = SignatureShare<Secp256k1>;
fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
self.machine.read_share(reader)
}
fn complete(
self,
shares: HashMap<Participant, Self::SignatureShare>,
) -> Result<SignedRouterCommand, FrostError> {
let sig = self.machine.complete(shares)?;
let signature = Signature::new(&self.key, &self.command.msg(), sig)
.expect("machine produced an invalid signature");
Ok(SignedRouterCommand { command: self.command, signature })
}
}
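The machines above compose into FROST's usual two-round flow. A sketch of one participant's path, with the coordinator/transport entirely elided and the collected maps assumed to arrive from the other signers:

  // Round 1: commit
  let machine = RouterCommandMachine::new(keys, command).unwrap();
  let (machine, our_preprocess) = machine.preprocess(&mut OsRng);
  // ... broadcast our_preprocess, collect `preprocesses` from the other signers ...

  // Round 2: the message must be empty, as the machine signs the command's own msg()
  let (machine, our_share) = machine.sign(preprocesses, &[]).unwrap();
  // ... broadcast our_share, collect `shares` ...

  let signed: SignedRouterCommand = machine.complete(shares).unwrap();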

@@ -1,443 +1,30 @@
use std::{sync::Arc, io, collections::HashSet};

use k256::{
  elliptic_curve::{group::GroupEncoding, sec1},
  ProjectivePoint,
};

use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;

use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};

use alloy_rpc_types::Filter;
#[cfg(test)]
use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

pub use crate::{
  Error,
  crypto::{PublicKey, Signature},
  abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
  Ether,
  Erc20([u8; 20]),
}

impl Coin {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    Ok(match kind[0] {
      0 => Coin::Ether,
      1 => {
        let mut address = [0; 20];
        reader.read_exact(&mut address)?;
        Coin::Erc20(address)
      }
      _ => Err(io::Error::other("unrecognized Coin type"))?,
    })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Coin::Ether => writer.write_all(&[0]),
Coin::Erc20(token) => {
writer.write_all(&[1])?;
writer.write_all(token)
}
}
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
pub id: ([u8; 32], u64),
pub from: [u8; 20],
pub coin: Coin,
pub amount: U256,
pub data: Vec<u8>,
pub key_at_end_of_block: ProjectivePoint,
}
impl InInstruction {
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let id = {
let mut id_hash = [0; 32];
reader.read_exact(&mut id_hash)?;
let mut id_pos = [0; 8];
reader.read_exact(&mut id_pos)?;
let id_pos = u64::from_le_bytes(id_pos);
(id_hash, id_pos)
};
let mut from = [0; 20];
reader.read_exact(&mut from)?;
let coin = Coin::read(reader)?;
let mut amount = [0; 32];
reader.read_exact(&mut amount)?;
let amount = U256::from_le_slice(&amount);
let mut data_len = [0; 4];
reader.read_exact(&mut data_len)?;
let data_len = usize::try_from(u32::from_le_bytes(data_len))
.map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
let mut data = vec![0; data_len];
reader.read_exact(&mut data)?;
let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
reader.read_exact(&mut key_at_end_of_block)?;
let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
.ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;
Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
}
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.id.0)?;
writer.write_all(&self.id.1.to_le_bytes())?;
writer.write_all(&self.from)?;
self.coin.write(writer)?;
writer.write_all(&self.amount.as_le_bytes())?;
writer.write_all(
&u32::try_from(self.data.len())
.map_err(|_| {
io::Error::other("InInstruction being written had data exceeding 2**32 in length")
})?
.to_le_bytes(),
)?;
writer.write_all(&self.data)?;
writer.write_all(&self.key_at_end_of_block.to_bytes())
}
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
pub tx_id: [u8; 32],
pub nonce: u64,
pub signature: [u8; 64],
}
/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
pub(crate) fn code() -> Vec<u8> {
let bytecode = include_str!("../artifacts/Router.bin");
Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
}
pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
let mut bytecode = Self::code();
// Append the constructor arguments
bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
bytecode
}
// This isn't pub in order to force users to use `Deployer::find_router`.
pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
Self(provider, address)
}
pub fn address(&self) -> [u8; 20] {
**self.1
}
/// Get the key for Serai at the specified block.
#[cfg(test)]
pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
let call = TransactionRequest::default()
.to(self.1)
.input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call)
.block(BlockId::Hash(B256::from(at).into()))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
}
/// Get the message to be signed in order to update the key for Serai.
pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
let mut buffer = b"updateSeraiKey".to_vec();
buffer.extend(&chain_id.to_be_bytes::<32>());
buffer.extend(&nonce.to_be_bytes::<32>());
buffer.extend(&key.eth_repr());
buffer
}
/// Update the key representing Serai.
pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
// TODO: Set a more accurate gas
TxLegacy {
to: TxKind::Call(self.1),
input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
.abi_encode()
.into(),
gas_limit: 100_000,
..Default::default()
}
}
/// Get the current nonce for the published batches.
#[cfg(test)]
pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
let call = TransactionRequest::default()
.to(self.1)
.input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
let bytes = self
.0
.call(&call)
.block(BlockId::Hash(B256::from(at).into()))
.await
.map_err(|_| Error::ConnectionError)?;
let res =
abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
Ok(res._0)
}
/// Get the message to be signed in order to update the key for Serai.
pub(crate) fn execute_message(
chain_id: U256,
nonce: U256,
outs: Vec<abi::OutInstruction>,
) -> Vec<u8> {
("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
}
/// Execute a batch of `OutInstruction`s.
pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
TxLegacy {
to: TxKind::Call(self.1),
input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
// TODO
gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
..Default::default()
}
}
pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<ProjectivePoint>, Error> {
let filter = Filter::new().from_block(0).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
if all_keys.is_empty() {
return Ok(None);
};
let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
let last_key_x_coordinate = last_key_x_coordinate_log
.log_decode::<SeraiKeyUpdated>()
.map_err(|_| Error::ConnectionError)?
.inner
.data
.key;
let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());
let key =
Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?;
Ok(Some(key))
}
pub async fn in_instructions(
&self,
block: u64,
allowed_tokens: &HashSet<[u8; 20]>,
) -> Result<Vec<InInstruction>, Error> {
let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else {
return Ok(vec![]);
};
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
let mut transfer_check = HashSet::new();
let mut in_instructions = vec![];
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let id = (
log.block_hash.ok_or(Error::ConnectionError)?.into(),
log.log_index.ok_or(Error::ConnectionError)?,
);
let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
let tx = self
.0
.get_transaction_by_hash(tx_hash)
.await
.ok()
.flatten()
.ok_or(Error::ConnectionError)?;
let log =
log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let coin = if log.coin.0 == [0; 20] {
Coin::Ether
} else {
let token = *log.coin.0;
if !allowed_tokens.contains(&token) {
continue;
}
// If this also counts as a top-level transfer via the token, drop it
//
// Necessary in order to handle a potential edge case with some theoretical token
// implementations
//
// This will either let it be handled by the top-level transfer hook or will drop it
// entirely on the side of caution
if tx.to == Some(token.into()) {
continue;
}
// Get all logs for this TX
let receipt = self
.0
.get_transaction_receipt(tx_hash)
.await
.map_err(|_| Error::ConnectionError)?
.ok_or(Error::ConnectionError)?;
let tx_logs = receipt.inner.logs();
// Find a matching transfer log
let mut found_transfer = false;
for tx_log in tx_logs {
let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
// Ensure we didn't already use this transfer to check a distinct InInstruction event
if transfer_check.contains(&log_index) {
continue;
}
// Check if this log is from the token we expected to be transferred
if tx_log.address().0 != token {
continue;
}
// Check if this is a transfer log
// https://github.com/alloy-rs/core/issues/589
if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
continue;
}
let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
// Check if this is a transfer to us for the expected amount
if (transfer.to == self.1) && (transfer.value == log.amount) {
transfer_check.insert(log_index);
found_transfer = true;
break;
}
}
if !found_transfer {
// This shouldn't be a ConnectionError
// This is an exploit, a non-conforming ERC20, or an invalid connection
// This should halt the process which is sufficient, yet this is sub-optimal
// TODO
Err(Error::ConnectionError)?;
}
Coin::Erc20(token)
};
in_instructions.push(InInstruction {
id,
from: *log.from.0,
coin,
amount: log.amount,
data: log.instruction.as_ref().to_vec(),
key_at_end_of_block,
});
}
Ok(in_instructions)
}
pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
let mut res = vec![];
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log =
log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
{
let filter = Filter::new().from_block(block).to_block(block).address(self.1);
let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
for log in logs {
// Double check the address which emitted this log
if log.address() != self.1 {
Err(Error::ConnectionError)?;
}
let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();
let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;
let mut signature = [0; 64];
signature[.. 32].copy_from_slice(log.signature.c.as_ref());
signature[32 ..].copy_from_slice(log.signature.s.as_ref());
res.push(Executed {
tx_id,
nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
signature,
});
}
}
Ok(res)
}
#[cfg(feature = "tests")]
pub fn key_updated_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
}
#[cfg(feature = "tests")]
pub fn executed_filter(&self) -> Filter {
Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
}
}

+pub use crate::abi::router::*;
+
+/*
+use crate::crypto::{ProcessedSignature, PublicKey};
+use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
+use eyre::Result;
+use std::{convert::From, fs::File, sync::Arc};
+
+pub async fn router_update_public_key<M: Middleware + 'static>(
+  contract: &Router<M>,
+  public_key: &PublicKey,
+  signature: &ProcessedSignature,
+) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
+  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
+  let pending_tx = tx.send().await?;
+  let receipt = pending_tx.await?;
+  Ok(receipt)
+}
+
+pub async fn router_execute<M: Middleware + 'static>(
+  contract: &Router<M>,
+  txs: Vec<Rtransaction>,
+  signature: &ProcessedSignature,
+) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
+  let tx = contract.execute(txs, signature.into()).send();
+  let pending_tx = tx.send().await?;
+  let receipt = pending_tx.await?;
+  Ok(receipt)
+}
+*/
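Note what the signed messages commit to on the removed side: `update_serai_key_message` and `execute_message` both encode the chain ID and the Router's nonce, so a signature produced for one command can neither be replayed on another chain nor re-executed once the nonce advances.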

@@ -0,0 +1,34 @@
use eyre::{eyre, Result};
use group::ff::PrimeField;
use ethers_providers::{Provider, Http};
use crate::{
Error,
crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<()> {
if contract
.verify(
public_key.parity,
public_key.px.to_repr().into(),
keccak256(message),
signature.c.to_repr().into(),
signature.s.to_repr().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(Error::InvalidSignature))
}
}
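This mirrors `EthereumHram` above: the contract packs the nonce's address, the parity byte, `px`, `block.chainid`, and the message, while `call_verify` passes `keccak256(message)` as that final field, matching the `[chain_id || keccak256(m)]` binding applied when the signature was produced off-chain.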

@@ -1,13 +0,0 @@
use alloy_sol_types::sol;
#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
use super::*;
sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;

@@ -1,51 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
contract TestERC20 {
event Transfer(address indexed from, address indexed to, uint256 value);
event Approval(address indexed owner, address indexed spender, uint256 value);
function name() public pure returns (string memory) {
return "Test ERC20";
}
function symbol() public pure returns (string memory) {
return "TEST";
}
function decimals() public pure returns (uint8) {
return 18;
}
function totalSupply() public pure returns (uint256) {
return 1_000_000 * 10e18;
}
mapping(address => uint256) balances;
mapping(address => mapping(address => uint256)) allowances;
constructor() {
balances[msg.sender] = totalSupply();
}
function balanceOf(address owner) public view returns (uint256) {
return balances[owner];
}
function transfer(address to, uint256 value) public returns (bool) {
balances[msg.sender] -= value;
balances[to] += value;
return true;
}
function transferFrom(address from, address to, uint256 value) public returns (bool) {
allowances[from][msg.sender] -= value;
balances[from] -= value;
balances[to] += value;
return true;
}
function approve(address spender, uint256 value) public returns (bool) {
allowances[msg.sender][spender] = value;
return true;
}
function allowance(address owner, address spender) public view returns (uint256) {
return allowances[owner][spender];
}
}
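(Since this mock targets Solidity ^0.8, the unguarded `-=` operations are still safe: checked arithmetic reverts on underflow, standing in for explicit balance and allowance requires.)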

@@ -1,15 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "../../../contracts/Schnorr.sol";
contract TestSchnorr {
function verify(
bytes32 px,
bytes calldata message,
bytes32 c,
bytes32 s
) external pure returns (bool) {
return Schnorr.verify(px, message, c, s);
}
}

@@ -1,33 +1,49 @@
 use rand_core::OsRng;

-use group::ff::{Field, PrimeField};
+use sha2::Sha256;
+use sha3::{Digest, Keccak256};
+
+use group::Group;

 use k256::{
-  ecdsa::{
-    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
-  },
-  Scalar, ProjectivePoint,
+  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
+  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
+  U256, Scalar, AffinePoint, ProjectivePoint,
 };

 use frost::{
-  curve::{Ciphersuite, Secp256k1},
+  curve::Secp256k1,
   algorithm::{Hram, IetfSchnorr},
   tests::{algorithm_machines, sign},
 };

 use crate::{crypto::*, tests::key_gen};

-// The ecrecover opcode, yet with parity replacing v
-pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
-  let message: [u8; 32] = message.to_repr().into();
-  alloy_core::primitives::Signature::from_signature_and_parity(
-    sig,
-    alloy_core::primitives::Parity::Parity(odd_y),
-  )
-  .ok()?
-  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
-  .ok()
-  .map(Into::into)
+pub fn hash_to_scalar(data: &[u8]) -> Scalar {
+  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
+}
+
+pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
+  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
+    return None;
+  }
+
+  #[allow(non_snake_case)]
+  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
+  #[allow(non_snake_case)]
+  if let Some(R) = Option::<AffinePoint>::from(R) {
+    #[allow(non_snake_case)]
+    let R = ProjectivePoint::from(R);
+    let r = r.invert().unwrap();
+    let u1 = ProjectivePoint::GENERATOR * (-message * r);
+    let u2 = R * (s * r);
+    let key: ProjectivePoint = u1 + u2;
+    if !bool::from(key.is_identity()) {
+      return Some(address(&key));
+    }
+  }
+
+  None
 }

 #[test]
@@ -39,23 +55,20 @@ fn test_ecrecover() {
   const MESSAGE: &[u8] = b"Hello, World!";

   let (sig, recovery_id) = private
     .as_nonzero_scalar()
-    .try_sign_prehashed(
-      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
-      &keccak256(MESSAGE).into(),
-    )
+    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
     .unwrap();

   // Sanity check the signature verifies
   #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
   {
-    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
+    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
   }

   // Perform the ecrecover
   assert_eq!(
     ecrecover(
       hash_to_scalar(MESSAGE),
-      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
+      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
       *sig.r(),
       *sig.s()
     )

@@ -80,13 +93,18 @@ fn test_signing() {

 pub fn preprocess_signature_for_ecrecover(
   R: ProjectivePoint,
   public_key: &PublicKey,
+  chain_id: U256,
   m: &[u8],
   s: Scalar,
-) -> (Scalar, Scalar) {
-  let c = EthereumHram::hram(&R, &public_key.A, m);
+) -> (u8, Scalar, Scalar) {
+  let c = EthereumHram::hram(
+    &R,
+    &public_key.A,
+    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
+  );
   let sa = -(s * public_key.px);
   let ca = -(c * public_key.px);
-  (sa, ca)
+  (public_key.parity, sa, ca)
 }

 #[test]
@@ -94,12 +112,21 @@ fn test_ecrecover_hack() {
   let (keys, public_key) = key_gen();

   const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+  let chain_id = U256::ONE;
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );

-  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
-  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
+  let (parity, sa, ca) =
+    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
+  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
   assert_eq!(q, address(&sig.R));
 }
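Two details worth flagging in this hunk: `try_sign_prehashed_rfc6979` derives the ECDSA nonce deterministically per RFC 6979 (where the removed code drew a random ephemeral scalar, making the test non-reproducible), and `preprocess_signature_for_ecrecover` is the off-chain half of the contract's ecrecover trick: sa = −s·px and ca = −c·px make the precompile output address(s·G − c·A), i.e. address(R), which the final assertion checks.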

@@ -1,30 +1,22 @@
-use std::{sync::Arc, collections::HashMap};
+use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};

 use rand_core::OsRng;

+use group::ff::PrimeField;
 use k256::{Scalar, ProjectivePoint};
 use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};

-use alloy_core::{
-  primitives::{Address, U256, Bytes, TxKind},
-  hex::FromHex,
-};
-use alloy_consensus::{SignableTransaction, TxLegacy};
-
-use alloy_rpc_types::TransactionReceipt;
-use alloy_simple_request_transport::SimpleRequest;
-use alloy_provider::{Provider, RootProvider};
-
-use crate::crypto::{address, deterministically_sign, PublicKey};
+use ethers_core::{
+  types::{H160, Signature as EthersSignature},
+  abi::Abi,
+};
+use ethers_contract::ContractFactory;
+use ethers_providers::{Middleware, Provider, Http};
+
+use crate::crypto::PublicKey;

-#[cfg(test)]
 mod crypto;
-#[cfg(test)]
-mod abi;
-#[cfg(test)]
 mod schnorr;
-#[cfg(test)]
 mod router;

@@ -44,88 +36,57 @@ pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey)
   (keys, public_key)
 }

-// TODO: Use a proper error here
-pub async fn send(
-  provider: &RootProvider<SimpleRequest>,
-  wallet: &k256::ecdsa::SigningKey,
-  mut tx: TxLegacy,
-) -> Option<TransactionReceipt> {
-  let verifying_key = *wallet.verifying_key().as_affine();
-  let address = Address::from(address(&verifying_key.into()));
-
-  // https://github.com/alloy-rs/alloy/issues/539
-  // let chain_id = provider.get_chain_id().await.unwrap();
-  // tx.chain_id = Some(chain_id);
-  tx.chain_id = None;
-
-  tx.nonce = provider.get_transaction_count(address).await.unwrap();
-  // 100 gwei
-  tx.gas_price = 100_000_000_000u128;
-
-  let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap();
-  assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap());
-  assert!(
-    provider.get_balance(address).await.unwrap() >
-      ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value)
-  );
-
-  let mut bytes = vec![];
-  tx.encode_with_signature_fields(&sig.into(), &mut bytes);
-  let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?;
-  pending_tx.get_receipt().await.ok()
-}
-
-pub async fn fund_account(
-  provider: &RootProvider<SimpleRequest>,
-  wallet: &k256::ecdsa::SigningKey,
-  to_fund: Address,
-  value: U256,
-) -> Option<()> {
-  let funding_tx =
-    TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() };
-  assert!(send(provider, wallet, funding_tx).await.unwrap().status());
-  Some(())
-}
-
-// TODO: Use a proper error here
+// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
+// to fund the deployer, not create/pass a wallet
+// TODO: Deterministic deployments across chains
 pub async fn deploy_contract(
-  client: Arc<RootProvider<SimpleRequest>>,
+  chain_id: u32,
+  client: Arc<Provider<Http>>,
   wallet: &k256::ecdsa::SigningKey,
   name: &str,
-) -> Option<Address> {
+) -> eyre::Result<H160> {
+  let abi: Abi =
+    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();

   let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
   let hex_bin =
     if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
-  let bin = Bytes::from_hex(hex_bin).unwrap();
+  let bin = hex::decode(hex_bin).unwrap();
+  let factory = ContractFactory::new(abi, bin.into(), client.clone());

-  let deployment_tx = TxLegacy {
-    chain_id: None,
-    nonce: 0,
-    // 100 gwei
-    gas_price: 100_000_000_000u128,
-    gas_limit: 1_000_000,
-    to: TxKind::Create,
-    value: U256::ZERO,
-    input: bin,
-  };
-
-  let deployment_tx = deterministically_sign(&deployment_tx);
-
-  // Fund the deployer address
-  fund_account(
-    &client,
-    wallet,
-    deployment_tx.recover_signer().unwrap(),
-    U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price),
-  )
-  .await?;
-
-  let (deployment_tx, sig, _) = deployment_tx.into_parts();
-  let mut bytes = vec![];
-  deployment_tx.encode_with_signature_fields(&sig, &mut bytes);
-  let pending_tx = client.send_raw_transaction(&bytes).await.ok()?;
-  let receipt = pending_tx.get_receipt().await.ok()?;
-  assert!(receipt.status());
-  Some(receipt.contract_address.unwrap())
+  let mut deployment_tx = factory.deploy(())?.tx;
+  deployment_tx.set_chain_id(chain_id);
+  deployment_tx.set_gas(1_000_000);
+  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
+  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
+  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
+
+  let sig_hash = deployment_tx.sighash();
+  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
+
+  // EIP-155 v
+  let mut v = u64::from(rid.to_byte());
+  assert!((v == 0) || (v == 1));
+  v += u64::from((chain_id * 2) + 35);
+
+  let r = sig.r().to_repr();
+  let r_ref: &[u8] = r.as_ref();
+  let s = sig.s().to_repr();
+  let s_ref: &[u8] = s.as_ref();
+  let deployment_tx =
+    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
+
+  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
+
+  let mut receipt;
+  while {
+    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
+    receipt.is_none()
+  } {
+    tokio::time::sleep(Duration::from_secs(6)).await;
+  }
+  let receipt = receipt.unwrap();
+  assert!(receipt.status == Some(1.into()));
+
+  Ok(receipt.contract_address.unwrap())
 }
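The EIP-155 computation above follows v = recovery_byte + 2·CHAIN_ID + 35. For example, with Anvil's default chain ID of 31337 and a recovery byte of 0, v = 2·31337 + 35 = 62709.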

@@ -2,8 +2,7 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap};
 use rand_core::OsRng;

-use group::Group;
-use k256::ProjectivePoint;
+use group::ff::PrimeField;

 use frost::{
   curve::Secp256k1,
   Participant, ThresholdKeys,
@@ -11,173 +10,100 @@ use frost::{
   tests::{algorithm_machines, sign},
 };

-use alloy_core::primitives::{Address, U256};
-
-use alloy_simple_request_transport::SimpleRequest;
-use alloy_rpc_client::ClientBuilder;
-use alloy_provider::{Provider, RootProvider};
-
-use alloy_node_bindings::{Anvil, AnvilInstance};
+use ethers_core::{
+  types::{H160, U256, Bytes},
+  abi::AbiEncode,
+  utils::{Anvil, AnvilInstance},
+};
+use ethers_providers::{Middleware, Provider, Http};

 use crate::{
-  crypto::*,
-  deployer::Deployer,
-  router::{Router, abi as router},
-  tests::{key_gen, send, fund_account},
+  crypto::{keccak256, PublicKey, EthereumHram, Signature},
+  router::{self, *},
+  tests::{key_gen, deploy_contract},
 };

 async fn setup_test() -> (
+  u32,
   AnvilInstance,
-  Arc<RootProvider<SimpleRequest>>,
-  u64,
-  Router,
+  Router<Provider<Http>>,
   HashMap<Participant, ThresholdKeys<Secp256k1>>,
   PublicKey,
 ) {
   let anvil = Anvil::new().spawn();

-  let provider = RootProvider::new(
-    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
-  );
-  let chain_id = provider.get_chain_id().await.unwrap();
+  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+  let chain_id = provider.get_chainid().await.unwrap().as_u32();
   let wallet = anvil.keys()[0].clone().into();
   let client = Arc::new(provider);

-  // Make sure the Deployer constructor returns None, as it doesn't exist yet
-  assert!(Deployer::new(client.clone()).await.unwrap().is_none());
-
-  // Deploy the Deployer
-  let tx = Deployer::deployment_tx();
-  fund_account(
-    &client,
-    &wallet,
-    tx.recover_signer().unwrap(),
-    U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price),
-  )
-  .await
-  .unwrap();
-  let (tx, sig, _) = tx.into_parts();
-  let mut bytes = vec![];
-  tx.encode_with_signature_fields(&sig, &mut bytes);
-  let pending_tx = client.send_raw_transaction(&bytes).await.unwrap();
-  let receipt = pending_tx.get_receipt().await.unwrap();
-  assert!(receipt.status());
-  let deployer =
-    Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed");
+  let contract_address =
+    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
+  let contract = Router::new(contract_address, client.clone());

   let (keys, public_key) = key_gen();

-  // Verify the Router constructor returns None, as it doesn't exist yet
-  assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none());
-
-  // Deploy the router
-  let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key))
-    .await
-    .unwrap();
-  assert!(receipt.status());
-  let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap();
-
-  (anvil, client, chain_id, contract, keys, public_key)
-}
-
-async fn latest_block_hash(client: &RootProvider<SimpleRequest>) -> [u8; 32] {
-  client
-    .get_block(client.get_block_number().await.unwrap().into(), false)
-    .await
-    .unwrap()
-    .unwrap()
-    .header
-    .hash
-    .unwrap()
-    .0
+  // Set the key to the threshold keys
+  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
+  let pending_tx = tx.send().await.unwrap();
+  let receipt = pending_tx.await.unwrap().unwrap();
+  assert!(receipt.status == Some(1.into()));
+
+  (chain_id, anvil, contract, keys, public_key)
 }

 #[tokio::test]
 async fn test_deploy_contract() {
-  let (_anvil, client, _, router, _, public_key) = setup_test().await;
-  let block_hash = latest_block_hash(&client).await;
-  assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key);
-  assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
-  // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis
+  setup_test().await;
 }

 pub fn hash_and_sign(
   keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
   public_key: &PublicKey,
+  chain_id: U256,
   message: &[u8],
 ) -> Signature {
+  let hashed_message = keccak256(message);
+  let mut chain_id_bytes = [0; 32];
+  chain_id.to_big_endian(&mut chain_id_bytes);
+  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
+
   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message);
-
-  Signature::new(public_key, message, sig).unwrap()
-}
-
-#[tokio::test]
-async fn test_router_update_serai_key() {
-  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
-
-  let next_key = loop {
-    let point = ProjectivePoint::random(&mut OsRng);
-    let Some(next_key) = PublicKey::new(point) else { continue };
-    break next_key;
-  };
-
-  let message = Router::update_serai_key_message(
-    U256::try_from(chain_id).unwrap(),
-    U256::try_from(1u64).unwrap(),
-    &next_key,
-  );
-  let sig = hash_and_sign(&keys, &public_key, &message);
-
-  let first_block_hash = latest_block_hash(&client).await;
-  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
-
-  let receipt =
-    send(&client, &anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig))
-      .await
-      .unwrap();
-  assert!(receipt.status());
-
-  let second_block_hash = latest_block_hash(&client).await;
-  assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key);
-  // Check this does still offer the historical state
-  assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key);
-  // TODO: Check logs
-
-  println!("gas used: {:?}", receipt.gas_used);
-  // println!("logs: {:?}", receipt.logs);
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, keys),
+    full_message,
+  );
+
+  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
 }

 #[tokio::test]
 async fn test_router_execute() {
-  let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await;
-
-  let to = Address::from([0; 20]);
-  let value = U256::ZERO;
-  let tx = router::OutInstruction { to, value, calls: vec![] };
-  let txs = vec![tx];
-
-  let first_block_hash = latest_block_hash(&client).await;
-  let nonce = contract.nonce(first_block_hash).await.unwrap();
-  assert_eq!(nonce, U256::try_from(1u64).unwrap());
-
-  let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone());
-  let sig = hash_and_sign(&keys, &public_key, &message);
-
-  let receipt =
-    send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, &sig)).await.unwrap();
-  assert!(receipt.status());
-
-  let second_block_hash = latest_block_hash(&client).await;
-  assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap());
-  // Check this does still offer the historical state
-  assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap());
-  // TODO: Check logs
-
-  println!("gas used: {:?}", receipt.gas_used);
-  // println!("logs: {:?}", receipt.logs);
+  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
+
+  let to = H160([0u8; 20]);
+  let value = U256([0u64; 4]);
+  let data = Bytes::from([0]);
+  let tx = OutInstruction { to, value, data: data.clone() };
+
+  let nonce_call = contract.nonce();
+  let nonce = nonce_call.call().await.unwrap();
+
+  let encoded =
+    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
+  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
+
+  let tx = contract
+    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
+    .gas(300_000);
+  let pending_tx = tx.send().await.unwrap();
+  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
+  assert!(receipt.status == Some(1.into()));
+
+  println!("gas used: {:?}", receipt.cumulative_gas_used);
+  println!("logs: {:?}", receipt.logs);
 }
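The added `hash_and_sign` binds signatures to a specific chain by hashing the encoded calldata and prefixing the chain ID as a 32-byte big-endian word. A standalone sketch of that construction using the `sha3` crate (the helper name is hypothetical):

```rust
use sha3::{Digest, Keccak256};

// Hypothetical helper mirroring hash_and_sign's construction:
// full message = 32-byte big-endian chain ID || keccak256(encoded calldata)
fn full_message(chain_id: u64, encoded: &[u8]) -> Vec<u8> {
  let mut chain_id_bytes = [0u8; 32];
  chain_id_bytes[24 ..].copy_from_slice(&chain_id.to_be_bytes());
  let hashed_message: [u8; 32] = Keccak256::digest(encoded).into();
  [chain_id_bytes.as_slice(), &hashed_message].concat()
}

fn main() {
  // Two chains never produce the same signed message for the same calldata
  assert_ne!(full_message(1, b"execute"), full_message(5, b"execute"));
}
```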

View File

@@ -1,9 +1,11 @@
-use std::sync::Arc;
+use std::{convert::TryFrom, sync::Arc};

 use rand_core::OsRng;

-use group::ff::PrimeField;
-use k256::Scalar;
+use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
+
+use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
+use ethers_providers::{Middleware, Provider, Http};

 use frost::{
   curve::Secp256k1,
@@ -11,34 +13,24 @@ use frost::{
   tests::{algorithm_machines, sign},
 };

-use alloy_core::primitives::Address;
-use alloy_sol_types::SolCall;
-
-use alloy_rpc_types::{TransactionInput, TransactionRequest};
-use alloy_simple_request_transport::SimpleRequest;
-use alloy_rpc_client::ClientBuilder;
-use alloy_provider::{Provider, RootProvider};
-
-use alloy_node_bindings::{Anvil, AnvilInstance};
-
 use crate::{
-  Error,
   crypto::*,
-  tests::{key_gen, deploy_contract, abi::schnorr as abi},
+  schnorr::*,
+  tests::{key_gen, deploy_contract},
 };

-async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
+async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
   let anvil = Anvil::new().spawn();

-  let provider = RootProvider::new(
-    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
-  );
+  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+  let chain_id = provider.get_chainid().await.unwrap().as_u32();
   let wallet = anvil.keys()[0].clone().into();
   let client = Arc::new(provider);

-  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
-  (anvil, client, address)
+  let contract_address =
+    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
+  let contract = Schnorr::new(contract_address, client.clone());
+  (chain_id, anvil, contract)
 }

 #[tokio::test]
@@ -46,48 +38,30 @@ async fn test_deploy_contract() {
   setup_test().await;
 }

-pub async fn call_verify(
-  provider: &RootProvider<SimpleRequest>,
-  contract: Address,
-  public_key: &PublicKey,
-  message: &[u8],
-  signature: &Signature,
-) -> Result<(), Error> {
-  let px: [u8; 32] = public_key.px.to_repr().into();
-  let c_bytes: [u8; 32] = signature.c.to_repr().into();
-  let s_bytes: [u8; 32] = signature.s.to_repr().into();
-  let call = TransactionRequest::default().to(contract).input(TransactionInput::new(
-    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
-      .abi_encode()
-      .into(),
-  ));
-  let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?;
-  let res =
-    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
-
-  if res._0 {
-    Ok(())
-  } else {
-    Err(Error::InvalidSignature)
-  }
-}
-
 #[tokio::test]
 async fn test_ecrecover_hack() {
-  let (_anvil, client, contract) = setup_test().await;
+  let (chain_id, _anvil, contract) = setup_test().await;
+  let chain_id = U256::from(chain_id);

   let (keys, public_key) = key_gen();

   const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

   let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
-  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );
+  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();

-  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
+  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
   // Test an invalid signature fails
   let mut sig = sig;
   sig.s += Scalar::ONE;
-  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
+  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
 }
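Why does bumping `s` by one make verification fail? Under the usual Schnorr convention (assumed here: s = r + c·x, with the verifier recomputing the nonce commitment as s·G − c·P), any perturbation of s shifts the recovered point and therefore the challenge. A self-contained sketch of that identity with `k256`:

```rust
use rand_core::OsRng;
use k256::{elliptic_curve::Field, ProjectivePoint, Scalar};

fn main() {
  let g = ProjectivePoint::GENERATOR;
  let x = Scalar::random(&mut OsRng); // secret key
  let p = g * x; // public key
  let r = Scalar::random(&mut OsRng); // nonce
  let big_r = g * r; // nonce commitment
  let c = Scalar::random(&mut OsRng); // stand-in for the Fiat-Shamir challenge
  let s = r + (c * x);

  // The verifier's identity: s*G - c*P reproduces R
  assert_eq!((g * s) - (p * c), big_r);
  // Mutating s, as the test does, breaks it
  assert_ne!((g * (s + Scalar::ONE)) - (p * c), big_r);
}
```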

View File

@@ -43,6 +43,7 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features =
 # Needed for multisig
 transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
+dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
 frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }

 monero-generators = { path = "generators", version = "0.4", default-features = false }
@@ -90,6 +91,7 @@ std = [
   "multiexp/std",
   "transcript/std",
+  "dleq/std",
   "monero-generators/std",
@@ -104,7 +106,7 @@ std = [
 cache-distribution = ["async-lock"]
 http-rpc = ["digest_auth", "simple-request", "tokio"]
-multisig = ["transcript", "frost", "std"]
+multisig = ["transcript", "frost", "dleq", "std"]
 binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
 experimental = []

View File

@@ -14,12 +14,7 @@ use zeroize::{Zeroize, ZeroizeOnDrop};
 use sha3::{Digest, Keccak256};

-use curve25519_dalek::{
-  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
-  scalar::Scalar,
-  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
-  traits::VartimePrecomputedMultiscalarMul,
-};
+use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};

 pub use monero_generators::{H, decompress_point};
@@ -61,13 +56,6 @@ pub(crate) fn INV_EIGHT() -> Scalar {
   *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
 }

-static BASEPOINT_PRECOMP_CELL: OnceLock<VartimeEdwardsPrecomputation> = OnceLock::new();
-#[allow(non_snake_case)]
-pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation {
-  BASEPOINT_PRECOMP_CELL
-    .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT]))
-}
-
 /// Monero protocol version.
 ///
 /// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
View File

@@ -91,7 +91,7 @@ impl Bulletproofs {
       Bulletproofs::Plus(
         AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
           .unwrap()
-          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap()))
+          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
           .unwrap(),
       )
     })
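The `AggregateRangeWitness` rework below stores the commitments' openings (amounts and masks) rather than the commitments themselves, re-deriving the points when checking consistency. A toy sketch of that check with Pedersen commitments over Ed25519 (the second generator here is a stand-in; Monero's actual H is hash-derived):

```rust
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::EdwardsPoint, scalar::Scalar};

// Stand-in for Monero's H; the real generator is derived by hashing
fn h() -> EdwardsPoint {
  ED25519_BASEPOINT_POINT * Scalar::from(2u64)
}

// amount*H + mask*G, the usual Pedersen commitment shape
fn commit(amount: u64, mask: Scalar) -> EdwardsPoint {
  (h() * Scalar::from(amount)) + (ED25519_BASEPOINT_POINT * mask)
}

fn main() {
  let (amount, mask) = (42u64, Scalar::from(7u64));
  let commitment = commit(amount, mask);
  // The witness keeps (amount, mask); consistency means re-deriving the same point
  assert_eq!(commit(amount, mask), commitment);
  // A different opening fails the check
  assert_ne!(commit(amount + 1, mask), commitment);
}
```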

View File

@@ -24,7 +24,7 @@ use crate::{
   },
 };

-// Figure 3 of the Bulletproofs+ Paper
+// Figure 3
 #[derive(Clone, Debug)]
 pub(crate) struct AggregateRangeStatement {
   generators: Generators,
@@ -38,15 +38,24 @@ impl Zeroize for AggregateRangeStatement {
 }

 #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
-pub(crate) struct AggregateRangeWitness(Vec<Commitment>);
+pub(crate) struct AggregateRangeWitness {
+  values: Vec<u64>,
+  gammas: Vec<Scalar>,
+}

 impl AggregateRangeWitness {
-  pub(crate) fn new(commitments: Vec<Commitment>) -> Option<Self> {
+  pub(crate) fn new(commitments: &[Commitment]) -> Option<Self> {
     if commitments.is_empty() || (commitments.len() > MAX_M) {
       return None;
     }

-    Some(AggregateRangeWitness(commitments))
+    let mut values = Vec::with_capacity(commitments.len());
+    let mut gammas = Vec::with_capacity(commitments.len());
+    for commitment in commitments {
+      values.push(commitment.amount);
+      gammas.push(Scalar(commitment.mask));
+    }
+    Some(AggregateRangeWitness { values, gammas })
   }
 }
@@ -153,11 +162,13 @@ impl AggregateRangeStatement {
     witness: &AggregateRangeWitness,
   ) -> Option<AggregateRangeProof> {
     // Check for consistency with the witness
-    if self.V.len() != witness.0.len() {
+    if self.V.len() != witness.values.len() {
       return None;
     }
-    for (commitment, witness) in self.V.iter().zip(witness.0.iter()) {
-      if witness.calculate() != **commitment {
+    for (commitment, (value, gamma)) in
+      self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter()))
+    {
+      if Commitment::new(**gamma, *value).calculate() != **commitment {
         return None;
       }
     }
@@ -185,13 +196,7 @@ impl AggregateRangeStatement {
     let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N));
     for j in 1 ..= V.len() {
       d_js.push(Self::d_j(j, V.len()));
-      #[allow(clippy::map_unwrap_or)]
-      a_l.0.append(
-        &mut u64_decompose(
-          *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0),
-        )
-        .0,
-      );
+      a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
     }

     let a_r = a_l.clone() - Scalar::ONE;
@@ -218,8 +223,8 @@ impl AggregateRangeStatement {
     let a_l = a_l - z;
     let a_r = a_r + &d_descending_y_plus_z;
     let mut alpha = alpha;
-    for j in 1 ..= witness.0.len() {
-      alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one;
+    for j in 1 ..= witness.gammas.len() {
+      alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
     }

     Some(AggregateRangeProof {
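`u64_decompose`, used above to build `a_l`, turns each amount into its 64 bits, with `unwrap_or(&0)` padding absent commitments. A plain-integer sketch of the decomposition (the little-endian bit order is an assumption for illustration; the real code produces field scalars):

```rust
// Little-endian bit decomposition of a u64
fn u64_decompose(value: u64) -> Vec<u64> {
  (0 .. 64).map(|i| (value >> i) & 1).collect()
}

fn main() {
  let bits = u64_decompose(0b1101);
  assert_eq!(&bits[.. 4], &[1, 0, 1, 1]);
  // Recomposing from the most significant bit down returns the original value
  assert_eq!(bits.iter().rev().fold(0, |acc, bit| (acc << 1) | bit), 0b1101);
}
```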

View File

@@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{
   ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
 };

-// Figure 1 of the Bulletproofs+ paper
+// Figure 1
 #[derive(Clone, Debug)]
 pub(crate) struct WipStatement {
   generators: Generators,

View File

@@ -12,14 +12,14 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
 use subtle::{ConstantTimeEq, ConditionallySelectable};

 use curve25519_dalek::{
-  constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT},
+  constants::ED25519_BASEPOINT_TABLE,
   scalar::Scalar,
-  traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul},
+  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
   edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
 };

 use crate::{
-  INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
+  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
   ringct::hash_to_point, serialize::*,
 };
@@ -27,6 +27,8 @@ use crate::{
 mod multisig;
 #[cfg(feature = "multisig")]
 pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
+#[cfg(feature = "multisig")]
+pub(crate) use multisig::add_key_image_share;

 /// Errors returned when CLSAG signing fails.
 #[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -98,11 +100,8 @@ fn core(
 ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
   let n = ring.len();

-  let images_precomp = match A_c1 {
-    Mode::Sign(..) => None,
-    Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])),
-  };
-  let D_INV_EIGHT = D * INV_EIGHT();
+  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
+  let D = D * INV_EIGHT();

   // Generate the transcript
   // Instead of generating multiple, a single transcript is created and then edited as needed
@@ -131,7 +130,7 @@ fn core(
   }
   to_hash.extend(I.compress().to_bytes());
-  to_hash.extend(D_INV_EIGHT.compress().to_bytes());
+  to_hash.extend(D.compress().to_bytes());
   to_hash.extend(pseudo_out.compress().to_bytes());
   // mu_P with agg_0
   let mu_P = hash_to_scalar(&to_hash);
@@ -175,25 +174,10 @@ fn core(
     let c_p = mu_P * c;
     let c_c = mu_C * c;

-    // (s_i * G) + (c_p * P_i) + (c_c * C_i)
-    let L = match A_c1 {
-      Mode::Sign(..) => {
-        EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]])
-      }
-      Mode::Verify(..) => {
-        BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]])
-      }
-    };
+    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);

     let PH = hash_to_point(&P[i]);

-    // (c_p * I) + (c_c * D) + (s_i * PH)
-    let R = match A_c1 {
-      Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]),
-      Mode::Verify(..) => {
-        images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH])
-      }
-    };
+    // Shouldn't be an issue as all of the variables in this vartime statement are public
+    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

     to_hash.truncate(((2 * n) + 3) * 32);
     to_hash.extend(L.compress().to_bytes());
@@ -207,7 +191,7 @@ fn core(
   }

   // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
-  ((D_INV_EIGHT, c * mu_P, c * mu_C), c1)
+  ((D, c * mu_P, c * mu_C), c1)
 }

 /// CLSAG signature, as used in Monero.
@@ -277,10 +261,8 @@ impl Clsag {
         nonce.deref() *
           hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
       );
-      // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring
-      // member's commitment and our input commitment (which will only have a known discrete log
-      // over G if the amounts cancel out)
-      clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c);
+      clsag.s[usize::from(inputs[i].2.decoys.i)] =
+        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();
       inputs[i].0.zeroize();
       nonce.zeroize();
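The removed `Mode::Sign`/`Mode::Verify` split chose between constant-pattern and variable-time multiscalar multiplication for the same sum s·G + c_p·P + c_c·C. A sketch showing the batched form agrees with the naive term-by-term sum (example scalars and points are arbitrary):

```rust
use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT, edwards::EdwardsPoint, scalar::Scalar,
  traits::MultiscalarMul,
};

fn main() {
  let g = ED25519_BASEPOINT_POINT;
  let (s, c_p, c_c) = (Scalar::from(3u64), Scalar::from(5u64), Scalar::from(7u64));
  let (p, c) = (g * Scalar::from(11u64), g * Scalar::from(13u64));

  // Naive term-by-term evaluation
  let naive = (g * s) + (p * c_p) + (c * c_c);
  // Batched multiscalar multiplication computes the same point
  let batched = EdwardsPoint::multiscalar_mul([s, c_p, c_c], [g, p, c]);
  assert_eq!(naive, batched);
}
```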

View File

@@ -1,8 +1,5 @@
 use core::{ops::Deref, fmt::Debug};
-use std_shims::{
-  io::{self, Read, Write},
-  collections::HashMap,
-};
+use std_shims::io::{self, Read, Write};
 use std::sync::{Arc, RwLock};

 use rand_core::{RngCore, CryptoRng, SeedableRng};
@@ -12,13 +9,11 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

 use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

-use group::{
-  ff::{Field, PrimeField},
-  Group, GroupEncoding,
-};
+use group::{ff::Field, Group, GroupEncoding};

 use transcript::{Transcript, RecommendedTranscript};
 use dalek_ff_group as dfg;
+use dleq::DLEqProof;
 use frost::{
   dkg::lagrange,
   curve::Ed25519,
@@ -31,6 +26,10 @@ use crate::ringct::{
   clsag::{ClsagInput, Clsag},
 };

+fn dleq_transcript() -> RecommendedTranscript {
+  RecommendedTranscript::new(b"monero_key_image_dleq")
+}
+
 impl ClsagInput {
   fn transcript<T: Transcript>(&self, transcript: &mut T) {
     // Doesn't domain separate as this is considered part of the larger CLSAG proof
@@ -44,7 +43,6 @@ impl ClsagInput {
       // They're just an unreliable reference to this data which will be included in the message
       // if in use
       transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
-      // This also transcripts the key image generator since it's derived from this key
       transcript.append_message(b"key", pair[0].compress().to_bytes());
       transcript.append_message(b"commitment", pair[1].compress().to_bytes())
     }
@@ -72,11 +70,13 @@ impl ClsagDetails {
 #[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
 pub struct ClsagAddendum {
   pub(crate) key_image: dfg::EdwardsPoint,
+  dleq: DLEqProof<dfg::EdwardsPoint>,
 }

 impl WriteAddendum for ClsagAddendum {
   fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
-    writer.write_all(self.key_image.compress().to_bytes().as_ref())
+    writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
+    self.dleq.write(writer)
   }
 }
@@ -97,8 +97,9 @@ pub struct ClsagMultisig {
   transcript: RecommendedTranscript,

   pub(crate) H: EdwardsPoint,
-  key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>,
-  image: Option<dfg::EdwardsPoint>,
+  // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
+  // an extra round
+  image: EdwardsPoint,

   details: Arc<RwLock<Option<ClsagDetails>>>,
@@ -116,8 +117,7 @@ impl ClsagMultisig {
       transcript,

       H: hash_to_point(&output_key),
-      key_image_shares: HashMap::new(),
-      image: None,
+      image: EdwardsPoint::identity(),

       details,
@@ -135,6 +135,20 @@ impl ClsagMultisig {
   }
 }

+pub(crate) fn add_key_image_share(
+  image: &mut EdwardsPoint,
+  generator: EdwardsPoint,
+  offset: Scalar,
+  included: &[Participant],
+  participant: Participant,
+  share: EdwardsPoint,
+) {
+  if image.is_identity().into() {
+    *image = generator * offset;
+  }
+  *image += share * lagrange::<dfg::Scalar>(participant, included).0;
+}
+
 impl Algorithm<Ed25519> for ClsagMultisig {
   type Transcript = RecommendedTranscript;
   type Addendum = ClsagAddendum;
@@ -146,10 +160,23 @@ impl Algorithm<Ed25519> for ClsagMultisig {
   fn preprocess_addendum<R: RngCore + CryptoRng>(
     &mut self,
-    _rng: &mut R,
+    rng: &mut R,
     keys: &ThresholdKeys<Ed25519>,
   ) -> ClsagAddendum {
-    ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() }
+    ClsagAddendum {
+      key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
+      dleq: DLEqProof::prove(
+        rng,
+        // Doesn't take in a larger transcript object due to the usage of this
+        // Every prover would immediately write their own DLEq proof, when they can only do so in
+        // the proper order if they want to reach consensus
+        // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
+        // try to merge later in some form, when it should instead just merge xH (as it does)
+        &mut dleq_transcript(),
+        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
+        keys.secret_share(),
+      ),
+    }
   }

   fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
@@ -163,7 +190,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
       Err(io::Error::other("non-canonical key image"))?;
     }

-    Ok(ClsagAddendum { key_image: xH })
+    Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
   }

   fn process_addendum(
@@ -172,29 +199,33 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     l: Participant,
     addendum: ClsagAddendum,
   ) -> Result<(), FrostError> {
-    if self.image.is_none() {
+    // TODO: This check is faulty if two shares are additive inverses of each other
+    if self.image.is_identity().into() {
       self.transcript.domain_separate(b"CLSAG");
-      // Transcript the ring
       self.input().transcript(&mut self.transcript);
-      // Transcript the mask
       self.transcript.append_message(b"mask", self.mask().to_bytes());
-      // Init the image to the offset
-      self.image = Some(dfg::EdwardsPoint(self.H) * view.offset());
     }

-    // Transcript this participant's contribution
     self.transcript.append_message(b"participant", l.to_bytes());
+    addendum
+      .dleq
+      .verify(
+        &mut dleq_transcript(),
+        &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
+        &[view.original_verification_share(l), addendum.key_image],
+      )
+      .map_err(|_| FrostError::InvalidPreprocess(l))?;
     self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
-
-    // Accumulate the interpolated share
-    let interpolated_key_image_share =
-      addendum.key_image * lagrange::<dfg::Scalar>(l, view.included());
-    *self.image.as_mut().unwrap() += interpolated_key_image_share;
-
-    self
-      .key_image_shares
-      .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share);
+    add_key_image_share(
+      &mut self.image,
+      self.H,
+      view.offset().0,
+      view.included(),
+      l,
+      addendum.key_image.0,
+    );

     Ok(())
   }
@@ -222,7 +253,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     #[allow(non_snake_case)]
     let (clsag, pseudo_out, p, c) = Clsag::sign_core(
       &mut rng,
-      &self.image.expect("verifying a share despite never processing any addendums").0,
+      &self.image,
       &self.input(),
       self.mask(),
       self.msg.as_ref().unwrap(),
@@ -231,8 +262,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     );
     self.interim = Some(Interim { p, c, clsag, pseudo_out });

-    // r - p x, where p is the challenge for the keys
-    *nonces[0] - dfg::Scalar(p) * view.secret_share().deref()
+    (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
   }

   #[must_use]
@@ -244,13 +274,11 @@ impl Algorithm<Ed25519> for ClsagMultisig {
   ) -> Option<Self::Signature> {
     let interim = self.interim.as_ref().unwrap();
     let mut clsag = interim.clsag.clone();
-    // We produced shares as `r - p x`, yet the signature is `r - p x - c x`
-    // Subtract `c x` (saved as `c`) now
     clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
     if clsag
       .verify(
         &self.input().decoys.ring,
-        &self.image.expect("verifying a signature despite never processing any addendums").0,
+        &self.image,
         &interim.pseudo_out,
         self.msg.as_ref().unwrap(),
       )
@@ -268,61 +296,10 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     share: dfg::Scalar,
   ) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
     let interim = self.interim.as_ref().unwrap();
-    // For a share `r - p x`, the following two equalities should hold:
-    // - `(r - p x)G == R.0 - pV`, where `V = xG`
-    // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share)
-    //
-    // This is effectively a discrete log equality proof for:
-    // V, K over G, H
-    // with nonces
-    // R.0, R.1
-    // and solution
-    // s
-    //
-    // Which is a batch-verifiable rewrite of the traditional CP93 proof
-    // (and also writable as Generalized Schnorr Protocol)
-    //
-    // That means that given a proper challenge, this alone can be certainly argued to prove the
-    // key image share is well-formed and the provided signature so proves for that.
-    // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of
-    // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically
-    // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be
-    // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript).
-
-    let key_image_share = self.key_image_shares[&verification_share.to_bytes()];
-
-    // Hash every variable relevant here, using the hash output as the random weight
-    let mut weight_transcript =
-      RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share");
-    weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes());
-    weight_transcript.append_message(b"H", self.H.to_bytes());
-    weight_transcript.append_message(b"xG", verification_share.to_bytes());
-    weight_transcript.append_message(b"xH", key_image_share.to_bytes());
-    weight_transcript.append_message(b"rG", nonces[0][0].to_bytes());
-    weight_transcript.append_message(b"rH", nonces[0][1].to_bytes());
-    weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr());
-    weight_transcript.append_message(b"s", share.to_repr());
-    let weight = weight_transcript.challenge(b"weight");
-    let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into()));
-
-    let part_one = vec![
-      (share, dfg::EdwardsPoint::generator()),
-      // -(R.0 - pV) == -R.0 + pV
-      (-dfg::Scalar::ONE, nonces[0][0]),
-      (dfg::Scalar(interim.p), verification_share),
-    ];
-
-    let mut part_two = vec![
-      (weight * share, dfg::EdwardsPoint(self.H)),
-      // -(R.1 - pK) == -R.1 + pK
-      (-weight, nonces[0][1]),
-      (weight * dfg::Scalar(interim.p), key_image_share),
-    ];
-
-    let mut all = part_one;
-    all.append(&mut part_two);
-    Ok(all)
+    Ok(vec![
+      (share, dfg::EdwardsPoint::generator()),
+      (dfg::Scalar(interim.p), verification_share),
+      (-dfg::Scalar::ONE, nonces[0][0]),
+    ])
   }
 }
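`add_key_image_share` interpolates each participant's x_i·H contribution with a Lagrange coefficient so the accumulated sum is x·H for the joint secret x. A self-contained toy over secp256k1 (the generator, the 2-of-2 Shamir split, and 1-based indices are illustrative):

```rust
use rand_core::OsRng;
use k256::{elliptic_curve::Field, ProjectivePoint, Scalar};

// Lagrange coefficient at zero for participant i among `included` (1-based indices)
fn lagrange(i: u16, included: &[u16]) -> Scalar {
  let mut numerator = Scalar::ONE;
  let mut denominator = Scalar::ONE;
  for &j in included {
    if j == i {
      continue;
    }
    numerator *= Scalar::from(u64::from(j));
    denominator *= Scalar::from(u64::from(j)) - Scalar::from(u64::from(i));
  }
  numerator * denominator.invert().unwrap()
}

fn main() {
  // Stand-in for the key image generator H
  let h = ProjectivePoint::GENERATOR * Scalar::from(9u64);
  // Degree-1 polynomial f(z) = x + a*z, so f(0) is the joint secret x
  let (x, a) = (Scalar::random(&mut OsRng), Scalar::random(&mut OsRng));
  let share = |i: u64| x + (a * Scalar::from(i));

  let included = [1u16, 2];
  // Each participant contributes f(i)*H; interpolation recovers x*H, the key image
  let image =
    (h * share(1)) * lagrange(1, &included) + (h * share(2)) * lagrange(2, &included);
  assert_eq!(image, h * x);
}
```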

View File

@@ -21,7 +21,7 @@ fn test_aggregate_range_proof() {
   }
   let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
   let statement = AggregateRangeStatement::new(commitment_points).unwrap();
-  let witness = AggregateRangeWitness::new(commitments).unwrap();
+  let witness = AggregateRangeWitness::new(&commitments).unwrap();

   let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
   statement.verify(&mut OsRng, &mut verifier, (), proof);

View File

@@ -1,5 +1,5 @@
-use core::{marker::PhantomData, fmt};
-use std_shims::string::ToString;
+use core::{marker::PhantomData, fmt::Debug};
+use std_shims::string::{String, ToString};

 use zeroize::Zeroize;
@@ -81,7 +81,7 @@ impl AddressType {
 }

 /// A type which returns the byte for a given address.
-pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug {
+pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug {
   fn network_bytes(network: Network) -> (u8, u8, u8, u8);
 }
@@ -191,8 +191,8 @@ pub struct Address<B: AddressBytes> {
   pub view: EdwardsPoint,
 }

-impl<B: AddressBytes> fmt::Debug for Address<B> {
-  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+impl<B: AddressBytes> core::fmt::Debug for Address<B> {
+  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
     fmt
       .debug_struct("Address")
       .field("meta", &self.meta)
@@ -212,8 +212,8 @@ impl<B: AddressBytes> Zeroize for Address<B> {
   }
 }

-impl<B: AddressBytes> fmt::Display for Address<B> {
-  fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+impl<B: AddressBytes> ToString for Address<B> {
+  fn to_string(&self) -> String {
     let mut data = vec![self.meta.to_byte()];
     data.extend(self.spend.compress().to_bytes());
     data.extend(self.view.compress().to_bytes());
@@ -226,7 +226,7 @@ impl<B: AddressBytes> fmt::Display for Address<B> {
     if let Some(id) = self.meta.kind.payment_id() {
       data.extend(id);
     }
-    write!(f, "{}", encode_check(&data).unwrap())
+    encode_check(&data).unwrap()
   }
 }

View File

@@ -18,7 +18,6 @@ use transcript::{Transcript, RecommendedTranscript};
 use frost::{
   curve::Ed25519,
   Participant, FrostError, ThresholdKeys,
-  dkg::lagrange,
   sign::{
     Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
     SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -28,7 +27,7 @@ use frost::{
 use crate::{
   random_scalar,
   ringct::{
-    clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig},
+    clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
     RctPrunable,
   },
   transaction::{Input, Transaction},
@@ -262,13 +261,8 @@ impl SignMachine<Transaction> for TransactionSignMachine {
     included.push(self.i);
     included.sort_unstable();

-    // Start calculating the key images, as needed on the TX level
     let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
-    for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) {
-      *image = generator * offset;
-    }
-
-    // Convert the serialized nonces commitments to a parallelized Vec
+    // Convert the unified commitments to a Vec of the individual commitments
     let mut commitments = (0 .. self.clsags.len())
       .map(|c| {
         included
@@ -297,7 +291,14 @@ impl SignMachine<Transaction> for TransactionSignMachine {
           // provides the easiest API overall, as this is where the TX is (which needs the key
           // images in its message), along with where the outputs are determined (where our
           // outputs may need these in order to guarantee uniqueness)
-          images[c] += preprocess.addendum.key_image.0 * lagrange::<dfg::Scalar>(*l, &included).0;
+          add_key_image_share(
+            &mut images[c],
+            self.key_images[c].0,
+            self.key_images[c].1,
+            &included,
+            *l,
+            preprocess.addendum.key_image.0,
+          );
           Ok((*l, preprocess))
         })

View File

@@ -11,7 +11,7 @@ impl Get for Transaction<'_> {
     let mut res = self.0.get(&key);
     for change in &self.1 {
       if change.1 == key.as_ref() {
-        res.clone_from(&change.2);
+        res = change.2.clone();
       }
     }
     res
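Both sides implement the same read-through-pending-writes semantics; `clone_from` merely reuses the existing allocation in `res` where possible, where `res = v.clone()` always allocates anew. A minimal sketch of the pattern:

```rust
use std::collections::HashMap;

// A pending write shadows the backing store; later writes win
fn overlay_get(
  backing: &HashMap<Vec<u8>, Vec<u8>>,
  pending: &[(Vec<u8>, Option<Vec<u8>>)],
  key: &[u8],
) -> Option<Vec<u8>> {
  let mut res = backing.get(key).cloned();
  for (k, v) in pending {
    if k.as_slice() == key {
      // clone_from may reuse res's buffer, avoiding a fresh allocation
      res.clone_from(v);
    }
  }
  res
}

fn main() {
  let backing = HashMap::from([(b"k".to_vec(), b"old".to_vec())]);
  let pending = vec![(b"k".to_vec(), Some(b"new".to_vec()))];
  assert_eq!(overlay_get(&backing, &pending, b"k"), Some(b"new".to_vec()));
}
```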

View File

@@ -55,8 +55,6 @@ impl Client {
   fn connector() -> Connector {
     let mut res = HttpConnector::new();
     res.set_keepalive(Some(core::time::Duration::from_secs(60)));
-    res.set_nodelay(true);
-    res.set_reuse_address(true);
     #[cfg(feature = "tls")]
     let res = HttpsConnectorBuilder::new()
       .with_native_roots()
@@ -70,9 +68,7 @@ impl Client {
   pub fn with_connection_pool() -> Client {
     Client {
       connection: Connection::ConnectionPool(
-        HyperClient::builder(TokioExecutor::new())
-          .pool_idle_timeout(core::time::Duration::from_secs(60))
-          .build(Self::connector()),
+        HyperClient::builder(TokioExecutor::new()).build(Self::connector()),
       ),
     }
   }

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = [] keywords = []
edition = "2021" edition = "2021"
rust-version = "1.77.0" rust-version = "1.60"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true
@@ -19,10 +19,8 @@ workspace = true
[dependencies] [dependencies]
zeroize = { version = "^1.5", default-features = false } zeroize = { version = "^1.5", default-features = false }
[build-dependencies]
rustversion = { version = "1", default-features = false }
[features] [features]
std = ["zeroize/std"] std = ["zeroize/std"]
default = ["std"] default = ["std"]
allocator = [] # Commented for now as it requires nightly and we don't use nightly
# allocator = []

View File

@@ -1,10 +0,0 @@
-#[rustversion::nightly]
-fn main() {
-  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
-  println!("cargo::rustc-cfg=zalloc_rustc_nightly");
-}
-
-#[rustversion::not(nightly)]
-fn main() {
-  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
-}

View File

@@ -1,6 +1,6 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
+#![cfg_attr(feature = "allocator", feature(allocator_api))]

 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
 //! This can either be used with Box (requires nightly and the "allocator" feature) to provide the
@@ -17,12 +17,12 @@ use zeroize::Zeroize;
 /// An allocator wrapper which zeroizes its memory on dealloc.
 pub struct ZeroizingAlloc<T>(pub T);

-#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
+#[cfg(feature = "allocator")]
 use core::{
   ptr::NonNull,
   alloc::{AllocError, Allocator},
 };
-#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
+#[cfg(feature = "allocator")]
 unsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {
   fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
     self.0.allocate(layout)
View File

@@ -122,7 +122,7 @@ impl QueuedBatchesDb {
   pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
     let batches_vec = Self::get(txn, set).unwrap_or_default();
-    txn.del(Self::key(set));
+    txn.del(&Self::key(set));

     let mut batches: &[u8] = &batches_vec;
     let mut res = vec![];

View File

@@ -358,7 +358,7 @@ impl LibP2p {
       .with_behaviour(|_| behavior)
       .unwrap()
       .build();
-    const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
+    const PORT: u16 = 30564; // 5132 ^ (('c' << 8) | 'o')
     swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();

     let (send_send, mut send_recv) = mpsc::unbounded_channel();
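For reference, the comment's XOR formula evaluates to the old constant; the new side keeps the comment but bumps the port by one:

```rust
fn main() {
  // 'c' is 0x63 and 'o' is 0x6F, so the mask is 0x636F
  let derived = 5132u16 ^ ((u16::from(b'c') << 8) | u16::from(b'o'));
  assert_eq!(derived, 30563);
  // The new constant is one above the formula's result
  assert_eq!(30564, derived + 1);
}
```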

View File

@@ -74,7 +74,7 @@ impl TributarySpec {
   pub fn genesis(&self) -> [u8; 32] {
     // Calculate the genesis for this Tributary
-    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
+    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis Testnet 2.1");
     // This locks it to a specific Serai chain
     genesis.append_message(b"serai_block", self.serai_block);
     genesis.append_message(b"session", self.set.session.0.to_le_bytes());

View File

@@ -59,7 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
 pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

 pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
-pub(crate) const TRANSACTION_MESSAGE: u8 = 1;
+pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1

 #[allow(clippy::large_enum_variant)]
 #[derive(Clone, PartialEq, Eq, Debug)]

View File

@@ -74,7 +74,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
panic!("provided transaction saved to disk wasn't provided"); panic!("provided transaction saved to disk wasn't provided");
}; };
if !res.transactions.contains_key(order) { if res.transactions.get(order).is_none() {
res.transactions.insert(order, VecDeque::new()); res.transactions.insert(order, VecDeque::new());
} }
res.transactions.get_mut(order).unwrap().push_back(tx); res.transactions.get_mut(order).unwrap().push_back(tx);
@@ -135,7 +135,7 @@ impl<D: Db, T: Transaction> ProvidedTransactions<D, T> {
txn.put(current_provided_key, currently_provided); txn.put(current_provided_key, currently_provided);
txn.commit(); txn.commit();
if !self.transactions.contains_key(order) { if self.transactions.get(order).is_none() {
self.transactions.insert(order, VecDeque::new()); self.transactions.insert(order, VecDeque::new());
} }
self.transactions.get_mut(order).unwrap().push_back(tx); self.transactions.get_mut(order).unwrap().push_back(tx);
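Both forms express "insert an empty queue if absent, then push"; the map's entry API does the same in a single lookup:

```rust
use std::collections::{HashMap, VecDeque};

fn main() {
  let mut transactions: HashMap<String, VecDeque<u32>> = HashMap::new();
  // Equivalent to the contains_key/insert/get_mut sequence, with one hash lookup
  transactions.entry("order".to_string()).or_default().push_back(5);
  transactions.entry("order".to_string()).or_default().push_back(6);
  assert_eq!(transactions["order"], VecDeque::from([5, 6]));
}
```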

View File

@@ -139,8 +139,10 @@ impl<N: Network> BlockData<N> {
     // 27, 33, 41, 46, 60, 64
     self.round_mut().step = data.step();

-    // Only return a message if we're actually a current validator
+    // Only return a message if we're actually a current validator and haven't prior posted a
+    // message
     let round_number = self.round().number;
+    let step = data.step();
     let res = self.validator_id.map(|validator_id| Message {
       sender: validator_id,
       block: self.number,
@@ -148,59 +150,21 @@ impl<N: Network> BlockData<N> {
       data,
     });

-    if let Some(res) = res.as_ref() {
-      const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
-      const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
-      const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
-      const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
-      const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";
-      let genesis = self.genesis;
-      let key = |prefix: &[u8]| [prefix, &genesis].concat();
-
+    if res.is_some() {
       let mut txn = self.db.txn();
-
-      // Ensure we haven't prior sent a message for a future block/round
-      let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
-        let key = key(prefix);
-        let latest =
-          u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
-        if latest > current {
-          None?;
-        }
-        if current > latest {
-          txn.put(&key, current.to_le_bytes());
-          return Some(true);
-        }
-        Some(false)
-      };
-      let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
-      if new_block {
-        // Delete the latest round key
-        txn.del(key(LATEST_ROUND_KEY));
-      }
-      let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
-      if new_block || new_round {
-        // Delete the messages for the old round
-        txn.del(key(PROPOSE_KEY));
-        txn.del(key(PEVOTE_KEY));
-        txn.del(key(PRECOMMIT_KEY));
-      }
-      // Check we haven't sent this message within this round
-      let msg_key = key(match res.data.step() {
-        Step::Propose => PROPOSE_KEY,
-        Step::Prevote => PEVOTE_KEY,
-        Step::Precommit => PRECOMMIT_KEY,
-      });
-      if txn.get(&msg_key).is_some() {
-        assert!(!new_block);
-        assert!(!new_round);
-        None?;
-      }
-      // Put this message to the DB
-      txn.put(&msg_key, res.encode());
+      let key = [
+        b"tendermint-machine_already_sent_message".as_ref(),
+        &self.genesis,
+        &self.number.0.to_le_bytes(),
+        &round_number.0.to_le_bytes(),
+        &step.encode(),
+      ]
+      .concat();
+      // If we've already sent a message, return
+      if txn.get(&key).is_some() {
+        None?;
+      }
+      txn.put(&key, []);
       txn.commit();
     }
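The replacement scheme keys a flag off everything identifying the message slot, so one flat lookup replaces the per-block/per-round bookkeeping. A sketch of that key layout (using a plain `u8` for the step where the diff SCALE-encodes it):

```rust
// Illustrative: one flat key per (genesis, block, round, step) message slot
fn already_sent_key(genesis: &[u8; 32], block: u64, round: u32, step: u8) -> Vec<u8> {
  [
    b"tendermint-machine_already_sent_message".as_ref(),
    genesis.as_ref(),
    &block.to_le_bytes(),
    &round.to_le_bytes(),
    &[step],
  ]
  .concat()
}

fn main() {
  // Distinct rounds never collide, so re-sending is detected per slot
  assert_ne!(already_sent_key(&[0; 32], 1, 0, 0), already_sent_key(&[0; 32], 1, 1, 0));
}
```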

View File

@@ -514,7 +514,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
         match step {
           Step::Propose => {
             // Slash the validator for not proposing when they should've
-            log::debug!(target: "tendermint", "validator didn't propose when they should have");
+            log::debug!(target: "tendermint", "Validator didn't propose when they should have");
             // this slash will be voted on.
             self.slash(
               self.weights.proposer(self.block.number, self.block.round().number),
@@ -724,7 +724,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
     if !self.block.log.log(signed.clone())? {
       return Err(TendermintError::AlreadyHandled);
     }
-    log::trace!(
+    log::debug!(
       target: "tendermint",
       "received new tendermint message (block: {}, round: {}, step: {:?})",
       msg.block.0,

View File

@@ -57,6 +57,7 @@ impl<N: Network> RoundData<N> {
   // Poll all set timeouts, returning the Step whose timeout has just expired
   pub(crate) async fn timeout_future(&self) -> Step {
+    /*
     let now = Instant::now();
     log::trace!(
       target: "tendermint",
@@ -64,6 +65,7 @@ impl<N: Network> RoundData<N> {
       self.step,
       self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()
     );
+    */

     let timeout_future = |step| {
       let timeout = self.timeouts.get(&step).copied();

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"]
edition = "2021" edition = "2021"
rust-version = "1.66" rust-version = "1.65"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["dkg", "multisig", "threshold", "ff", "group"] keywords = ["dkg", "multisig", "threshold", "ff", "group"]
edition = "2021" edition = "2021"
rust-version = "1.74" rust-version = "1.70"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -6,7 +6,7 @@ license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 edition = "2021"
-rust-version = "1.74"
+rust-version = "1.73"

 [package.metadata.docs.rs]
 all-features = true

View File

@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448"
authors = ["Luke Parker <lukeparker5132@gmail.com>"] authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["ed448", "ff", "group"] keywords = ["ed448", "ff", "group"]
edition = "2021" edition = "2021"
rust-version = "1.66" rust-version = "1.65"
[package.metadata.docs.rs] [package.metadata.docs.rs]
all-features = true all-features = true

View File

@@ -34,7 +34,7 @@ macro_rules! math_op {
    impl $Op<$Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: $Other) -> Self::Output {
-        $Value($function(self.0, other.0))
+        Self($function(self.0, other.0))
      }
    }
    impl $Assign<$Other> for $Value {
@@ -45,7 +45,7 @@ macro_rules! math_op {
    impl<'a> $Op<&'a $Other> for $Value {
      type Output = $Value;
      fn $op_fn(self, other: &'a $Other) -> Self::Output {
-        $Value($function(self.0, other.0))
+        Self($function(self.0, other.0))
      }
    }
    impl<'a> $Assign<&'a $Other> for $Value {
@@ -60,7 +60,7 @@ macro_rules! from_wrapper {
  ($wrapper: ident, $inner: ident, $uint: ident) => {
    impl From<$uint> for $wrapper {
      fn from(a: $uint) -> $wrapper {
-        $wrapper(Residue::new(&$inner::from(a)))
+        Self(Residue::new(&$inner::from(a)))
      }
    }
  };
@@ -127,7 +127,7 @@ macro_rules! field {
    impl Neg for $FieldName {
      type Output = $FieldName;
      fn neg(self) -> $FieldName {
-        $FieldName(self.0.neg())
+        Self(self.0.neg())
      }
    }
@@ -141,13 +141,13 @@ macro_rules! field {
    impl $FieldName {
      /// Perform an exponentiation.
      pub fn pow(&self, other: $FieldName) -> $FieldName {
-        let mut table = [$FieldName(Residue::ONE); 16];
+        let mut table = [Self(Residue::ONE); 16];
        table[1] = *self;
        for i in 2 .. 16 {
          table[i] = table[i - 1] * self;
        }

-        let mut res = $FieldName(Residue::ONE);
+        let mut res = Self(Residue::ONE);
        let mut bits = 0;
        for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() {
          bits <<= 1;
@@ -170,8 +170,8 @@
    }

    impl Field for $FieldName {
-      const ZERO: Self = $FieldName(Residue::ZERO);
-      const ONE: Self = $FieldName(Residue::ONE);
+      const ZERO: Self = Self(Residue::ZERO);
+      const ONE: Self = Self(Residue::ONE);

      fn random(mut rng: impl RngCore) -> Self {
        let mut bytes = [0; 112];
@@ -188,12 +188,12 @@
      fn invert(&self) -> CtOption<Self> {
        const NEG_2: $FieldName =
-          $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2))));
+          Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2))));
        CtOption::new(self.pow(NEG_2), !self.is_zero())
      }

      fn sqrt(&self) -> CtOption<Self> {
-        const MOD_1_4: $FieldName = $FieldName($ResidueType::new(
+        const MOD_1_4: $FieldName = Self($ResidueType::new(
          &$MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)),
        ));
@@ -217,14 +217,14 @@ macro_rules! field {
      const TWO_INV: Self = $FieldName($ResidueType::new(&U448::from_u8(2)).invert().0);

      const MULTIPLICATIVE_GENERATOR: Self =
-        $FieldName(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR)));
+        Self(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR)));
      // True for both the Ed448 Scalar field and FieldElement field
      const S: u32 = 1;

      // Both fields have their root of unity as -1
      const ROOT_OF_UNITY: Self =
-        $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE)));
+        Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE)));
-      const ROOT_OF_UNITY_INV: Self = $FieldName(Self::ROOT_OF_UNITY.0.invert().0);
+      const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0);

      const DELTA: Self = $FieldName(Residue::new(&U448::from_le_hex($DELTA)));
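
The `pow` shown above is a left-to-right fixed-window exponentiation: a 16-entry table of the first powers is precomputed, then the exponent is consumed MSB-first in 4-bit windows, squaring four times per window before multiplying in the table entry. A minimal sketch of the same pattern over plain u64 modular arithmetic rather than the field type; the function name and example modulus are illustrative, not from the library:

// Sketch of 4-bit fixed-window exponentiation, mirroring the windowing in `pow`
fn pow_demo(base: u64, exp: u64, modulus: u64) -> u64 {
  // Multiply via u128 to avoid overflow
  let mul = |a: u64, b: u64| ((u128::from(a) * u128::from(b)) % u128::from(modulus)) as u64;

  // Precompute base^0 ..= base^15
  let mut table = [1u64; 16];
  for i in 1 .. 16 {
    table[i] = mul(table[i - 1], base);
  }

  let mut res = 1;
  let mut bits: usize = 0;
  // Walk the exponent MSB-first, folding bits into 4-bit windows
  for i in 0 .. 64 {
    bits <<= 1;
    bits |= usize::from(((exp >> (63 - i)) & 1) == 1);
    if (i % 4) == 3 {
      // Shift the accumulator over by one window (four squarings)...
      for _ in 0 .. 4 {
        res = mul(res, res);
      }
      // ...then multiply in this window's table entry
      res = mul(res, table[bits]);
      bits = 0;
    }
  }
  res
}

// e.g. 2^10 mod 1_000_003: assert_eq!(pow_demo(2, 10, 1_000_003), 1024);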


@@ -38,6 +38,7 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features =
 multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
 schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
+dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] }
 dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] }


@@ -10,7 +10,7 @@ integrating with existing systems.
 This library offers ciphersuites compatible with the
 [IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version
-15 is supported.
+11 is supported.

 This library was
 [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf),


@@ -39,13 +39,6 @@ pub trait Algorithm<C: Curve>: Send + Sync + Clone {
   /// Obtain the list of nonces to generate, as specified by the generators to create commitments
   /// against per-nonce.
-  ///
-  /// The Algorithm is responsible for all transcripting of these nonce specifications/generators.
-  ///
-  /// The prover will be passed the commitments, and the commitments will be sent to all other
-  /// participants. No guarantees the commitments are internally consistent (have the same discrete
-  /// logarithm across generators) are made. Any Algorithm which specifies multiple generators for
-  /// a single nonce must handle that itself.
   fn nonces(&self) -> Vec<Vec<C::G>>;

   /// Generate an addendum to FROST's preprocessing stage.


@@ -1,9 +1,13 @@
 // FROST defines its nonce as sum(Di, Ei * bi)
+// Monero needs not just the nonce over G however, yet also over H
+// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once
 //
-// In order for this library to be robust, it supports generating an arbitrary amount of nonces,
-// each against an arbitrary list of generators
+// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount
+// of nonces, each against an arbitrary list of generators
 //
 // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
+// When representations across multiple generators are provided, a DLEq proof is also provided to
+// confirm their integrity

 use core::ops::Deref;
 use std::{
@@ -20,8 +24,32 @@ use transcript::Transcript;
 use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding};
 use multiexp::multiexp_vartime;

+use dleq::MultiDLEqProof;
+
 use crate::{curve::Curve, Participant};

+// Transcript used to aggregate binomial nonces for usage within a single DLEq proof.
+fn aggregation_transcript<T: Transcript>(context: &[u8]) -> T {
+  let mut transcript = T::new(b"FROST DLEq Aggregation v0.5");
+  transcript.append_message(b"context", context);
+  transcript
+}
+
+// Every participant proves for their commitments at the start of the protocol
+// These proofs are verified sequentially, requiring independent transcripts
+// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is
+// challenged in order to create a commitment to it, carried in each independent transcript
+// (effectively forking the original transcript)
+//
+// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be
+// constructed). For higher level protocols, the transcript may have contextual info these proofs
+// will then be bound to
+fn dleq_transcript<T: Transcript>(context: &[u8]) -> T {
+  let mut transcript = T::new(b"FROST Commitments DLEq v0.5");
+  transcript.append_message(b"context", context);
+  transcript
+}
+
 // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper
 // This is considered a single nonce as r = d + be
 #[derive(Clone, Zeroize)]
@@ -41,7 +69,7 @@ impl<C: Curve> GeneratorCommitments<C> {
   }
 }

-// A single nonce's commitments
+// A single nonce's commitments and relevant proofs
 #[derive(Clone, PartialEq, Eq)]
 pub(crate) struct NonceCommitments<C: Curve> {
   // Called generators as these commitments are indexed by generator later on
@@ -93,6 +121,12 @@ impl<C: Curve> NonceCommitments<C> {
       t.append_message(b"commitment_E", commitments.0[1].to_bytes());
     }
   }
+
+  fn aggregation_factor<T: Transcript>(&self, context: &[u8]) -> C::F {
+    let mut transcript = aggregation_transcript::<T>(context);
+    self.transcript(&mut transcript);
+    <C as Curve>::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref())
+  }
 }

 /// Commitments for all the nonces across all their generators.
@@ -101,26 +135,51 @@ pub(crate) struct Commitments<C: Curve> {
   // Called nonces as these commitments are indexed by nonce
   // So to get the commitments for the first nonce, it'd be commitments.nonces[0]
   pub(crate) nonces: Vec<NonceCommitments<C>>,
+  // DLEq Proof proving that each set of commitments were generated using a single pair of discrete
+  // logarithms
+  pub(crate) dleq: Option<MultiDLEqProof<C::G>>,
 }

 impl<C: Curve> Commitments<C> {
-  pub(crate) fn new<R: RngCore + CryptoRng>(
+  pub(crate) fn new<R: RngCore + CryptoRng, T: Transcript>(
     rng: &mut R,
     secret_share: &Zeroizing<C::F>,
     planned_nonces: &[Vec<C::G>],
+    context: &[u8],
   ) -> (Vec<Nonce<C>>, Commitments<C>) {
     let mut nonces = vec![];
     let mut commitments = vec![];
+
+    let mut dleq_generators = vec![];
+    let mut dleq_nonces = vec![];
     for generators in planned_nonces {
       let (nonce, these_commitments): (Nonce<C>, _) =
         NonceCommitments::new(&mut *rng, secret_share, generators);
+
+      if generators.len() > 1 {
+        dleq_generators.push(generators.clone());
+        dleq_nonces.push(Zeroizing::new(
+          (these_commitments.aggregation_factor::<T>(context) * nonce.0[1].deref()) +
+            nonce.0[0].deref(),
+        ));
+      }
+
       nonces.push(nonce);
       commitments.push(these_commitments);
     }

-    (nonces, Commitments { nonces: commitments })
+    let dleq = if !dleq_generators.is_empty() {
+      Some(MultiDLEqProof::prove(
+        rng,
+        &mut dleq_transcript::<T>(context),
+        &dleq_generators,
+        &dleq_nonces,
+      ))
+    } else {
+      None
+    };
+
+    (nonces, Commitments { nonces: commitments, dleq })
   }

   pub(crate) fn transcript<T: Transcript>(&self, t: &mut T) {
@@ -128,20 +187,58 @@ impl<C: Curve> Commitments<C> {
     for nonce in &self.nonces {
       nonce.transcript(t);
     }
+
+    // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in
+    // an exact order
+    // This means it shouldn't be possible for variadic generators to cause conflicts
+    if let Some(dleq) = &self.dleq {
+      t.append_message(b"dleq", dleq.serialize());
+    }
   }

-  pub(crate) fn read<R: Read>(reader: &mut R, generators: &[Vec<C::G>]) -> io::Result<Self> {
+  pub(crate) fn read<R: Read, T: Transcript>(
+    reader: &mut R,
+    generators: &[Vec<C::G>],
+    context: &[u8],
+  ) -> io::Result<Self> {
     let nonces = (0 .. generators.len())
       .map(|i| NonceCommitments::read(reader, &generators[i]))
       .collect::<Result<Vec<NonceCommitments<C>>, _>>()?;

-    Ok(Commitments { nonces })
+    let mut dleq_generators = vec![];
+    let mut dleq_nonces = vec![];
+    for (generators, nonce) in generators.iter().cloned().zip(&nonces) {
+      if generators.len() > 1 {
+        let binding = nonce.aggregation_factor::<T>(context);
+        let mut aggregated = vec![];
+        for commitments in &nonce.generators {
+          aggregated.push(commitments.0[0] + (commitments.0[1] * binding));
+        }
+        dleq_generators.push(generators);
+        dleq_nonces.push(aggregated);
+      }
+    }
+
+    let dleq = if !dleq_generators.is_empty() {
+      let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?;
+      dleq
+        .verify(&mut dleq_transcript::<T>(context), &dleq_generators, &dleq_nonces)
+        .map_err(|_| io::Error::other("invalid DLEq proof"))?;
+      Some(dleq)
+    } else {
+      None
+    };
+
+    Ok(Commitments { nonces, dleq })
   }

   pub(crate) fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
     for nonce in &self.nonces {
       nonce.write(writer)?;
     }
+
+    if let Some(dleq) = &self.dleq {
+      dleq.write(writer)?;
+    }
+
     Ok(())
   }
 }
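
The comments above describe the identity the aggregation relies on: a binomial nonce (d, e) collapses to a single discrete logarithm r = d + be under a binding factor b, and its per-generator commitments D = dT, E = eT collapse correspondingly to D + bE, so one multi-DLEq proof covers every generator at once. A minimal sketch of that identity using the generic `group` traits; here b is taken as an argument, standing in for the `aggregation_factor` transcript challenge, and the function name is illustrative:

use rand_core::OsRng;
use group::{ff::Field, Group};

// Returns true iff the aggregated commitment opens to the aggregated nonce for
// every generator, which is the statement the MultiDLEqProof attests to
fn binomial_aggregation_holds<G: Group>(generators: &[G], b: G::Scalar) -> bool {
  // The binomial nonce: two random scalars shared across every generator
  let d = G::Scalar::random(&mut OsRng);
  let e = G::Scalar::random(&mut OsRng);
  // The single discrete log the pair collapses to under the binding factor
  let r = d + (b * e);

  // For each generator T: D + bE == rT, where D = dT and E = eT
  generators.iter().all(|t| ((*t * d) + ((*t * e) * b)) == (*t * r))
}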


@@ -125,8 +125,14 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
     let mut params = self.params;
     let mut rng = ChaCha20Rng::from_seed(*seed.0);
-    let (nonces, commitments) =
-      Commitments::new::<_>(&mut rng, params.keys.secret_share(), &params.algorithm.nonces());
+    // Get a challenge to the existing transcript for use when proving for the commitments
+    let commitments_challenge = params.algorithm.transcript().challenge(b"commitments");
+    let (nonces, commitments) = Commitments::new::<_, A::Transcript>(
+      &mut rng,
+      params.keys.secret_share(),
+      &params.algorithm.nonces(),
+      commitments_challenge.as_ref(),
+    );
     let addendum = params.algorithm.preprocess_addendum(&mut rng, &params.keys);

     let preprocess = Preprocess { commitments, addendum };
@@ -135,18 +141,27 @@ impl<C: Curve, A: Algorithm<C>> AlgorithmMachine<C, A> {
     let mut blame_entropy = [0; 32];
     rng.fill_bytes(&mut blame_entropy);
     (
-      AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy },
+      AlgorithmSignMachine {
+        params,
+        seed,
+        commitments_challenge,
+        nonces,
+        preprocess: preprocess.clone(),
+        blame_entropy,
+      },
       preprocess,
     )
   }

   #[cfg(any(test, feature = "tests"))]
   pub(crate) fn unsafe_override_preprocess(
-    self,
+    mut self,
     nonces: Vec<Nonce<C>>,
     preprocess: Preprocess<C, A::Addendum>,
   ) -> AlgorithmSignMachine<C, A> {
     AlgorithmSignMachine {
+      commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"),
       params: self.params,
       seed: CachedPreprocess(Zeroizing::new([0; 32])),
@@ -240,6 +255,8 @@ pub struct AlgorithmSignMachine<C: Curve, A: Algorithm<C>> {
   params: Params<C, A>,
   seed: CachedPreprocess,
+  #[zeroize(skip)]
+  commitments_challenge: <A::Transcript as Transcript>::Challenge,
   pub(crate) nonces: Vec<Nonce<C>>,
   // Skips the preprocess due to being too large a bound to feasibly enforce on users
   #[zeroize(skip)]
@@ -268,7 +285,11 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
   fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
     Ok(Preprocess {
-      commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?,
+      commitments: Commitments::read::<_, A::Transcript>(
+        reader,
+        &self.params.algorithm.nonces(),
+        self.commitments_challenge.as_ref(),
+      )?,
       addendum: self.params.algorithm.read_addendum(reader)?,
     })
   }
@@ -362,7 +383,9 @@ impl<C: Curve, A: Algorithm<C>> SignMachine<A::Signature> for AlgorithmSignMachi
     rho_transcript.append_message(b"message", C::hash_msg(msg));
     rho_transcript.append_message(
       b"preprocesses",
-      C::hash_commitments(self.params.algorithm.transcript().challenge(b"preprocesses").as_ref()),
+      &C::hash_commitments(
+        self.params.algorithm.transcript().challenge(b"preprocesses").as_ref(),
+      ),
     );

     // Generate the per-signer binding factors
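
The `commitments_challenge` mechanism above follows the forking pattern documented alongside `dleq_transcript`: the algorithm's transcript is challenged once at preprocess time, and that challenge travels as context into each independently-verified proof transcript, binding them to the parent protocol's state. A sketch of the pattern, assuming only the `transcript` crate's `Transcript` trait shape (`new` / `append_message` / `challenge`); the labels and function names are illustrative:

use transcript::{Transcript, RecommendedTranscript};

// Fork: an independent transcript seeded with a commitment to the parent's state
fn forked_transcript(context: &[u8]) -> RecommendedTranscript {
  let mut transcript = RecommendedTranscript::new(b"Example Commitments Proof");
  transcript.append_message(b"context", context);
  transcript
}

fn example() {
  let mut parent = RecommendedTranscript::new(b"Example Protocol");
  parent.append_message(b"state", b"...state accumulated so far...");

  // Commit to everything transcripted up to the preprocess
  let challenge = parent.challenge(b"commitments");

  // Each sequentially-verified proof is then bound to that commitment
  let mut proof_transcript = forked_transcript(challenge.as_ref());
  proof_transcript.append_message(b"proof data", b"...");
}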


@@ -12,7 +12,7 @@ use crate::{
 /// Tests for the nonce handling code.
 pub mod nonces;
-use nonces::test_multi_nonce;
+use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof};

 /// Vectorized test suite to ensure consistency.
 pub mod vectors;
@@ -267,4 +267,6 @@ pub fn test_ciphersuite<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(rng: &mut
   test_schnorr_blame::<R, C, H>(rng);

   test_multi_nonce::<R, C>(rng);
+  test_invalid_commitment::<R, C>(rng);
+  test_invalid_dleq_proof::<R, C>(rng);
 }


@@ -9,12 +9,14 @@ use transcript::{Transcript, RecommendedTranscript};
 use ciphersuite::group::{ff::Field, Group, GroupEncoding};

+use dleq::MultiDLEqProof;
+
 pub use dkg::tests::{key_gen, recover_key};

 use crate::{
   Curve, Participant, ThresholdView, ThresholdKeys, FrostError,
   algorithm::Algorithm,
-  tests::{algorithm_machines, sign},
+  sign::{Writable, SignMachine},
+  tests::{algorithm_machines, preprocess, sign},
 };

 #[derive(Clone)]
@@ -155,3 +157,75 @@ pub fn test_multi_nonce<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
   let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
   sign(&mut *rng, &MultiNonce::<C>::new(), keys.clone(), machines, &[]);
 }
+
+/// Test malleating a commitment for a nonce across generators causes the preprocess to error.
+pub fn test_invalid_commitment<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
+  let keys = key_gen::<R, C>(&mut *rng);
+  let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
+  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});
+
+  // Select a random participant to give an invalid commitment
+  let participants = preprocesses.keys().collect::<Vec<_>>();
+  let faulty = *participants
+    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
+
+  // Grab their preprocess
+  let mut preprocess = preprocesses.remove(&faulty).unwrap();
+
+  // Mutate one of the commitments
+  let nonce =
+    preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap();
+  let generators_len = nonce.generators.len();
+  nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0
+    [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng);
+
+  // The commitments are validated at time of deserialization (read_preprocess)
+  // Accordingly, serialize it and read it again to make sure that errors
+  assert!(machines
+    .iter()
+    .next()
+    .unwrap()
+    .1
+    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
+    .is_err());
+}
+
+/// Test malleating the DLEq proof for a preprocess causes it to error.
+pub fn test_invalid_dleq_proof<R: RngCore + CryptoRng, C: Curve>(rng: &mut R) {
+  let keys = key_gen::<R, C>(&mut *rng);
+  let machines = algorithm_machines(&mut *rng, &MultiNonce::<C>::new(), &keys);
+  let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {});

+  // Select a random participant to give an invalid DLEq proof
+  let participants = preprocesses.keys().collect::<Vec<_>>();
+  let faulty = *participants
+    [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()];
+
+  // Invalidate it by replacing it with a completely different proof
+  let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))];
+  let mut preprocess = preprocesses.remove(&faulty).unwrap();
+  preprocess.commitments.dleq = Some(MultiDLEqProof::prove(
+    &mut *rng,
+    &mut RecommendedTranscript::new(b"Invalid DLEq Proof"),
+    &nonces::<C>(),
+    &dlogs,
+  ));
+
+  assert!(machines
+    .iter()
+    .next()
+    .unwrap()
+    .1
+    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
+    .is_err());
+
+  // Also test None for a proof will cause an error
+  preprocess.commitments.dleq = None;
+  assert!(machines
+    .iter()
+    .next()
+    .unwrap()
+    .1
+    .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref())
+    .is_err());
+}
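
Both tests lean on commitments being validated only at deserialization: a mutated preprocess has to be re-serialized and fed back through `read_preprocess` for the fault to surface. A toy illustration of that validate-at-read pattern, using a hypothetical struct rather than the library's types:

use std::io::{self, Read};

// Hypothetical two-byte encoding where the second byte must be the first's parity
#[allow(dead_code)]
struct Checked {
  value: u8,
  parity: u8,
}

impl Checked {
  // `read` is the only constructor from untrusted bytes, so consistency is
  // enforced here, just as Commitments::read verifies the DLEq proof
  fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    let mut buf = [0; 2];
    reader.read_exact(&mut buf)?;
    if buf[1] != (buf[0] % 2) {
      return Err(io::Error::new(io::ErrorKind::Other, "inconsistent encoding"));
    }
    Ok(Checked { value: buf[0], parity: buf[1] })
  }
}

fn demo() {
  // A valid encoding parses; flipping the parity byte errors at read time
  assert!(Checked::read(&mut [4u8, 0].as_ref()).is_ok());
  assert!(Checked::read(&mut [4u8, 1].as_ref()).is_err());
}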


@@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding};
 use crate::{
   curve::Curve,
   Participant, ThresholdCore, ThresholdKeys,
-  algorithm::{Hram, IetfSchnorr},
+  algorithm::{IetfTranscript, Hram, IetfSchnorr},
   sign::{
     Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess,
     PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
@@ -191,6 +191,7 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
         nonces: vec![NonceCommitments {
           generators: vec![GeneratorCommitments(these_commitments)],
         }],
+        dleq: None,
       },
       addendum: (),
     };
@@ -300,8 +301,12 @@ pub fn test_with_vectors<R: RngCore + CryptoRng, C: Curve, H: Hram<C>>(
     }

     // Also test it at the Commitments level
-    let (generated_nonces, commitments) =
-      Commitments::<C>::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]);
+    let (generated_nonces, commitments) = Commitments::<C>::new::<_, IetfTranscript>(
+      &mut TransparentRng(randomness),
+      &share,
+      &[vec![C::generator()]],
+      &[],
+    );
     assert_eq!(generated_nonces.len(), 1);
     assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]);


@@ -52,7 +52,7 @@ fn test_rfc8032() {
       SchnorrSignature::<Ed25519>::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref())
         .unwrap();
     let hram = Sha512::new_with_prefix(
-      [sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(),
+      &[sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(),
     );
     assert!(sig.verify(key, Scalar::from_hash(hram)));
   }


@@ -44,7 +44,6 @@ exceptions = [
   { allow = ["AGPL-3.0"], name = "serai-env" },

   { allow = ["AGPL-3.0"], name = "ethereum-serai" },
-  { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" },

   { allow = ["AGPL-3.0"], name = "serai-message-queue" },
@@ -100,7 +99,6 @@ allow-git = [
   "https://github.com/rust-lang-nursery/lazy-static.rs",
   "https://github.com/serai-dex/substrate-bip39",
   "https://github.com/serai-dex/substrate",
-  "https://github.com/alloy-rs/alloy",
   "https://github.com/monero-rs/base58-monero",
-  "https://github.com/orcalabs/dockertest-rs",
+  "https://github.com/kayabaNerve/dockertest-rs",
 ]


@@ -13,7 +13,7 @@ GEM
     forwardable-extended (2.6.0)
     google-protobuf (3.25.3-x86_64-linux)
     http_parser.rb (0.8.0)
-    i18n (1.14.5)
+    i18n (1.14.4)
       concurrent-ruby (~> 1.0)
     jekyll (4.3.3)
       addressable (~> 2.4)
@@ -55,19 +55,17 @@ GEM
     mercenary (0.4.0)
     pathutil (0.16.2)
       forwardable-extended (~> 2.6)
-    public_suffix (5.0.5)
+    public_suffix (5.0.4)
-    rake (13.2.1)
+    rake (13.1.0)
     rb-fsevent (0.11.2)
-    rb-inotify (0.11.1)
+    rb-inotify (0.10.1)
       ffi (~> 1.0)
-    rexml (3.2.8)
-      strscan (>= 3.0.9)
-    rouge (4.2.1)
+    rexml (3.2.6)
+    rouge (4.2.0)
     safe_yaml (1.0.5)
     sass-embedded (1.63.6)
       google-protobuf (~> 3.23)
       rake (>= 13.0.0)
-    strscan (3.1.0)
     terminal-table (3.0.2)
       unicode-display_width (>= 1.1.1, < 3)
     unicode-display_width (2.5.0)


@@ -1,11 +0,0 @@
-#!/bin/sh
-
-RPC_USER="${RPC_USER:=serai}"
-RPC_PASS="${RPC_PASS:=seraidex}"
-
-# Run Monero
-monerod --non-interactive --regtest --offline --fixed-difficulty=1 \
-  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \
-  --rpc-access-control-origins "*" --disable-rpc-ban \
-  --rpc-login=$RPC_USER:$RPC_PASS \
-  $1


@@ -1,3 +1,6 @@
 #!/bin/sh

-~/.foundry/bin/anvil --host 0.0.0.0 --no-cors --no-mining --slots-in-an-epoch 32 --silent
+geth --dev --networkid 5208 --datadir "eth-devnet" \
+  --http --http.api "web3,net,eth,miner" \
+  --http.addr 0.0.0.0 --http.port 8545 \
+  --http.vhosts="*" --http.corsdomain "*"


@@ -1,4 +1,4 @@
-use std::path::Path;
+use std::{path::Path};

 use crate::{Network, Os, mimalloc, os, write_dockerfile};
@@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) {
   const DOWNLOAD_BITCOIN: &str = r#"
 FROM alpine:latest as bitcoin

-ENV BITCOIN_VERSION=27.0
+ENV BITCOIN_VERSION=26.0

 RUN apk --no-cache add git gnupg


@@ -0,0 +1,5 @@
+use std::path::Path;
+
+pub fn ethereum(_orchestration_path: &Path) {
+  // TODO
+}


@@ -1,36 +0,0 @@
-use crate::Network;
-
-pub fn lighthouse(network: Network) -> (String, String, String) {
-  assert_ne!(network, Network::Dev);
-
-  #[rustfmt::skip]
-  const DOWNLOAD_LIGHTHOUSE: &str = r#"
-FROM alpine:latest as lighthouse
-
-ENV LIGHTHOUSE_VERSION=5.1.3
-
-RUN apk --no-cache add git gnupg
-
-# Download lighthouse
-RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
-
-# Verify the signature
-gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0
-gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-
-# Extract lighthouse
-RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-"#;
-
-  let run_lighthouse = format!(
-    r#"
-COPY --from=lighthouse --chown=ethereum lighthouse /bin
-ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh
-"#,
-    network.label()
-  );
-
-  (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse)
-}


@@ -1,6 +0,0 @@
-mod lighthouse;
-#[allow(unused)]
-pub use lighthouse::lighthouse;
-
-mod nimbus;
-pub use nimbus::nimbus;


@@ -1,49 +0,0 @@
-use crate::Network;
-
-pub fn nimbus(network: Network) -> (String, String, String) {
-  assert_ne!(network, Network::Dev);
-
-  let platform = match std::env::consts::ARCH {
-    "x86_64" => "amd64",
-    "arm" => "arm32v7",
-    "aarch64" => "arm64v8",
-    _ => panic!("unsupported platform"),
-  };
-
-  #[rustfmt::skip]
-  let checksum = match platform {
-    "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba",
-    "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45",
-    "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556",
-    _ => panic!("unsupported platform"),
-  };
-
-  #[rustfmt::skip]
-  let download_nimbus = format!(r#"
-FROM alpine:latest as nimbus
-
-ENV NIMBUS_VERSION=24.3.0
-ENV NIMBUS_COMMIT=dc19b082
-
-# Download nimbus
-RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
-
-# Extract nimbus
-RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz
-RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus
-
-# Verify the checksum
-RUN sha512sum nimbus | grep {checksum}
-"#);
-
-  let run_nimbus = format!(
-    r#"
-COPY --from=nimbus --chown=ethereum nimbus /bin
-ADD /orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh
-"#,
-    network.label()
-  );
-
-  (download_nimbus, String::new(), run_nimbus)
-}


@@ -1,14 +0,0 @@
-use crate::Network;
-
-pub fn anvil(network: Network) -> (String, String, String) {
-  assert_eq!(network, Network::Dev);
-
-  const ANVIL_SETUP: &str = r#"
-RUN curl -L https://foundry.paradigm.xyz | bash || exit 0
-RUN ~/.foundry/bin/foundryup
-
-EXPOSE 8545
-"#;
-
-  (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string())
-}


@@ -1,5 +0,0 @@
-mod reth;
-pub use reth::reth;
-
-mod anvil;
-pub use anvil::anvil;


@@ -1,38 +0,0 @@
-use crate::Network;
-
-pub fn reth(network: Network) -> (String, String, String) {
-  assert_ne!(network, Network::Dev);
-
-  #[rustfmt::skip]
-  const DOWNLOAD_RETH: &str = r#"
-FROM alpine:latest as reth
-
-ENV RETH_VERSION=0.2.0-beta.6
-
-RUN apk --no-cache add git gnupg
-
-# Download reth
-RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc
-
-# Verify the signature
-gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4
-gpg --verify reth-v${RETH_VERSION}-$(uname -m).tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-
-# Extract reth
-RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz
-"#;
-
-  let run_reth = format!(
-    r#"
-COPY --from=reth --chown=ethereum reth /bin
-EXPOSE 30303 9001 8545
-ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh
-"#,
-    network.label()
-  );
-
-  (DOWNLOAD_RETH.to_string(), String::new(), run_reth)
-}

Some files were not shown because too many files have changed in this diff.