1 Commit

Author: Luke Parker
SHA1: 466af2bc19
Message: Experiment with a 3s message latency in the coordinator
  This effects a 10s block time.
Date: 2024-03-02 14:46:06 -05:00
132 changed files with 1711 additions and 3360 deletions
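For context on how 3s of latency maps to a 10s block: a minimal sketch, assuming the coordinator's Tendermint-style rounds consist of three latency-bound message steps (propose, prevote, precommit) plus a 1s block-processing allowance. The constant names and the 1s allowance are illustrative, not taken from the codebase:

// Illustrative constants; the 1s processing allowance is an assumption.
const MESSAGE_LATENCY_MS: u32 = 3_000; // the experiment's 3s
const BLOCK_PROCESSING_MS: u32 = 1_000; // assumed processing allowance
// propose + prevote + precommit, each bounded by one message latency
const BLOCK_TIME_MS: u32 = BLOCK_PROCESSING_MS + (3 * MESSAGE_LATENCY_MS);
// BLOCK_TIME_MS == 10_000, matching the commit message's 10s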


@@ -42,8 +42,8 @@ runs:
     shell: bash
     run: |
       cargo install svm-rs
-      svm install 0.8.25
-      svm use 0.8.25
+      svm install 0.8.16
+      svm use 0.8.16
 # - name: Cache Rust
 #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43


@@ -1,90 +0,0 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.
# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages
on:
push:
branches:
- "develop"
paths:
- "docs/**"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow one concurrent deployment
concurrency:
group: "pages"
cancel-in-progress: true
jobs:
# Build job
build:
runs-on: ubuntu-latest
defaults:
run:
working-directory: docs
steps:
- name: Checkout
uses: actions/checkout@v3
- name: Setup Ruby
uses: ruby/setup-ruby@v1
with:
bundler-cache: true
cache-version: 0
working-directory: "${{ github.workspace }}/docs"
- name: Setup Pages
id: pages
uses: actions/configure-pages@v3
- name: Build with Jekyll
run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
env:
JEKYLL_ENV: production
- name: Upload artifact
uses: actions/upload-pages-artifact@v1
with:
path: "docs/_site/"
# Deployment job
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
runs-on: ubuntu-latest
needs: build
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v2

Cargo.lock (generated): 761 changes
File diff suppressed because it is too large.


@@ -3,7 +3,6 @@ resolver = "2"
 members = [
   # Version patches
   "patches/zstd",
-  "patches/rocksdb",
   "patches/proc-macro-crate",

   # std patches
@@ -113,8 +112,6 @@ dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "a
 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }

-# Needed for WAL compression
-rocksdb = { path = "patches/rocksdb" }
-
 # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
 proc-macro-crate = { path = "patches/proc-macro-crate" }


@@ -5,16 +5,13 @@
 Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
 experience. Funds are stored in an economically secured threshold-multisig
 wallet.

-[Getting Started](spec/Getting%20Started.md)
+[Getting Started](docs/Getting%20Started.md)

 ### Layout

 - `audits`: Audits for various parts of Serai.

-- `spec`: The specification of the Serai protocol, both internally and as
-  networked.
-
-- `docs`: User-facing documentation on the Serai protocol.
+- `docs`: Documentation on the Serai protocol.

 - `common`: Crates containing utilities common to a variety of areas under
   Serai, none neatly fitting under another category.


@@ -1,7 +1,3 @@
-# Solidity build outputs
+# solidity build outputs
 cache
 artifacts
-
-# Auto-generated ABI files
-src/abi/schnorr.rs
-src/abi/router.rs


@@ -30,9 +30,6 @@ ethers-core = { version = "2", default-features = false }
 ethers-providers = { version = "2", default-features = false }
 ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

-[build-dependencies]
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
 [dev-dependencies]
 rand_core = { version = "0.6", default-features = false, features = ["std"] }


@@ -1,20 +1,6 @@
-use std::process::Command;
-
-use ethers_contract::Abigen;
-
 fn main() {
-  println!("cargo:rerun-if-changed=contracts/*");
-  println!("cargo:rerun-if-changed=artifacts/*");
-
-  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
-    .unwrap()
-    .lines()
-  {
-    if let Some(version) = line.strip_prefix("Version: ") {
-      let version = version.split('+').next().unwrap();
-      assert_eq!(version, "0.8.25");
-    }
-  }
+  println!("cargo:rerun-if-changed=contracts");
+  println!("cargo:rerun-if-changed=artifacts");

   #[rustfmt::skip]
   let args = [
@@ -22,21 +8,8 @@ fn main() {
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
     "--optimize",
-    "./contracts/Schnorr.sol", "./contracts/Router.sol",
+    "./contracts/Schnorr.sol"
   ];

-  assert!(Command::new("solc").args(args).status().unwrap().success());
-
-  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/schnorr.rs")
-    .unwrap();
-
-  Abigen::new("Router", "./artifacts/Router.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/router.rs")
-    .unwrap();
+  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
 }
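The removed pin parsed `solc --version` output line by line. The same parse, exercised standalone (the sample output string, including the commit hash, is illustrative):

fn main() {
  // Mirrors the "Version: X.Y.Z+commit..." check the old build.rs performed
  let stdout = "solc, the solidity compiler commandline interface\nVersion: 0.8.25+commit.b61c2a91.Linux.g++";
  for line in stdout.lines() {
    if let Some(version) = line.strip_prefix("Version: ") {
      assert_eq!(version.split('+').next().unwrap(), "0.8.25");
    }
  }
}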


@@ -1,90 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;
import "./Schnorr.sol";
contract Router is Schnorr {
// Contract initializer
// TODO: Replace with a MuSig of the genesis validators
address public initializer;
// Nonce is incremented for each batch of transactions executed
uint256 public nonce;
// fixed parity for the public keys used in this contract
uint8 constant public KEY_PARITY = 27;
// current public key's x-coordinate
// note: this key must always use the fixed parity defined above
bytes32 public seraiKey;
struct OutInstruction {
address to;
uint256 value;
bytes data;
}
struct Signature {
bytes32 c;
bytes32 s;
}
// success is a uint256 representing a bitfield of transaction successes
event Executed(uint256 nonce, bytes32 batch, uint256 success);
// error types
error NotInitializer();
error AlreadyInitialized();
error InvalidKey();
error TooManyTransactions();
constructor() {
initializer = msg.sender;
}
// initSeraiKey can be called by the contract initializer to set the first
// public key, only if the public key has yet to be set.
function initSeraiKey(bytes32 _seraiKey) external {
if (msg.sender != initializer) revert NotInitializer();
if (seraiKey != 0) revert AlreadyInitialized();
if (_seraiKey == bytes32(0)) revert InvalidKey();
seraiKey = _seraiKey;
}
// updateSeraiKey validates the given Schnorr signature against the current public key,
// and if successful, updates the contract's public key to the given one.
function updateSeraiKey(
bytes32 _seraiKey,
Signature memory sig
) public {
if (_seraiKey == bytes32(0)) revert InvalidKey();
bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
seraiKey = _seraiKey;
}
// execute accepts a list of transactions to execute as well as a Schnorr signature.
// if signature verification passes, the given transactions are executed.
// if signature verification fails, this function will revert.
function execute(
OutInstruction[] calldata transactions,
Signature memory sig
) public {
if (transactions.length > 256) revert TooManyTransactions();
bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
// This prevents re-entrancy from causing double spends yet does allow
// out-of-order execution via re-entrancy
nonce++;
if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
uint256 successes;
for(uint256 i = 0; i < transactions.length; i++) {
(bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
assembly {
successes := or(successes, shl(i, success))
}
}
emit Executed(nonce, message, successes);
}
}
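The `execute` function above packs one success bit per transaction into the `Executed` event's bitfield via the `shl`/`or` assembly. The same packing as a standalone Rust sketch (u128 stands in for the contract's uint256):

fn main() {
  let results = [true, false, true]; // per-call outcomes
  let mut successes: u128 = 0;
  for (i, success) in results.into_iter().enumerate() {
    // successes := or(successes, shl(i, success))
    successes |= u128::from(success) << i;
  }
  assert_eq!(successes, 0b101);
}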


@@ -1,4 +1,4 @@
// SPDX-License-Identifier: AGPLv3 //SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0; pragma solidity ^0.8.0;
// see https://github.com/noot/schnorr-verify for implementation details // see https://github.com/noot/schnorr-verify for implementation details
@@ -7,32 +7,29 @@ contract Schnorr {
uint256 constant public Q = uint256 constant public Q =
0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
error InvalidSOrA();
error InvalidSignature();
// parity := public key y-coord parity (27 or 28) // parity := public key y-coord parity (27 or 28)
// px := public key x-coord // px := public key x-coord
// message := 32-byte hash of the message // message := 32-byte message
// c := schnorr signature challenge
// s := schnorr signature // s := schnorr signature
// e := schnorr signature challenge
function verify( function verify(
uint8 parity, uint8 parity,
bytes32 px, bytes32 px,
bytes32 message, bytes32 message,
bytes32 c, bytes32 s,
bytes32 s bytes32 e
) public view returns (bool) { ) public view returns (bool) {
// ecrecover = (m, v, r, s); // ecrecover = (m, v, r, s);
bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));
if (sa == 0) revert InvalidSOrA(); require(sp != 0);
// the ecrecover precompile implementation checks that the `r` and `s` // the ecrecover precompile implementation checks that the `r` and `s`
// inputs are non-zero (in this case, `px` and `ca`), thus we don't need to // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
// check if they're zero. // check if they're zero.will make me
address R = ecrecover(sa, parity, px, ca); address R = ecrecover(sp, parity, px, ep);
if (R == address(0)) revert InvalidSignature(); require(R != address(0), "ecrecover failed");
return c == keccak256( return e == keccak256(
abi.encodePacked(R, uint8(parity), px, block.chainid, message) abi.encodePacked(R, uint8(parity), px, block.chainid, message)
); );
} }
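Why this verifies a Schnorr signature: ecrecover returns the address of px⁻¹·(s_ecdsa·P - m·G), where P is the point whose x-coordinate is the `r` argument. Feeding it m = -s·px, r = px (so P is the public key A), and s_ecdsa = -e·px yields px⁻¹·(s·px·G - e·px·A) = s·G - e·A, which equals the nonce point R whenever s·G = R + e·A holds, so recomputing the challenge from address(R) and comparing it to `e` verifies the signature for the cost of one precompile call. A minimal standalone sketch of that algebra (using k256 and rand_core directly, not this crate's API):

use k256::{elliptic_curve::Field, ProjectivePoint, Scalar};
use rand_core::OsRng;

fn main() {
  let g = ProjectivePoint::GENERATOR;
  let a = Scalar::random(&mut OsRng); // private key
  let k = Scalar::random(&mut OsRng); // nonce
  let (big_a, big_r) = (g * a, g * k);
  let e = Scalar::random(&mut OsRng); // stand-in for the keccak256 challenge
  let s = k + (e * a);
  // What the contract's ecrecover call reconstructs: s*G - e*A == R
  assert_eq!((g * s) - (big_a * e), big_r);
}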


@@ -1,6 +0,0 @@
#[rustfmt::skip]
#[allow(clippy::all)]
pub(crate) mod schnorr;
#[rustfmt::skip]
#[allow(clippy::all)]
pub(crate) mod router;


@@ -0,0 +1,36 @@
use thiserror::Error;
use eyre::{eyre, Result};
use ethers_providers::{Provider, Http};
use ethers_contract::abigen;
use crate::crypto::ProcessedSignature;
#[derive(Error, Debug)]
pub enum EthereumError {
#[error("failed to verify Schnorr signature")]
VerificationError,
}
abigen!(Schnorr, "./artifacts/Schnorr.abi");
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
params: &ProcessedSignature,
) -> Result<()> {
if contract
.verify(
params.parity + 27,
params.px.to_bytes().into(),
params.message,
params.s.to_bytes().into(),
params.e.to_bytes().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(EthereumError::VerificationError))
}
}
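A hypothetical wiring of `call_verify` (the endpoint and `schnorr_address` are placeholders; `ProcessedSignature` comes from the crypto module imported above, and a Schnorr contract is assumed to already be deployed):

use std::sync::Arc;

async fn verify_on_chain(
  schnorr_address: ethers_core::types::Address,
  processed_signature: &ProcessedSignature,
) -> Result<()> {
  let provider = Provider::<Http>::try_from("http://127.0.0.1:8545")?;
  let contract = Schnorr::new(schnorr_address, Arc::new(provider));
  // Ok(()) iff the contract's verify() returned true
  call_verify(&contract, processed_signature).await
}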


@@ -1,54 +1,50 @@
 use sha3::{Digest, Keccak256};

-use group::ff::PrimeField;
+use group::Group;
 use k256::{
   elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
+    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
   },
-  ProjectivePoint, Scalar, U256,
+  AffinePoint, ProjectivePoint, Scalar, U256,
 };

-use frost::{
-  algorithm::{Hram, SchnorrSignature},
-  curve::Secp256k1,
-};
+use frost::{algorithm::Hram, curve::Secp256k1};

-pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
+pub fn keccak256(data: &[u8]) -> [u8; 32] {
   Keccak256::digest(data).into()
 }

-pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub fn hash_to_scalar(data: &[u8]) -> Scalar {
+  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
+}
+
+pub fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
-  // Last 20 bytes of the hash of the concatenated x and y coordinates
-  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
+  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
 }

-#[allow(non_snake_case)]
-pub struct PublicKey {
-  pub A: ProjectivePoint,
-  pub px: Scalar,
-  pub parity: u8,
-}
-
-impl PublicKey {
-  #[allow(non_snake_case)]
-  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
-    let affine = A.to_affine();
-    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
-    if parity != 27 {
-      None?;
-    }
-    let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
-    // Return None if a reduction would occur
-    if x_coord_scalar.to_repr() != x_coord {
-      None?;
-    }
-    Some(PublicKey { A, px: x_coord_scalar, parity })
-  }
-}
+pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
+  if r.is_zero().into() || s.is_zero().into() {
+    return None;
+  }
+
+  #[allow(non_snake_case)]
+  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
+  #[allow(non_snake_case)]
+  if let Some(R) = Option::<AffinePoint>::from(R) {
+    #[allow(non_snake_case)]
+    let R = ProjectivePoint::from(R);
+    let r = r.invert().unwrap();
+    let u1 = ProjectivePoint::GENERATOR * (-message * r);
+    let u2 = R * (s * r);
+    let key: ProjectivePoint = u1 + u2;
+    if !bool::from(key.is_identity()) {
+      return Some(address(&key));
+    }
+  }
+
+  None
+}

 #[derive(Clone, Default)]
@@ -59,33 +55,53 @@ impl Hram<Secp256k1> for EthereumHram {
     let a_encoded_point = A.to_encoded_point(true);
     let mut a_encoded = a_encoded_point.as_ref().to_owned();
     a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
-    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));

     let mut data = address(R).to_vec();
     data.append(&mut a_encoded);
-    data.extend(m);
+    data.append(&mut m.to_vec());
     Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
   }
 }

-pub struct Signature {
-  pub(crate) c: Scalar,
-  pub(crate) s: Scalar,
-}
-impl Signature {
-  pub fn new(
-    public_key: &PublicKey,
-    chain_id: U256,
-    m: &[u8],
-    signature: SchnorrSignature<Secp256k1>,
-  ) -> Option<Signature> {
-    let c = EthereumHram::hram(
-      &signature.R,
-      &public_key.A,
-      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-    );
-    if !signature.verify(public_key.A, c) {
-      None?;
-    }
-    Some(Signature { c, s: signature.s })
-  }
-}
+pub struct ProcessedSignature {
+  pub s: Scalar,
+  pub px: Scalar,
+  pub parity: u8,
+  pub message: [u8; 32],
+  pub e: Scalar,
+}
+
+#[allow(non_snake_case)]
+pub fn preprocess_signature_for_ecrecover(
+  m: [u8; 32],
+  R: &ProjectivePoint,
+  s: Scalar,
+  A: &ProjectivePoint,
+  chain_id: U256,
+) -> (Scalar, Scalar) {
+  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
+  let sr = processed_sig.s.mul(&processed_sig.px).negate();
+  let er = processed_sig.e.mul(&processed_sig.px).negate();
+  (sr, er)
+}
+
+#[allow(non_snake_case)]
+pub fn process_signature_for_contract(
+  m: [u8; 32],
+  R: &ProjectivePoint,
+  s: Scalar,
+  A: &ProjectivePoint,
+  chain_id: U256,
+) -> ProcessedSignature {
+  let encoded_pk = A.to_encoded_point(true);
+  let px = &encoded_pk.as_ref()[1 .. 33];
+  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
+  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
+  ProcessedSignature {
+    s,
+    px: px_scalar,
+    parity: &encoded_pk.as_ref()[0] - 2,
+    message: m,
+    e,
+  }
+}


@@ -1,16 +1,2 @@
-use thiserror::Error;
-
-pub(crate) mod abi;
-
+pub mod contract;
 pub mod crypto;
-
-pub mod schnorr;
-pub mod router;
-
-#[cfg(test)]
-mod tests;
-
-#[derive(Error, Debug)]
-pub enum Error {
-  #[error("failed to verify Schnorr signature")]
-  InvalidSignature,
-}


@@ -1,30 +0,0 @@
pub use crate::abi::router::*;
/*
use crate::crypto::{ProcessedSignature, PublicKey};
use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
use eyre::Result;
use std::{convert::From, fs::File, sync::Arc};
pub async fn router_update_public_key<M: Middleware + 'static>(
contract: &Router<M>,
public_key: &PublicKey,
signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
let pending_tx = tx.send().await?;
let receipt = pending_tx.await?;
Ok(receipt)
}
pub async fn router_execute<M: Middleware + 'static>(
contract: &Router<M>,
txs: Vec<Rtransaction>,
signature: &ProcessedSignature,
) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
let tx = contract.execute(txs, signature.into()).send();
let pending_tx = tx.send().await?;
let receipt = pending_tx.await?;
Ok(receipt)
}
*/


@@ -1,34 +0,0 @@
use eyre::{eyre, Result};
use group::ff::PrimeField;
use ethers_providers::{Provider, Http};
use crate::{
Error,
crypto::{keccak256, PublicKey, Signature},
};
pub use crate::abi::schnorr::*;
pub async fn call_verify(
contract: &Schnorr<Provider<Http>>,
public_key: &PublicKey,
message: &[u8],
signature: &Signature,
) -> Result<()> {
if contract
.verify(
public_key.parity,
public_key.px.to_repr().into(),
keccak256(message),
signature.c.to_repr().into(),
signature.s.to_repr().into(),
)
.call()
.await?
{
Ok(())
} else {
Err(eyre!(Error::InvalidSignature))
}
}


@@ -1,132 +0,0 @@
use rand_core::OsRng;
use sha2::Sha256;
use sha3::{Digest, Keccak256};
use group::Group;
use k256::{
ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
U256, Scalar, AffinePoint, ProjectivePoint,
};
use frost::{
curve::Secp256k1,
algorithm::{Hram, IetfSchnorr},
tests::{algorithm_machines, sign},
};
use crate::{crypto::*, tests::key_gen};
pub fn hash_to_scalar(data: &[u8]) -> Scalar {
Scalar::reduce(U256::from_be_slice(&keccak256(data)))
}
pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
return None;
}
#[allow(non_snake_case)]
let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
#[allow(non_snake_case)]
if let Some(R) = Option::<AffinePoint>::from(R) {
#[allow(non_snake_case)]
let R = ProjectivePoint::from(R);
let r = r.invert().unwrap();
let u1 = ProjectivePoint::GENERATOR * (-message * r);
let u2 = R * (s * r);
let key: ProjectivePoint = u1 + u2;
if !bool::from(key.is_identity()) {
return Some(address(&key));
}
}
None
}
#[test]
fn test_ecrecover() {
let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private);
// Sign the signature
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
.unwrap();
// Sanity check the signature verifies
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
}
// Perform the ecrecover
assert_eq!(
ecrecover(
hash_to_scalar(MESSAGE),
u8::from(recovery_id.unwrap().is_y_odd()) + 27,
*sig.r(),
*sig.s()
)
.unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
);
}
// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
let (keys, _) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
R: ProjectivePoint,
public_key: &PublicKey,
chain_id: U256,
m: &[u8],
s: Scalar,
) -> (u8, Scalar, Scalar) {
let c = EthereumHram::hram(
&R,
&public_key.A,
&[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
);
let sa = -(s * public_key.px);
let ca = -(c * public_key.px);
(public_key.parity, sa, ca)
}
#[test]
fn test_ecrecover_hack() {
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let chain_id = U256::ONE;
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let (parity, sa, ca) =
preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
assert_eq!(q, address(&sig.R));
}


@@ -1,92 +0,0 @@
use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
use rand_core::OsRng;
use group::ff::PrimeField;
use k256::{Scalar, ProjectivePoint};
use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
use ethers_core::{
types::{H160, Signature as EthersSignature},
abi::Abi,
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};
use crate::crypto::PublicKey;
mod crypto;
mod schnorr;
mod router;
pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
let mut offset = Scalar::ZERO;
while PublicKey::new(group_key).is_none() {
offset += Scalar::ONE;
group_key += ProjectivePoint::GENERATOR;
}
for keys in keys.values_mut() {
*keys = keys.offset(offset);
}
let public_key = PublicKey::new(group_key).unwrap();
(keys, public_key)
}
// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
// TODO: Deterministic deployments across chains
pub async fn deploy_contract(
chain_id: u32,
client: Arc<Provider<Http>>,
wallet: &k256::ecdsa::SigningKey,
name: &str,
) -> eyre::Result<H160> {
let abi: Abi =
serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = hex::decode(hex_bin).unwrap();
let factory = ContractFactory::new(abi, bin.into(), client.clone());
let mut deployment_tx = factory.deploy(())?.tx;
deployment_tx.set_chain_id(chain_id);
deployment_tx.set_gas(1_000_000);
let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
let sig_hash = deployment_tx.sighash();
let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
// EIP-155 v
let mut v = u64::from(rid.to_byte());
assert!((v == 0) || (v == 1));
v += u64::from((chain_id * 2) + 35);
let r = sig.r().to_repr();
let r_ref: &[u8] = r.as_ref();
let s = sig.s().to_repr();
let s_ref: &[u8] = s.as_ref();
let deployment_tx =
deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
let pending_tx = client.send_raw_transaction(deployment_tx).await?;
let mut receipt;
while {
receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
receipt.is_none()
} {
tokio::time::sleep(Duration::from_secs(6)).await;
}
let receipt = receipt.unwrap();
assert!(receipt.status == Some(1.into()));
Ok(receipt.contract_address.unwrap())
}


@@ -1,109 +0,0 @@
use std::{convert::TryFrom, sync::Arc, collections::HashMap};
use rand_core::OsRng;
use group::ff::PrimeField;
use frost::{
curve::Secp256k1,
Participant, ThresholdKeys,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use ethers_core::{
types::{H160, U256, Bytes},
abi::AbiEncode,
utils::{Anvil, AnvilInstance},
};
use ethers_providers::{Middleware, Provider, Http};
use crate::{
crypto::{keccak256, PublicKey, EthereumHram, Signature},
router::{self, *},
tests::{key_gen, deploy_contract},
};
async fn setup_test() -> (
u32,
AnvilInstance,
Router<Provider<Http>>,
HashMap<Participant, ThresholdKeys<Secp256k1>>,
PublicKey,
) {
let anvil = Anvil::new().spawn();
let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let contract_address =
deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
let contract = Router::new(contract_address, client.clone());
let (keys, public_key) = key_gen();
// Set the key to the threshold keys
let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
let pending_tx = tx.send().await.unwrap();
let receipt = pending_tx.await.unwrap().unwrap();
assert!(receipt.status == Some(1.into()));
(chain_id, anvil, contract, keys, public_key)
}
#[tokio::test]
async fn test_deploy_contract() {
setup_test().await;
}
pub fn hash_and_sign(
keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
public_key: &PublicKey,
chain_id: U256,
message: &[u8],
) -> Signature {
let hashed_message = keccak256(message);
let mut chain_id_bytes = [0; 32];
chain_id.to_big_endian(&mut chain_id_bytes);
let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, keys),
full_message,
);
Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
}
#[tokio::test]
async fn test_router_execute() {
let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
let to = H160([0u8; 20]);
let value = U256([0u64; 4]);
let data = Bytes::from([0]);
let tx = OutInstruction { to, value, data: data.clone() };
let nonce_call = contract.nonce();
let nonce = nonce_call.call().await.unwrap();
let encoded =
("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
let tx = contract
.execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
.gas(300_000);
let pending_tx = tx.send().await.unwrap();
let receipt = dbg!(pending_tx.await.unwrap().unwrap());
assert!(receipt.status == Some(1.into()));
println!("gas used: {:?}", receipt.cumulative_gas_used);
println!("logs: {:?}", receipt.logs);
}


@@ -1,67 +0,0 @@
use std::{convert::TryFrom, sync::Arc};
use rand_core::OsRng;
use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
use ethers_providers::{Middleware, Provider, Http};
use frost::{
curve::Secp256k1,
algorithm::IetfSchnorr,
tests::{algorithm_machines, sign},
};
use crate::{
crypto::*,
schnorr::*,
tests::{key_gen, deploy_contract},
};
async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
let anvil = Anvil::new().spawn();
let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
let contract_address =
deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
let contract = Schnorr::new(contract_address, client.clone());
(chain_id, anvil, contract)
}
#[tokio::test]
async fn test_deploy_contract() {
setup_test().await;
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (chain_id, _anvil, contract) = setup_test().await;
let chain_id = U256::from(chain_id);
let (keys, public_key) = key_gen();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
// Test an invalid signature fails
let mut sig = sig;
sig.s += Scalar::ONE;
assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
}


@@ -0,0 +1,128 @@
use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};
use rand_core::OsRng;
use ::k256::{
elliptic_curve::{bigint::ArrayEncoding, PrimeField},
U256,
};
use ethers_core::{
types::Signature,
abi::Abi,
utils::{keccak256, Anvil, AnvilInstance},
};
use ethers_contract::ContractFactory;
use ethers_providers::{Middleware, Provider, Http};
use frost::{
curve::Secp256k1,
Participant,
algorithm::IetfSchnorr,
tests::{key_gen, algorithm_machines, sign},
};
use ethereum_serai::{
crypto,
contract::{Schnorr, call_verify},
};
// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
// to fund the deployer, not create/pass a wallet
pub async fn deploy_schnorr_verifier_contract(
chain_id: u32,
client: Arc<Provider<Http>>,
wallet: &k256::ecdsa::SigningKey,
) -> eyre::Result<Schnorr<Provider<Http>>> {
let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();
let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
let hex_bin =
if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
let bin = hex::decode(hex_bin).unwrap();
let factory = ContractFactory::new(abi, bin.into(), client.clone());
let mut deployment_tx = factory.deploy(())?.tx;
deployment_tx.set_chain_id(chain_id);
deployment_tx.set_gas(500_000);
let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
let sig_hash = deployment_tx.sighash();
let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
// EIP-155 v
let mut v = u64::from(rid.to_byte());
assert!((v == 0) || (v == 1));
v += u64::from((chain_id * 2) + 35);
let r = sig.r().to_repr();
let r_ref: &[u8] = r.as_ref();
let s = sig.s().to_repr();
let s_ref: &[u8] = s.as_ref();
let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });
let pending_tx = client.send_raw_transaction(deployment_tx).await?;
let mut receipt;
while {
receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
receipt.is_none()
} {
tokio::time::sleep(Duration::from_secs(6)).await;
}
let receipt = receipt.unwrap();
assert!(receipt.status == Some(1.into()));
let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
Ok(contract)
}
async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
let anvil = Anvil::new().spawn();
let provider =
Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
let chain_id = provider.get_chainid().await.unwrap().as_u32();
let wallet = anvil.keys()[0].clone().into();
let client = Arc::new(provider);
(chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
}
#[tokio::test]
async fn test_deploy_contract() {
deploy_test_contract().await;
}
#[tokio::test]
async fn test_ecrecover_hack() {
let (chain_id, _anvil, contract) = deploy_test_contract().await;
let chain_id = U256::from(chain_id);
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let mut processed_sig =
crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);
call_verify(&contract, &processed_sig).await.unwrap();
// test invalid signature fails
processed_sig.message[0] = 0;
assert!(call_verify(&contract, &processed_sig).await.is_err());
}
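The EIP-155 `v` computation above can be spot-checked by hand. For example, with Anvil's default chain id of 31337 (an assumption about the local environment) and a recovery byte of 0:

fn main() {
  // v = recovery_byte + (chain_id * 2) + 35, per EIP-155
  let chain_id: u64 = 31337;
  let recovery_byte: u64 = 0; // from sign_prehash_recoverable
  let v = recovery_byte + (chain_id * 2) + 35;
  assert_eq!(v, 62_709);
}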


@@ -0,0 +1,87 @@
use k256::{
elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
ProjectivePoint, Scalar, U256,
};
use frost::{curve::Secp256k1, Participant};
use ethereum_serai::crypto::*;
#[test]
fn test_ecrecover() {
use rand_core::OsRng;
use sha2::Sha256;
use sha3::{Digest, Keccak256};
use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};
let private = SigningKey::random(&mut OsRng);
let public = VerifyingKey::from(&private);
const MESSAGE: &[u8] = b"Hello, World!";
let (sig, recovery_id) = private
.as_nonzero_scalar()
.try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
.unwrap();
#[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
{
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
}
assert_eq!(
ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
.unwrap(),
address(&ProjectivePoint::from(public.as_affine()))
);
}
#[test]
fn test_signing() {
use frost::{
algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign},
};
use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let _group_key = keys[&Participant::new(1).unwrap()].group_key();
const MESSAGE: &[u8] = b"Hello, World!";
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let _sig =
sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
#[test]
fn test_ecrecover_hack() {
use frost::{
algorithm::IetfSchnorr,
tests::{algorithm_machines, key_gen, sign},
};
use rand_core::OsRng;
let keys = key_gen::<_, Secp256k1>(&mut OsRng);
let group_key = keys[&Participant::new(1).unwrap()].group_key();
let group_key_encoded = group_key.to_encoded_point(true);
let group_key_compressed = group_key_encoded.as_ref();
let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));
const MESSAGE: &[u8] = b"Hello, World!";
let hashed_message = keccak256(MESSAGE);
let chain_id = U256::ONE;
let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
let sig = sign(
&mut OsRng,
&algo,
keys.clone(),
algorithm_machines(&mut OsRng, &algo, &keys),
full_message,
);
let (sr, er) =
preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
assert_eq!(q, address(&sig.R));
}


@@ -0,0 +1,2 @@
mod contract;
mod crypto;


@@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D
 use group::{ff::Field, Group};
 use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};

-use multiexp::{BatchVerifier, multiexp};
+use multiexp::BatchVerifier;

 use crate::{Commitment, ringct::bulletproofs::core::*};
@@ -17,20 +17,7 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs"));
 static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
 pub(crate) fn IP12() -> Scalar {
-  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
+  *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N()))
 }

-pub(crate) fn hadamard_fold(
-  l: &[EdwardsPoint],
-  r: &[EdwardsPoint],
-  a: Scalar,
-  b: Scalar,
-) -> Vec<EdwardsPoint> {
-  let mut res = Vec::with_capacity(l.len() / 2);
-  for i in 0 .. l.len() {
-    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
-  }
-  res
-}

 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -70,7 +57,7 @@ impl OriginalStruct {
     let mut cache = hash_to_scalar(&y.to_bytes());
     let z = cache;

-    let l0 = aL - z;
+    let l0 = &aL - z;
     let l1 = sL;

     let mut zero_twos = Vec::with_capacity(MN);
@@ -82,12 +69,12 @@
     }

     let yMN = ScalarVector::powers(y, MN);
-    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
-    let r1 = yMN * &sR;
+    let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
+    let r1 = yMN * sR;

     let (T1, T2, x, mut taux) = {
-      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
-      let t2 = l1.clone().inner_product(&r1);
+      let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
+      let t2 = inner_product(&l1, &r1);

       let mut tau1 = Scalar::random(&mut *rng);
       let mut tau2 = Scalar::random(&mut *rng);
@@ -113,10 +100,10 @@
       taux += zpow[i + 2] * gamma;
     }

-    let l = l0 + &(l1 * x);
-    let r = r0 + &(r1 * x);
+    let l = &l0 + &(l1 * x);
+    let r = &r0 + &(r1 * x);

-    let t = l.clone().inner_product(&r);
+    let t = inner_product(&l, &r);

     let x_ip =
       hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
@@ -139,8 +126,8 @@
       let (aL, aR) = a.split();
       let (bL, bR) = b.split();

-      let cL = aL.clone().inner_product(&bR);
-      let cR = aR.clone().inner_product(&bL);
+      let cL = inner_product(&aL, &bR);
+      let cR = inner_product(&aR, &bL);

       let (G_L, G_R) = G_proof.split_at(aL.len());
       let (H_L, H_R) = H_proof.split_at(aL.len());
@@ -153,8 +140,8 @@
       let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
       let winv = w.invert().unwrap();

-      a = (aL * w) + &(aR * winv);
-      b = (bL * winv) + &(bR * w);
+      a = (aL * w) + (aR * winv);
+      b = (bL * winv) + (bR * w);

       if a.len() != 1 {
         G_proof = hadamard_fold(G_L, G_R, winv, w);


@@ -112,7 +112,7 @@ impl AggregateRangeStatement {
     let mut d = ScalarVector::new(mn);
     for j in 1 ..= V.len() {
       z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
-      d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
+      d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1]));
     }

     let mut ascending_y = ScalarVector(vec![y]);
@@ -124,8 +124,7 @@
     let mut descending_y = ascending_y.clone();
     descending_y.0.reverse();

-    let d_descending_y = d.clone() * &descending_y;
-    let d_descending_y_plus_z = d_descending_y + z;
+    let d_descending_y = d.mul_vec(&descending_y);

     let y_mn_plus_one = descending_y[0] * y;
@@ -136,9 +135,9 @@
     let neg_z = -z;

     let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
-    for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
+    for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() {
       A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
-      A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
+      A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i)));
     }
     A_terms.push((y_mn_plus_one, commitment_accum));
     A_terms.push((
@@ -146,14 +145,7 @@
       Generators::g(),
     ));

-    (
-      y,
-      d_descending_y_plus_z,
-      y_mn_plus_one,
-      z,
-      ScalarVector(z_pow),
-      A + multiexp_vartime(&A_terms),
-    )
+    (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
   }

   pub(crate) fn prove<R: RngCore + CryptoRng>(
@@ -199,7 +191,7 @@
       a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
     }

-    let a_r = a_l.clone() - Scalar::ONE;
+    let a_r = a_l.sub(Scalar::ONE);

     let alpha = Scalar::random(&mut *rng);
@@ -217,11 +209,11 @@
     // Multiply by INV_EIGHT per earlier commentary
     A.0 *= crate::INV_EIGHT();

-    let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
+    let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) =
       Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);

-    let a_l = a_l - z;
-    let a_r = a_r + &d_descending_y_plus_z;
+    let a_l = a_l.sub(z);
+    let a_r = a_r.add_vec(&d_descending_y).add(z);

     let mut alpha = alpha;
     for j in 1 ..= witness.gammas.len() {
       alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;


@@ -3,7 +3,8 @@
 use group::Group;
 use dalek_ff_group::{Scalar, EdwardsPoint};

-pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
+mod scalar_vector;
+pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product};

 mod point_vector;
 pub(crate) use point_vector::PointVector;


@@ -0,0 +1,114 @@
use core::{
borrow::Borrow,
ops::{Index, IndexMut},
};
use std_shims::vec::Vec;
use zeroize::Zeroize;
use group::ff::Field;
use dalek_ff_group::Scalar;
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
impl Index<usize> for ScalarVector {
type Output = Scalar;
fn index(&self, index: usize) -> &Scalar {
&self.0[index]
}
}
impl IndexMut<usize> for ScalarVector {
fn index_mut(&mut self, index: usize) -> &mut Scalar {
&mut self.0[index]
}
}
impl ScalarVector {
pub(crate) fn new(len: usize) -> Self {
ScalarVector(vec![Scalar::ZERO; len])
}
pub(crate) fn add(&self, scalar: impl Borrow<Scalar>) -> Self {
let mut res = self.clone();
for val in &mut res.0 {
*val += scalar.borrow();
}
res
}
pub(crate) fn sub(&self, scalar: impl Borrow<Scalar>) -> Self {
let mut res = self.clone();
for val in &mut res.0 {
*val -= scalar.borrow();
}
res
}
pub(crate) fn mul(&self, scalar: impl Borrow<Scalar>) -> Self {
let mut res = self.clone();
for val in &mut res.0 {
*val *= scalar.borrow();
}
res
}
pub(crate) fn add_vec(&self, vector: &Self) -> Self {
debug_assert_eq!(self.len(), vector.len());
let mut res = self.clone();
for (i, val) in res.0.iter_mut().enumerate() {
*val += vector.0[i];
}
res
}
pub(crate) fn mul_vec(&self, vector: &Self) -> Self {
debug_assert_eq!(self.len(), vector.len());
let mut res = self.clone();
for (i, val) in res.0.iter_mut().enumerate() {
*val *= vector.0[i];
}
res
}
pub(crate) fn inner_product(&self, vector: &Self) -> Scalar {
self.mul_vec(vector).sum()
}
pub(crate) fn powers(x: Scalar, len: usize) -> Self {
debug_assert!(len != 0);
let mut res = Vec::with_capacity(len);
res.push(Scalar::ONE);
res.push(x);
for i in 2 .. len {
res.push(res[i - 1] * x);
}
res.truncate(len);
ScalarVector(res)
}
pub(crate) fn sum(mut self) -> Scalar {
self.0.drain(..).sum()
}
pub(crate) fn len(&self) -> usize {
self.0.len()
}
pub(crate) fn split(mut self) -> (Self, Self) {
debug_assert!(self.len() > 1);
let r = self.0.split_off(self.0.len() / 2);
debug_assert_eq!(self.len(), r.len());
(self, ScalarVector(r))
}
}
pub(crate) fn weighted_inner_product(
a: &ScalarVector,
b: &ScalarVector,
y: &ScalarVector,
) -> Scalar {
a.inner_product(&b.mul_vec(y))
}
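A quick sanity check of the helpers above (an in-crate sketch; `ScalarVector` is `pub(crate)`, so this only compiles inside the module tree):

#[test]
fn inner_product_sanity() {
  let a = ScalarVector(vec![Scalar::from(1u64), Scalar::from(2u64), Scalar::from(3u64)]);
  let b = ScalarVector(vec![Scalar::from(4u64), Scalar::from(5u64), Scalar::from(6u64)]);
  // (1*4) + (2*5) + (3*6) = 32
  assert_eq!(a.inner_product(&b), Scalar::from(32u64));
  // powers(2, 3) = (1, 2, 4), so the weighted form is (4*1) + (10*2) + (18*4) = 96
  let y = ScalarVector::powers(Scalar::from(2u64), 3);
  assert_eq!(weighted_inner_product(&a, &b, &y), Scalar::from(96u64));
}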


@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};
 use zeroize::{Zeroize, ZeroizeOnDrop};

-use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
+use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
 use group::{
   ff::{Field, PrimeField},
   GroupEncoding,
@@ -12,7 +12,8 @@ use group::{
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use crate::ringct::bulletproofs::plus::{
-  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
+  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product,
+  transcript::*,
 };

 // Figure 1
@@ -218,7 +219,7 @@
       .zip(g_bold.0.iter().copied())
       .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
       .collect::<Vec<_>>();
-    P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
+    P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g));
     P_terms.push((witness.alpha, h));
     debug_assert_eq!(multiexp(&P_terms), P);
     P_terms.zeroize();
@@ -257,13 +258,14 @@
       let d_l = Scalar::random(&mut *rng);
       let d_r = Scalar::random(&mut *rng);

-      let c_l = a1.clone().weighted_inner_product(&b2, &y);
-      let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
+      let c_l = weighted_inner_product(&a1, &b2, &y);
+      let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y);

       // TODO: Calculate these with a batch inversion
       let y_inv_n_hat = y_n_hat.invert().unwrap();

-      let mut L_terms = (a1.clone() * y_inv_n_hat)
+      let mut L_terms = a1
+        .mul(y_inv_n_hat)
         .0
         .drain(..)
         .zip(g_bold2.0.iter().copied())
@@ -275,7 +277,8 @@
       L_vec.push(L);
       L_terms.zeroize();

-      let mut R_terms = (a2.clone() * y_n_hat)
+      let mut R_terms = a2
+        .mul(y_n_hat)
         .0
         .drain(..)
         .zip(g_bold1.0.iter().copied())
@@ -291,8 +294,8 @@
       (e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
         Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);

-      a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
-      b = (b1 * inv_e) + &(b2 * e);
+      a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e));
+      b = b1.mul(inv_e).add_vec(&b2.mul(e));
       alpha += (d_l * e_square) + (d_r * inv_e_square);

       debug_assert_eq!(g_bold.len(), a.len());


@@ -1,17 +1,85 @@
-use core::{
-  borrow::Borrow,
-  ops::{Index, IndexMut, Add, Sub, Mul},
-};
+use core::ops::{Add, Sub, Mul, Index};

 use std_shims::vec::Vec;

 use zeroize::{Zeroize, ZeroizeOnDrop};

 use group::ff::Field;
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use multiexp::multiexp;

 #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
 pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);

+macro_rules! math_op {
+  ($Op: ident, $op: ident, $f: expr) => {
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<Scalar> for ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: Scalar) -> ScalarVector {
+        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
+      }
+    }
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<Scalar> for &ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: Scalar) -> ScalarVector {
+        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
+      }
+    }
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<ScalarVector> for ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: ScalarVector) -> ScalarVector {
+        debug_assert_eq!(self.len(), b.len());
+        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
+      }
+    }
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<&ScalarVector> for &ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: &ScalarVector) -> ScalarVector {
+        debug_assert_eq!(self.len(), b.len());
+        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
+      }
+    }
+  };
+}
+math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
+math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
+math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);
+
+impl ScalarVector {
+  pub(crate) fn new(len: usize) -> ScalarVector {
+    ScalarVector(vec![Scalar::ZERO; len])
+  }
+
+  pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
+    debug_assert!(len != 0);
+
+    let mut res = Vec::with_capacity(len);
+    res.push(Scalar::ONE);
+    for i in 1 .. len {
+      res.push(res[i - 1] * x);
+    }
+    ScalarVector(res)
+  }
+
+  pub(crate) fn sum(mut self) -> Scalar {
+    self.0.drain(..).sum()
+  }
+
+  pub(crate) fn len(&self) -> usize {
+    self.0.len()
+  }
+
+  pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
+    let (l, r) = self.0.split_at(self.0.len() / 2);
+    (ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
+  }
+}

 impl Index<usize> for ScalarVector {
   type Output = Scalar;
@@ -19,120 +87,28 @@ impl Index<usize> for ScalarVector {
   fn index(&self, index: usize) -> &Scalar {
     &self.0[index]
   }
 }

-impl IndexMut<usize> for ScalarVector {
-  fn index_mut(&mut self, index: usize) -> &mut Scalar {
-    &mut self.0[index]
-  }
-}
-
-impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn add(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s += scalar.borrow();
-    }
-    self
-  }
-}
-
-impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn sub(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s -= scalar.borrow();
-    }
-    self
-  }
-}
-
-impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn mul(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s *= scalar.borrow();
-    }
-    self
-  }
-}
-
-impl Add<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn add(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s += o;
-    }
-    self
-  }
-}
-
-impl Sub<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn sub(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s -= o;
-    }
-    self
-  }
-}
-
-impl Mul<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn mul(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s *= o;
-    }
-    self
-  }
-}
+pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
+  (a * b).sum()
+}

 impl Mul<&[EdwardsPoint]> for &ScalarVector {
   type Output = EdwardsPoint;
   fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
     debug_assert_eq!(self.len(), b.len());
-    let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
-    let res = multiexp(&multiexp_args);
-    multiexp_args.zeroize();
-    res
+    multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>())
   }
 }

-impl ScalarVector {
-  pub(crate) fn new(len: usize) -> Self {
-    ScalarVector(vec![Scalar::ZERO; len])
-  }
-
-  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
-    debug_assert!(len != 0);
-
-    let mut res = Vec::with_capacity(len);
-    res.push(Scalar::ONE);
-    res.push(x);
-    for i in 2 .. len {
-      res.push(res[i - 1] * x);
-    }
-    res.truncate(len);
-    ScalarVector(res)
-  }
-
-  pub(crate) fn len(&self) -> usize {
-    self.0.len()
-  }
-
-  pub(crate) fn sum(mut self) -> Scalar {
-    self.0.drain(..).sum()
-  }
-
-  pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
-    (self * vector).sum()
-  }
-
-  pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
-    (self * vector * y).sum()
-  }
-
-  pub(crate) fn split(mut self) -> (Self, Self) {
-    debug_assert!(self.len() > 1);
-    let r = self.0.split_off(self.0.len() / 2);
-    debug_assert_eq!(self.len(), r.len());
-    (self, ScalarVector(r))
-  }
-}
+pub(crate) fn hadamard_fold(
+  l: &[EdwardsPoint],
+  r: &[EdwardsPoint],
+  a: Scalar,
+  b: Scalar,
+) -> Vec<EdwardsPoint> {
+  let mut res = Vec::with_capacity(l.len() / 2);
+  for i in 0 .. l.len() {
+    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
+  }
+  res
+}
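For intuition, `hadamard_fold` merges two halves of a generator vector, producing one combined point per index pair; called as hadamard_fold(G_L, G_R, winv, w) it halves G_proof each round. An in-crate sketch of its effect:

#[test]
fn fold_example() {
  use dalek_ff_group::ED25519_BASEPOINT_POINT as G;
  let l = vec![G, G + G];
  let r = vec![G + G + G, G];
  let w = Scalar::from(2u64);
  let winv = w.invert().unwrap();
  let folded = hadamard_fold(&l, &r, winv, w);
  // folded[i] = winv*l[i] + w*r[i]
  assert_eq!(folded.len(), 2);
  assert_eq!(folded[0], (l[0] * winv) + (r[0] * w));
}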


@@ -9,7 +9,7 @@ use std_shims::{
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, ConditionallySelectable}; use subtle::{ConstantTimeEq, Choice, CtOption};
use curve25519_dalek::{ use curve25519_dalek::{
constants::ED25519_BASEPOINT_TABLE, constants::ED25519_BASEPOINT_TABLE,
@@ -169,8 +169,13 @@ fn core(
} }
// Perform the core loop // Perform the core loop
let mut c1 = c; let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
for i in (start .. end).map(|i| i % n) { for i in (start .. end).map(|i| i % n) {
// This will only execute once and shouldn't need to be constant time. Making it constant time
// removes the risk of branch prediction creating timing differences depending on ring index
// however
c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));
let c_p = mu_P * c; let c_p = mu_P * c;
let c_c = mu_C * c; let c_c = mu_C * c;
@@ -183,15 +188,10 @@ fn core(
to_hash.extend(L.compress().to_bytes()); to_hash.extend(L.compress().to_bytes());
to_hash.extend(R.compress().to_bytes()); to_hash.extend(R.compress().to_bytes());
c = hash_to_scalar(&to_hash); c = hash_to_scalar(&to_hash);
// This will only execute once and shouldn't need to be constant time. Making it constant time
// removes the risk of branch prediction creating timing differences depending on ring index
// however
c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
} }
// This first tuple is needed to continue signing, the latter is the c to be tested/worked with // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
((D, c * mu_P, c * mu_C), c1) ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
} }
/// CLSAG signature, as used in Monero. /// CLSAG signature, as used in Monero.
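The `CtOption` rework above captures `c` on a specific iteration without a data-dependent branch. A standalone sketch of that pattern (the function name is hypothetical), assuming `subtle` and `curve25519-dalek`:

```
use subtle::{Choice, ConstantTimeEq, CtOption};
use curve25519_dalek::scalar::Scalar;

// Returns values[0] if non-empty, selected without branching on the index
fn capture_first(values: &[Scalar]) -> Option<Scalar> {
  let mut captured = CtOption::new(Scalar::ZERO, Choice::from(0));
  for (i, value) in values.iter().enumerate() {
    // or_else only takes the new value while `captured` is still none
    captured = captured.or_else(|| CtOption::new(*value, i.ct_eq(&0)));
  }
  captured.into()
}
```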

View File

@@ -199,7 +199,6 @@ impl Algorithm<Ed25519> for ClsagMultisig {
l: Participant, l: Participant,
addendum: ClsagAddendum, addendum: ClsagAddendum,
) -> Result<(), FrostError> { ) -> Result<(), FrostError> {
// TODO: This check is faulty if two shares are additive inverses of each other
if self.image.is_identity().into() { if self.image.is_identity().into() {
self.transcript.domain_separate(b"CLSAG"); self.transcript.domain_separate(b"CLSAG");
self.input().transcript(&mut self.transcript); self.input().transcript(&mut self.transcript);

View File

@@ -9,6 +9,7 @@ use dalek_ff_group::{Scalar, EdwardsPoint};
use crate::ringct::bulletproofs::plus::{ use crate::ringct::bulletproofs::plus::{
ScalarVector, PointVector, GeneratorsList, Generators, ScalarVector, PointVector, GeneratorsList, Generators,
weighted_inner_product::{WipStatement, WipWitness}, weighted_inner_product::{WipStatement, WipWitness},
weighted_inner_product,
}; };
#[test] #[test]
@@ -67,7 +68,7 @@ fn test_weighted_inner_product() {
#[allow(non_snake_case)] #[allow(non_snake_case)]
let P = g_bold.multiexp(&a) + let P = g_bold.multiexp(&a) +
h_bold.multiexp(&b) + h_bold.multiexp(&b) +
(g * a.clone().weighted_inner_product(&b, &y_vec)) + (g * weighted_inner_product(&a, &b, &y_vec)) +
(h * alpha); (h * alpha);
let statement = WipStatement::new(generators, P, y); let statement = WipStatement::new(generators, P, y);

View File

@@ -57,7 +57,7 @@ fn clsag() {
} }
let image = generate_key_image(&secrets.0); let image = generate_key_image(&secrets.0);
let (mut clsag, pseudo_out) = Clsag::sign( let (clsag, pseudo_out) = Clsag::sign(
&mut OsRng, &mut OsRng,
vec![( vec![(
secrets.0, secrets.0,
@@ -76,12 +76,7 @@ fn clsag() {
msg, msg,
) )
.swap_remove(0); .swap_remove(0);
clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap(); clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
// make sure verification fails if we throw a random `c1` at it.
clsag.c1 = random_scalar(&mut OsRng);
assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
} }
} }

View File

@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
.unwrap(); .unwrap();
let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap(); let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();
// TODO: Needs https://github.com/monero-project/monero/pull/9260 // TODO: Needs https://github.com/monero-project/monero/pull/8882
// let fee_rate = daemon_rpc // let fee_rate = daemon_rpc
// .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant) // .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
// .await // .await
@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap(); let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
// TODO: Needs https://github.com/monero-project/monero/pull/9260 // TODO: Needs https://github.com/monero-project/monero/pull/8882
// runner::check_weight_and_fee(&tx, fee_rate); // runner::check_weight_and_fee(&tx, fee_rate);
match spec { match spec {

View File

@@ -18,7 +18,7 @@ workspace = true
[dependencies] [dependencies]
parity-db = { version = "0.4", default-features = false, optional = true } parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true } rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }
[features] [features]
parity-db = ["dep:parity-db"] parity-db = ["dep:parity-db"]

View File

@@ -1,65 +1,42 @@
use std::sync::Arc; use std::sync::Arc;
use rocksdb::{ use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB};
DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
Transaction as RocksTransaction, Options, OptimisticTransactionDB,
};
use crate::*; use crate::*;
pub struct Transaction<'a, T: ThreadMode>( impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
RocksTransaction<'a, OptimisticTransactionDB<T>>,
&'a OptimisticTransactionDB<T>,
);
impl<T: ThreadMode> Get for Transaction<'_, T> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> { fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
self.0.get(key).expect("couldn't read from RocksDB via transaction") self.get(key).expect("couldn't read from RocksDB via transaction")
} }
} }
impl<T: ThreadMode> DbTxn for Transaction<'_, T> { impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
self.0.put(key, value).expect("couldn't write to RocksDB via transaction") Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
} }
fn del(&mut self, key: impl AsRef<[u8]>) { fn del(&mut self, key: impl AsRef<[u8]>) {
self.0.delete(key).expect("couldn't delete from RocksDB via transaction") self.delete(key).expect("couldn't delete from RocksDB via transaction")
} }
fn commit(self) { fn commit(self) {
self.0.commit().expect("couldn't commit to RocksDB via transaction"); Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
self.1.flush().expect("couldn't flush RocksDB");
} }
} }
impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> { impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> { fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB") TransactionDB::get(self, key).expect("couldn't read from RocksDB")
} }
} }
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> { impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
type Transaction<'a> = Transaction<'a, T>; type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
fn txn(&mut self) -> Self::Transaction<'_> { fn txn(&mut self) -> Self::Transaction<'_> {
let mut opts = WriteOptions::default(); self.transaction()
opts.set_sync(true);
Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
} }
} }
pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>; pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
pub fn new_rocksdb(path: &str) -> RocksDB { pub fn new_rocksdb(path: &str) -> RocksDB {
let mut options = Options::default(); let mut options = Options::default();
options.create_if_missing(true); options.create_if_missing(true);
options.set_compression_type(DBCompressionType::Zstd); options.set_compression_type(DBCompressionType::Lz4);
Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
options.set_wal_compression_type(DBCompressionType::Zstd);
// 10 MB
options.set_max_total_wal_size(10 * 1024 * 1024);
options.set_wal_size_limit_mb(10);
options.set_log_level(LogLevel::Warn);
// 1 MB
options.set_max_log_file_size(1024 * 1024);
options.set_recycle_log_file_num(1);
Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
} }
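A minimal usage sketch of the wrapper above, assuming the crate's `Get`/`DbTxn`/`Db` traits are in scope; with the WAL-flushing variant, `commit` also syncs the write to disk:

```
fn demo() {
  let mut db = new_rocksdb("./demo-db");
  let mut txn = db.txn();
  txn.put(b"key", b"value");
  assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
  txn.commit();
}
```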

View File

@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
http-body-util = { version = "0.1", default-features = false } http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false } tokio = { version = "1", default-features = false }
hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
zeroize = { version = "1", optional = true } zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true } base64ct = { version = "1", features = ["alloc"], optional = true }

View File

@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim
futures-util = { version = "0.3", default-features = false, features = ["std"] } futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
[dev-dependencies] [dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }

View File

@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
use processor_messages::coordinator::cosign_block_msg; use processor_messages::coordinator::cosign_block_msg;
use crate::{ use crate::{
p2p::{CosignedBlock, GossipMessageKind, P2p}, p2p::{CosignedBlock, P2pMessageKind, P2p},
substrate::LatestCosignedBlock, substrate::LatestCosignedBlock,
}; };
@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
for cosign in cosigns { for cosign in cosigns {
let mut buf = vec![]; let mut buf = vec![];
cosign.serialize(&mut buf).unwrap(); cosign.serialize(&mut buf).unwrap();
P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
} }
sleep(Duration::from_secs(60)).await; sleep(Duration::from_secs(60)).await;
} }

View File

@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
cosign_channel.send(cosigned_block).unwrap(); cosign_channel.send(cosigned_block).unwrap();
let mut buf = vec![]; let mut buf = vec![];
cosigned_block.serialize(&mut buf).unwrap(); cosigned_block.serialize(&mut buf).unwrap();
P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
None None
} }
// This causes an action on Substrate yet not on any Tributary // This causes an action on Substrate yet not on any Tributary
@@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
) { ) {
let mut tributaries = HashMap::new(); let mut tributaries = HashMap::new();
'outer: loop { 'outer: loop {
// TODO: Create a better async flow for this // TODO: Create a better async flow for this, as this does still hammer this task
tokio::time::sleep(core::time::Duration::from_millis(100)).await; tokio::task::yield_now().await;
match tributary_event.try_recv() { match tributary_event.try_recv() {
Ok(event) => match event { Ok(event) => match event {

File diff suppressed because it is too large

View File

@@ -41,9 +41,8 @@ enum HasEvents {
create_db!( create_db!(
SubstrateCosignDb { SubstrateCosignDb {
ScanCosignFrom: () -> u64,
IntendedCosign: () -> (u64, Option<u64>), IntendedCosign: () -> (u64, Option<u64>),
BlockHasEventsCache: (block: u64) -> HasEvents, BlockHasEvents: (block: u64) -> HasEvents,
LatestCosignedBlock: () -> u64, LatestCosignedBlock: () -> u64,
} }
); );
@@ -86,7 +85,7 @@ async fn block_has_events(
serai: &Serai, serai: &Serai,
block: u64, block: u64,
) -> Result<HasEvents, SeraiError> { ) -> Result<HasEvents, SeraiError> {
let cached = BlockHasEventsCache::get(txn, block); let cached = BlockHasEvents::get(txn, block);
match cached { match cached {
None => { None => {
let serai = serai.as_of( let serai = serai.as_of(
@@ -108,8 +107,8 @@ async fn block_has_events(
let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };
BlockHasEventsCache::set(txn, block, &has_events); BlockHasEvents::set(txn, block, &has_events);
Ok(has_events) Ok(HasEvents::Yes)
} }
Some(code) => Ok(code), Some(code) => Ok(code),
} }
@@ -136,7 +135,6 @@ async fn potentially_cosign_block(
if (block_has_events == HasEvents::No) && if (block_has_events == HasEvents::No) &&
(LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
{ {
log::debug!("automatically co-signing next block ({block}) since it has no events");
LatestCosignedBlock::set(txn, &block); LatestCosignedBlock::set(txn, &block);
} }
@@ -180,7 +178,7 @@ async fn potentially_cosign_block(
which should be cosigned). Accordingly, it is necessary to call multiple times even if which should be cosigned). Accordingly, it is necessary to call multiple times even if
`latest_number` doesn't change. `latest_number` doesn't change.
*/ */
async fn advance_cosign_protocol_inner( pub async fn advance_cosign_protocol(
db: &mut impl Db, db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai, serai: &Serai,
@@ -205,23 +203,16 @@ async fn advance_cosign_protocol_inner(
let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
// If we've never triggered a cosign, don't skip any cosigns based on proximity // If we've never triggered a cosign, don't skip any cosigns based on proximity
if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
window_end_exclusive = 1; window_end_exclusive = 0;
} }
// The consensus rules for this are `last_intended_to_cosign_block + 1`
let scan_start_block = last_intended_to_cosign_block + 1;
// As a practical optimization, we don't re-scan old blocks since old blocks are independent to
// new state
let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));
// Check all blocks within the window to see if they should be cosigned // Check all blocks within the window to see if they should be cosigned
// If so, we're skipping them and need to flag them as skipped so that once the window closes, we // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
// do cosign them // do cosign them
    // We only perform this check if we haven't already marked a block as skipped, since the // We only perform this check if we haven't already marked a block as skipped, since the
    // cosign caused by the skipped block will cosign all other blocks within this window // cosign caused by the skipped block will cosign all other blocks within this window
if skipped_block.is_none() { if skipped_block.is_none() {
let window_end_inclusive = window_end_exclusive - 1; for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
skipped_block = Some(b); skipped_block = Some(b);
log::debug!("skipping cosigning {b} due to proximity to prior cosign"); log::debug!("skipping cosigning {b} due to proximity to prior cosign");
@@ -236,7 +227,7 @@ async fn advance_cosign_protocol_inner(
// A list of sets which are cosigning, along with a boolean of if we're in the set // A list of sets which are cosigning, along with a boolean of if we're in the set
let mut cosigning = vec![]; let mut cosigning = vec![];
for block in scan_start_block ..= latest_number { for block in (last_intended_to_cosign_block + 1) ..= latest_number {
let actual_block = serai let actual_block = serai
.finalized_block_by_number(block) .finalized_block_by_number(block)
.await? .await?
@@ -285,11 +276,6 @@ async fn advance_cosign_protocol_inner(
break; break;
} }
// If this TX is committed, always start future scanning from the next block
ScanCosignFrom::set(&mut txn, &(block + 1));
// Since we're scanning *from* the next block, tidy the cache
BlockHasEventsCache::del(&mut txn, block);
} }
if let Some((number, hash)) = to_cosign { if let Some((number, hash)) = to_cosign {
@@ -311,22 +297,3 @@ async fn advance_cosign_protocol_inner(
Ok(()) Ok(())
} }
pub async fn advance_cosign_protocol(
db: &mut impl Db,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &Serai,
latest_number: u64,
) -> Result<(), SeraiError> {
loop {
let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
// Only scan 1000 blocks at a time to limit a massive txn from forming
let scan_to = latest_number.min(scan_from + 1000);
advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
// If we didn't limit the scan_to, break
if scan_to == latest_number {
break;
}
}
Ok(())
}
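A generic sketch of the chunking pattern introduced above (names are hypothetical): cap how many blocks any single call scans so no one DB transaction grows unboundedly, and resume from the last chunk's end:

```
const CHUNK: u64 = 1000;

fn advance(mut scan_from: u64, latest: u64, mut process_up_to: impl FnMut(u64)) {
  loop {
    // Only scan CHUNK blocks at a time to limit a massive txn from forming
    let scan_to = latest.min(scan_from + CHUNK);
    process_up_to(scan_to);
    if scan_to == latest {
      break;
    }
    scan_from = scan_to;
  }
}
```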

View File

@@ -11,7 +11,10 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{ use serai_client::{
SeraiError, Block, Serai, TemporalSerai, SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, NetworkId}, primitives::{BlockHash, NetworkId},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent}, validator_sets::{
primitives::{ValidatorSet, amortize_excess_key_shares},
ValidatorSetsEvent,
},
in_instructions::InInstructionsEvent, in_instructions::InInstructionsEvent,
coins::CoinsEvent, coins::CoinsEvent,
}; };
@@ -66,7 +69,12 @@ async fn handle_new_set<D: Db>(
let set_participants = let set_participants =
serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>() let mut set_data = set_participants
.into_iter()
.map(|(k, w)| (k, u16::try_from(w).unwrap()))
.collect::<Vec<_>>();
amortize_excess_key_shares(&mut set_data);
set_data
}; };
let time = if let Ok(time) = block.time() { let time = if let Ok(time) = block.time() {

View File

@@ -14,7 +14,7 @@ use tokio::sync::RwLock;
use crate::{ use crate::{
processors::{Message, Processors}, processors::{Message, Processors},
TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p, TributaryP2p, P2pMessageKind, P2p,
}; };
pub mod tributary; pub mod tributary;
@@ -45,10 +45,7 @@ impl Processors for MemProcessors {
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct LocalP2p( pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
usize,
pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);
impl LocalP2p { impl LocalP2p {
pub fn new(validators: usize) -> Vec<LocalP2p> { pub fn new(validators: usize) -> Vec<LocalP2p> {
@@ -68,13 +65,11 @@ impl P2p for LocalP2p {
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) { async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
let mut msg_ref = msg.as_slice(); self.1.write().await.1[to].push_back((self.0, msg));
let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
} }
async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) { async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
// Content-based deduplication // Content-based deduplication
let mut lock = self.1.write().await; let mut lock = self.1.write().await;
{ {
@@ -86,26 +81,19 @@ impl P2p for LocalP2p {
} }
let queues = &mut lock.1; let queues = &mut lock.1;
let kind_len = (match kind {
P2pMessageKind::ReqRes(kind) => kind.serialize(),
P2pMessageKind::Gossip(kind) => kind.serialize(),
})
.len();
let msg = msg[kind_len ..].to_vec();
for (i, msg_queue) in queues.iter_mut().enumerate() { for (i, msg_queue) in queues.iter_mut().enumerate() {
if i == self.0 { if i == self.0 {
continue; continue;
} }
msg_queue.push_back((self.0, kind, msg.clone())); msg_queue.push_back((self.0, msg.clone()));
} }
} }
async fn receive(&self) -> P2pMessage<Self> { async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
// This is a cursed way to implement an async read from a Vec // This is a cursed way to implement an async read from a Vec
loop { loop {
if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() { if let Some(res) = self.1.write().await.1[self.0].pop_front() {
return P2pMessage { sender, kind, msg }; return res;
} }
tokio::time::sleep(std::time::Duration::from_millis(100)).await; tokio::time::sleep(std::time::Duration::from_millis(100)).await;
} }
@@ -115,11 +103,6 @@ impl P2p for LocalP2p {
#[async_trait] #[async_trait]
impl TributaryP2p for LocalP2p { impl TributaryP2p for LocalP2p {
async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) { async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
<Self as P2p>::broadcast( <Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
self,
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
msg,
)
.await
} }
} }

View File

@@ -26,7 +26,7 @@ use serai_db::MemDb;
use tributary::Tributary; use tributary::Tributary;
use crate::{ use crate::{
GossipMessageKind, P2pMessageKind, P2p, P2pMessageKind, P2p,
tributary::{Transaction, TributarySpec}, tributary::{Transaction, TributarySpec},
tests::LocalP2p, tests::LocalP2p,
}; };
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
for (p2p, tributary) in &mut tributaries { for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) { while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind { match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis()); assert_eq!(genesis, tributary.genesis());
if tributary.handle_message(&msg.msg).await { if tributary.handle_message(&msg.msg).await {
p2p.broadcast(msg.kind, msg.msg).await; p2p.broadcast(msg.kind, msg.msg).await;
@@ -173,7 +173,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries { for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) { while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind { match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis()); assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await; tributary.handle_message(&msg.msg).await;
} }
@@ -199,7 +199,7 @@ async fn tributary_test() {
for (p2p, tributary) in &mut tributaries { for (p2p, tributary) in &mut tributaries {
while let Poll::Ready(msg) = poll!(p2p.receive()) { while let Poll::Ready(msg) = poll!(p2p.receive()) {
match msg.kind { match msg.kind {
P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { P2pMessageKind::Tributary(genesis) => {
assert_eq!(genesis, tributary.genesis()); assert_eq!(genesis, tributary.genesis());
tributary.handle_message(&msg.msg).await; tributary.handle_message(&msg.msg).await;
} }

View File

@@ -116,8 +116,8 @@ async fn sync_test() {
.map_err(|_| "failed to send ActiveTributary to heartbeat") .map_err(|_| "failed to send ActiveTributary to heartbeat")
.unwrap(); .unwrap();
// The heartbeat is once every 10 blocks, with some limitations // The heartbeat is once every 10 blocks
sleep(Duration::from_secs(20 * block_time)).await; sleep(Duration::from_secs(10 * block_time)).await;
assert!(syncer_tributary.tip().await != spec.genesis()); assert!(syncer_tributary.tip().await != spec.genesis());
// Verify it synced to the tip // Verify it synced to the tip

View File

@@ -74,7 +74,7 @@ impl TributarySpec {
pub fn genesis(&self) -> [u8; 32] { pub fn genesis(&self) -> [u8; 32] {
// Calculate the genesis for this Tributary // Calculate the genesis for this Tributary
let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis Testnet 2.1"); let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
// This locks it to a specific Serai chain // This locks it to a specific Serai chain
genesis.append_message(b"serai_block", self.serai_block); genesis.append_message(b"serai_block", self.serai_block);
genesis.append_message(b"session", self.set.session.0.to_le_bytes()); genesis.append_message(b"session", self.set.session.0.to_le_bytes());

View File

@@ -1,5 +1,5 @@
use core::{marker::PhantomData, fmt::Debug}; use core::{marker::PhantomData, fmt::Debug};
use std::{sync::Arc, io, collections::VecDeque}; use std::{sync::Arc, io};
use async_trait::async_trait; use async_trait::async_trait;
@@ -59,7 +59,8 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;
pub(crate) const TENDERMINT_MESSAGE: u8 = 0; pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1 pub(crate) const BLOCK_MESSAGE: u8 = 1;
pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
@@ -193,7 +194,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
); );
let blockchain = Arc::new(RwLock::new(blockchain)); let blockchain = Arc::new(RwLock::new(blockchain));
let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new())); let to_rebroadcast = Arc::new(RwLock::new(vec![]));
// Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
// P2P layer // P2P layer
let p2p_meta_task_handle = Arc::new( let p2p_meta_task_handle = Arc::new(
@@ -206,7 +207,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
for msg in to_rebroadcast { for msg in to_rebroadcast {
p2p.broadcast(genesis, msg).await; p2p.broadcast(genesis, msg).await;
} }
tokio::time::sleep(core::time::Duration::from_secs(60)).await; tokio::time::sleep(core::time::Duration::from_secs(1)).await;
} }
} }
}) })
@@ -217,15 +218,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };
let TendermintHandle { synced_block, synced_block_result, messages, machine } = let TendermintHandle { synced_block, synced_block_result, messages, machine } =
TendermintMachine::new( TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
db.clone(),
network.clone(),
genesis,
block_number,
start_time,
proposal,
)
.await;
tokio::spawn(machine.run()); tokio::spawn(machine.run());
Some(Self { Some(Self {
@@ -335,6 +328,9 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
// Return true if the message should be rebroadcasted. // Return true if the message should be rebroadcasted.
pub async fn handle_message(&self, msg: &[u8]) -> bool { pub async fn handle_message(&self, msg: &[u8]) -> bool {
// Acquire the lock now to prevent sync_block from being run at the same time
let mut sync_block = self.synced_block_result.write().await;
match msg.first() { match msg.first() {
Some(&TRANSACTION_MESSAGE) => { Some(&TRANSACTION_MESSAGE) => {
let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
@@ -366,6 +362,19 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
false false
} }
Some(&BLOCK_MESSAGE) => {
let mut msg_ref = &msg[1 ..];
let Ok(block) = Block::<T>::read(&mut msg_ref) else {
log::error!("received invalid block message");
return false;
};
let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
if self.sync_block_internal(block, commit, &mut sync_block).await {
log::debug!("synced block over p2p net instead of building the commit ourselves");
}
false
}
_ => false, _ => false,
} }
} }
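A standalone sketch of the one-byte framing these handlers rely on: the first byte tags the message kind and the remainder is the payload, with `BLOCK_MESSAGE` additionally carrying a trailing commit:

```
const TENDERMINT_MESSAGE: u8 = 0;
const BLOCK_MESSAGE: u8 = 1;
const TRANSACTION_MESSAGE: u8 = 2;

fn dispatch(msg: &[u8]) {
  match msg.first() {
    Some(&TENDERMINT_MESSAGE) => { /* decode a consensus message from msg[1 ..] */ }
    Some(&BLOCK_MESSAGE) => { /* read a Block, then take the remaining bytes as its commit */ }
    Some(&TRANSACTION_MESSAGE) => { /* read a Transaction from msg[1 ..] */ }
    _ => { /* unknown kind; ignore */ }
  }
}
```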

View File

@@ -1,8 +1,5 @@
use core::ops::Deref; use core::ops::Deref;
use std::{ use std::{sync::Arc, collections::HashMap};
sync::Arc,
collections::{VecDeque, HashMap},
};
use async_trait::async_trait; use async_trait::async_trait;
@@ -41,8 +38,9 @@ use tendermint::{
use tokio::sync::RwLock; use tokio::sync::RwLock;
use crate::{ use crate::{
TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
Blockchain, P2p,
}; };
pub mod tx; pub mod tx;
@@ -270,25 +268,46 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
pub(crate) validators: Arc<Validators>, pub(crate) validators: Arc<Validators>,
pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>, pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,
pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>, pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,
pub(crate) p2p: P, pub(crate) p2p: P,
} }
pub const BLOCK_PROCESSING_TIME: u32 = 999; pub const BLOCK_PROCESSING_TIME: u32 = 1000;
pub const LATENCY_TIME: u32 = 1667; pub const LATENCY_TIME: u32 = 3000;
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);
#[test]
fn assert_target_block_time() {
use serai_db::MemDb;
#[derive(Clone, Debug)]
pub struct DummyP2p;
#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}
  // Type parameters don't matter here since we only need to call block_time(),
  // which only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}
#[async_trait] #[async_trait]
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> { impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
type Db = D;
type ValidatorId = [u8; 32]; type ValidatorId = [u8; 32];
type SignatureScheme = Arc<Validators>; type SignatureScheme = Arc<Validators>;
type Weights = Arc<Validators>; type Weights = Arc<Validators>;
type Block = TendermintBlock; type Block = TendermintBlock;
// These are in milliseconds and create a six-second block time. // These are in milliseconds and create a ten-second block time.
// The block time is the latency on message delivery (where a message is some piece of data // The block time is the latency on message delivery (where a message is some piece of data
// embedded in a transaction) times three plus the block processing time, hence why it should be // embedded in a transaction) times three plus the block processing time, hence why it should be
// kept low. // kept low.
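Working the formula out for both parameter sets shown in this diff:

```
// TARGET_BLOCK_TIME = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME)
const _: () = assert!(999 + (3 * 1667) == 6_000); // a six-second block time
const _: () = assert!(1000 + (3 * 3000) == 10_000); // a ten-second block time
```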
@@ -306,28 +325,19 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
} }
async fn broadcast(&mut self, msg: SignedMessageFor<Self>) { async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
to_broadcast.extend(msg.encode());
// Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
// until the block it's trying to build is complete // until the block it's trying to build is complete
// If the P2P layer drops a message before all nodes obtained access, or a node had an // If the P2P layer drops a message before all nodes obtained access, or a node had an
    // intermittent failure, this will ensure reconciliation // intermittent failure, this will ensure reconciliation
// Resolves halts caused by timing discrepancies, which technically are violations of
    // Tendermint as a BFT protocol, and shouldn't occur, yet have, in low-powered testing
// environments
// This is atrocious if there's no content-based deduplication protocol for messages actively // This is atrocious if there's no content-based deduplication protocol for messages actively
// being gossiped // being gossiped
// LibP2p, as used by Serai, is configured to content-based deduplicate // LibP2p, as used by Serai, is configured to content-based deduplicate
{ let mut to_broadcast = vec![TENDERMINT_MESSAGE];
let mut to_rebroadcast_lock = self.to_rebroadcast.write().await; to_broadcast.extend(msg.encode());
to_rebroadcast_lock.push_back(to_broadcast.clone()); self.to_rebroadcast.write().await.push(to_broadcast.clone());
// We should have, ideally, 3 * validators messages within a round
// Therefore, this should keep the most recent 2-rounds
// TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
// messages
while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
to_rebroadcast_lock.pop_front();
}
}
self.p2p.broadcast(self.genesis, to_broadcast).await self.p2p.broadcast(self.genesis, to_broadcast).await
} }
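A standalone sketch of the bounded rebroadcast buffer above: with ideally three messages per validator per round, a cap of `6 * validators` keeps roughly the most recent two rounds:

```
use std::collections::VecDeque;

fn push_bounded(buf: &mut VecDeque<Vec<u8>>, msg: Vec<u8>, validators: usize) {
  buf.push_back(msg);
  // Drop the oldest messages once past two rounds' worth
  while buf.len() > (6 * validators) {
    buf.pop_front();
  }
}
```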
@@ -413,7 +423,12 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
); );
match block_res { match block_res {
Ok(()) => { Ok(()) => {
// If we successfully added this block, break // If we successfully added this block, broadcast it
// TODO: Move this under the coordinator once we set up on new block notifications?
let mut msg = serialized_block.0;
msg.insert(0, BLOCK_MESSAGE);
msg.extend(encoded_commit);
self.p2p.broadcast(self.genesis, msg).await;
break; break;
} }
Err(BlockError::NonLocalProvided(hash)) => { Err(BlockError::NonLocalProvided(hash)) => {
@@ -422,14 +437,13 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
hex::encode(hash), hex::encode(hash),
hex::encode(self.genesis) hex::encode(self.genesis)
); );
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
} }
_ => return invalid_block(), _ => return invalid_block(),
} }
} }
// Since we've added a valid block, clear to_rebroadcast // Since we've added a valid block, clear to_rebroadcast
*self.to_rebroadcast.write().await = VecDeque::new(); *self.to_rebroadcast.write().await = vec![];
Some(TendermintBlock( Some(TendermintBlock(
self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(), self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),

View File

@@ -1,6 +1,3 @@
#[cfg(test)]
mod tendermint;
mod transaction; mod transaction;
pub use transaction::*; pub use transaction::*;

View File

@@ -1,28 +0,0 @@
use tendermint::ext::Network;
use crate::{
P2p, TendermintTx,
tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
};
#[test]
fn assert_target_block_time() {
use serai_db::MemDb;
#[derive(Clone, Debug)]
pub struct DummyP2p;
#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}
  // Type parameters don't matter here since we only need to call block_time(),
  // which only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}

View File

@@ -27,7 +27,5 @@ futures-util = { version = "0.3", default-features = false, features = ["std", "
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
tokio = { version = "1", default-features = false, features = ["time"] } tokio = { version = "1", default-features = false, features = ["time"] }
serai-db = { path = "../../../common/db", version = "0.1", default-features = false }
[dev-dependencies] [dev-dependencies]
tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] } tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }

View File

@@ -3,9 +3,6 @@ use std::{
collections::{HashSet, HashMap}, collections::{HashSet, HashMap},
}; };
use parity_scale_codec::Encode;
use serai_db::{Get, DbTxn, Db};
use crate::{ use crate::{
time::CanonicalInstant, time::CanonicalInstant,
ext::{RoundNumber, BlockNumber, Block, Network}, ext::{RoundNumber, BlockNumber, Block, Network},
@@ -15,9 +12,6 @@ use crate::{
}; };
pub(crate) struct BlockData<N: Network> { pub(crate) struct BlockData<N: Network> {
db: N::Db,
genesis: [u8; 32],
pub(crate) number: BlockNumber, pub(crate) number: BlockNumber,
pub(crate) validator_id: Option<N::ValidatorId>, pub(crate) validator_id: Option<N::ValidatorId>,
pub(crate) proposal: Option<N::Block>, pub(crate) proposal: Option<N::Block>,
@@ -38,17 +32,12 @@ pub(crate) struct BlockData<N: Network> {
impl<N: Network> BlockData<N> { impl<N: Network> BlockData<N> {
pub(crate) fn new( pub(crate) fn new(
db: N::Db,
genesis: [u8; 32],
weights: Arc<N::Weights>, weights: Arc<N::Weights>,
number: BlockNumber, number: BlockNumber,
validator_id: Option<N::ValidatorId>, validator_id: Option<N::ValidatorId>,
proposal: Option<N::Block>, proposal: Option<N::Block>,
) -> BlockData<N> { ) -> BlockData<N> {
BlockData { BlockData {
db,
genesis,
number, number,
validator_id, validator_id,
proposal, proposal,
@@ -139,35 +128,12 @@ impl<N: Network> BlockData<N> {
// 27, 33, 41, 46, 60, 64 // 27, 33, 41, 46, 60, 64
self.round_mut().step = data.step(); self.round_mut().step = data.step();
    // Only return a message if we're actually a current validator and haven't already posted a // Only return a message if we're actually a current validator
    // message self.validator_id.map(|validator_id| Message {
let round_number = self.round().number;
let step = data.step();
let res = self.validator_id.map(|validator_id| Message {
sender: validator_id, sender: validator_id,
block: self.number, block: self.number,
round: round_number, round: self.round().number,
data, data,
}); })
if res.is_some() {
let mut txn = self.db.txn();
let key = [
b"tendermint-machine_already_sent_message".as_ref(),
&self.genesis,
&self.number.0.to_le_bytes(),
&round_number.0.to_le_bytes(),
&step.encode(),
]
.concat();
// If we've already sent a message, return
if txn.get(&key).is_some() {
None?;
}
txn.put(&key, []);
txn.commit();
}
res
} }
} }
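A standalone sketch (with a hypothetical signature; the exact integer widths are the crate's) of the "sent once" key written above before any message may be returned: a restarted node finds the key and declines to sign a second, potentially conflicting, message for the same block, round, and step:

```
fn already_sent_key(genesis: &[u8; 32], block: u64, round: u32, step: &[u8]) -> Vec<u8> {
  [
    b"tendermint-machine_already_sent_message".as_ref(),
    genesis,
    &block.to_le_bytes(),
    &round.to_le_bytes(),
    step,
  ]
  .concat()
}
```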

View File

@@ -212,9 +212,6 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode
/// Trait representing the distributed system Tendermint is providing consensus over. /// Trait representing the distributed system Tendermint is providing consensus over.
#[async_trait] #[async_trait]
pub trait Network: Sized + Send + Sync { pub trait Network: Sized + Send + Sync {
/// The database used to back this.
type Db: serai_db::Db;
// Type used to identify validators. // Type used to identify validators.
type ValidatorId: ValidatorId; type ValidatorId: ValidatorId;
/// Signature scheme used by validators. /// Signature scheme used by validators.

View File

@@ -231,9 +231,6 @@ pub enum SlashEvent {
/// A machine executing the Tendermint protocol. /// A machine executing the Tendermint protocol.
pub struct TendermintMachine<N: Network> { pub struct TendermintMachine<N: Network> {
db: N::Db,
genesis: [u8; 32],
network: N, network: N,
signer: <N::SignatureScheme as SignatureScheme>::Signer, signer: <N::SignatureScheme as SignatureScheme>::Signer,
validators: N::SignatureScheme, validators: N::SignatureScheme,
@@ -313,16 +310,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now()); let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
if time_until_round_end == Duration::ZERO { if time_until_round_end == Duration::ZERO {
log::trace!( log::trace!(
target: "tendermint",
"resetting when prior round ended {}ms ago", "resetting when prior round ended {}ms ago",
Instant::now().saturating_duration_since(round_end.instant()).as_millis(), Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
); );
} }
log::trace!( log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis());
target: "tendermint",
"sleeping until round ends in {}ms",
time_until_round_end.as_millis(),
);
sleep(time_until_round_end).await; sleep(time_until_round_end).await;
// Clear our outbound message queue // Clear our outbound message queue
@@ -330,8 +322,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
// Create the new block // Create the new block
self.block = BlockData::new( self.block = BlockData::new(
self.db.clone(),
self.genesis,
self.weights.clone(), self.weights.clone(),
BlockNumber(self.block.number.0 + 1), BlockNumber(self.block.number.0 + 1),
self.signer.validator_id().await, self.signer.validator_id().await,
@@ -380,9 +370,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
/// the machine itself. The machine should have `run` called from an asynchronous task. /// the machine itself. The machine should have `run` called from an asynchronous task.
#[allow(clippy::new_ret_no_self)] #[allow(clippy::new_ret_no_self)]
pub async fn new( pub async fn new(
db: N::Db,
network: N, network: N,
genesis: [u8; 32],
last_block: BlockNumber, last_block: BlockNumber,
last_time: u64, last_time: u64,
proposal: N::Block, proposal: N::Block,
@@ -421,9 +409,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
let validator_id = signer.validator_id().await; let validator_id = signer.validator_id().await;
// 01-10 // 01-10
let mut machine = TendermintMachine { let mut machine = TendermintMachine {
db: db.clone(),
genesis,
network, network,
signer, signer,
validators, validators,
@@ -435,8 +420,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
synced_block_result_send, synced_block_result_send,
block: BlockData::new( block: BlockData::new(
db,
genesis,
weights, weights,
BlockNumber(last_block.0 + 1), BlockNumber(last_block.0 + 1),
validator_id, validator_id,
@@ -603,11 +586,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
); );
let id = block.id(); let id = block.id();
let proposal = self.network.add_block(block, commit).await; let proposal = self.network.add_block(block, commit).await;
log::trace!( log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
target: "tendermint",
"added block {} (produced by machine)",
hex::encode(id.as_ref()),
);
self.reset(msg.round, proposal).await; self.reset(msg.round, proposal).await;
} }
Err(TendermintError::Malicious(sender, evidence)) => { Err(TendermintError::Malicious(sender, evidence)) => {
@@ -701,12 +680,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
(msg.round == self.block.round().number) && (msg.round == self.block.round().number) &&
(msg.data.step() == Step::Propose) (msg.data.step() == Step::Propose)
{ {
log::trace!( log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0);
target: "tendermint",
"received Propose for block {}, round {}",
msg.block.0,
msg.round.0,
);
} }
// If this is a precommit, verify its signature // If this is a precommit, verify its signature
@@ -724,13 +698,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
if !self.block.log.log(signed.clone())? { if !self.block.log.log(signed.clone())? {
return Err(TendermintError::AlreadyHandled); return Err(TendermintError::AlreadyHandled);
} }
log::debug!( log::debug!(target: "tendermint", "received new tendermint message");
target: "tendermint",
"received new tendermint message (block: {}, round: {}, step: {:?})",
msg.block.0,
msg.round.0,
msg.data.step(),
);
// All functions, except for the finalizer and the jump, are locked to the current round // All functions, except for the finalizer and the jump, are locked to the current round
@@ -777,13 +745,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
// 55-56 // 55-56
// Jump, enabling processing by the below code // Jump, enabling processing by the below code
if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
log::debug!(
target: "tendermint",
"jumping from round {} to round {}",
self.block.round().number.0,
msg.round.0,
);
// Jump to the new round. // Jump to the new round.
let proposer = self.round(msg.round, None); let proposer = self.round(msg.round, None);
@@ -841,26 +802,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
let (participation, weight) = let (participation, weight) =
self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
let threshold_weight = self.weights.threshold();
if participation < threshold_weight {
log::trace!(
target: "tendermint",
"progess towards setting prevote timeout, participation: {}, needed: {}",
participation,
threshold_weight,
);
}
// 34-35 // 34-35
if participation >= threshold_weight { if participation >= self.weights.threshold() {
log::trace!(
target: "tendermint",
"setting timeout for prevote due to sufficient participation",
);
self.block.round_mut().set_timeout(Step::Prevote); self.block.round_mut().set_timeout(Step::Prevote);
} }
// 44-46 // 44-46
if weight >= threshold_weight { if weight >= self.weights.threshold() {
self.broadcast(Data::Precommit(None)); self.broadcast(Data::Precommit(None));
return Ok(None); return Ok(None);
} }
@@ -870,10 +818,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
if matches!(msg.data, Data::Precommit(_)) && if matches!(msg.data, Data::Precommit(_)) &&
self.block.log.has_participation(self.block.round().number, Step::Precommit) self.block.log.has_participation(self.block.round().number, Step::Precommit)
{ {
log::trace!(
target: "tendermint",
"setting timeout for precommit due to sufficient participation",
);
self.block.round_mut().set_timeout(Step::Precommit); self.block.round_mut().set_timeout(Step::Precommit);
} }

View File

@@ -1,5 +1,6 @@
use std::{sync::Arc, collections::HashMap}; use std::{sync::Arc, collections::HashMap};
use log::debug;
use parity_scale_codec::Encode; use parity_scale_codec::Encode;
use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
@@ -26,7 +27,7 @@ impl<N: Network> MessageLog<N> {
let step = msg.data.step(); let step = msg.data.step();
if let Some(existing) = msgs.get(&step) { if let Some(existing) = msgs.get(&step) {
if existing.msg.data != msg.data { if existing.msg.data != msg.data {
log::debug!( debug!(
target: "tendermint", target: "tendermint",
"Validator sent multiple messages for the same block + round + step" "Validator sent multiple messages for the same block + round + step"
); );

View File

@@ -57,7 +57,6 @@ impl<N: Network> RoundData<N> {
// Poll all set timeouts, returning the Step whose timeout has just expired // Poll all set timeouts, returning the Step whose timeout has just expired
pub(crate) async fn timeout_future(&self) -> Step { pub(crate) async fn timeout_future(&self) -> Step {
/*
let now = Instant::now(); let now = Instant::now();
log::trace!( log::trace!(
target: "tendermint", target: "tendermint",
@@ -65,7 +64,6 @@ impl<N: Network> RoundData<N> {
self.step, self.step,
self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>() self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()
); );
*/
let timeout_future = |step| { let timeout_future = |step| {
let timeout = self.timeouts.get(&step).copied(); let timeout = self.timeouts.get(&step).copied();

View File

@@ -10,8 +10,6 @@ use parity_scale_codec::{Encode, Decode};
use futures_util::sink::SinkExt; use futures_util::sink::SinkExt;
use tokio::{sync::RwLock, time::sleep}; use tokio::{sync::RwLock, time::sleep};
use serai_db::MemDb;
use tendermint_machine::{ use tendermint_machine::{
ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
SlashEvent, TendermintMachine, TendermintHandle, SlashEvent, TendermintMachine, TendermintHandle,
@@ -113,8 +111,6 @@ struct TestNetwork(
#[async_trait] #[async_trait]
impl Network for TestNetwork { impl Network for TestNetwork {
type Db = MemDb;
type ValidatorId = TestValidatorId; type ValidatorId = TestValidatorId;
type SignatureScheme = TestSignatureScheme; type SignatureScheme = TestSignatureScheme;
type Weights = TestWeights; type Weights = TestWeights;
@@ -174,9 +170,7 @@ impl TestNetwork {
let i = u16::try_from(i).unwrap(); let i = u16::try_from(i).unwrap();
let TendermintHandle { messages, synced_block, synced_block_result, machine } = let TendermintHandle { messages, synced_block, synced_block_result, machine } =
TendermintMachine::new( TendermintMachine::new(
MemDb::new(),
TestNetwork(i, arc.clone()), TestNetwork(i, arc.clone()),
[0; 32],
BlockNumber(1), BlockNumber(1),
start_time, start_time,
TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) }, TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },

docs/.gitignore
View File

@@ -1,7 +0,0 @@
_site/
.sass-cache/
.jekyll-cache/
.jekyll-metadata
.bundle/
vendor/

View File

@@ -1 +0,0 @@
3.1

View File

@@ -1,4 +0,0 @@
source 'https://rubygems.org'
gem "jekyll", "~> 4.3.3"
gem "just-the-docs", "0.8.1"

View File

@@ -1,82 +0,0 @@
GEM
remote: https://rubygems.org/
specs:
addressable (2.8.6)
public_suffix (>= 2.0.2, < 6.0)
colorator (1.1.0)
concurrent-ruby (1.2.3)
em-websocket (0.5.3)
eventmachine (>= 0.12.9)
http_parser.rb (~> 0)
eventmachine (1.2.7)
ffi (1.16.3)
forwardable-extended (2.6.0)
google-protobuf (3.25.3-x86_64-linux)
http_parser.rb (0.8.0)
i18n (1.14.4)
concurrent-ruby (~> 1.0)
jekyll (4.3.3)
addressable (~> 2.4)
colorator (~> 1.0)
em-websocket (~> 0.5)
i18n (~> 1.0)
jekyll-sass-converter (>= 2.0, < 4.0)
jekyll-watch (~> 2.0)
kramdown (~> 2.3, >= 2.3.1)
kramdown-parser-gfm (~> 1.0)
liquid (~> 4.0)
mercenary (>= 0.3.6, < 0.5)
pathutil (~> 0.9)
rouge (>= 3.0, < 5.0)
safe_yaml (~> 1.0)
terminal-table (>= 1.8, < 4.0)
webrick (~> 1.7)
jekyll-include-cache (0.2.1)
jekyll (>= 3.7, < 5.0)
jekyll-sass-converter (3.0.0)
sass-embedded (~> 1.54)
jekyll-seo-tag (2.8.0)
jekyll (>= 3.8, < 5.0)
jekyll-watch (2.2.1)
listen (~> 3.0)
just-the-docs (0.8.1)
jekyll (>= 3.8.5)
jekyll-include-cache
jekyll-seo-tag (>= 2.0)
rake (>= 12.3.1)
kramdown (2.4.0)
rexml
kramdown-parser-gfm (1.1.0)
kramdown (~> 2.0)
liquid (4.0.4)
listen (3.9.0)
rb-fsevent (~> 0.10, >= 0.10.3)
rb-inotify (~> 0.9, >= 0.9.10)
mercenary (0.4.0)
pathutil (0.16.2)
forwardable-extended (~> 2.6)
public_suffix (5.0.4)
rake (13.1.0)
rb-fsevent (0.11.2)
rb-inotify (0.10.1)
ffi (~> 1.0)
rexml (3.2.6)
rouge (4.2.0)
safe_yaml (1.0.5)
sass-embedded (1.63.6)
google-protobuf (~> 3.23)
rake (>= 13.0.0)
terminal-table (3.0.2)
unicode-display_width (>= 1.1.1, < 3)
unicode-display_width (2.5.0)
webrick (1.8.1)
PLATFORMS
x86_64-linux
DEPENDENCIES
jekyll (~> 4.3.3)
just-the-docs (= 0.8.1)
BUNDLED WITH
2.2.5

View File

@@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly
``` ```
cargo install svm-rs cargo install svm-rs
svm install 0.8.25 svm install 0.8.16
svm use 0.8.25 svm use 0.8.16
``` ```
### Install Solidity Compiler Version Manager ### Install Solidity Compiler Version Manager
``` ```
cargo install svm-rs cargo install svm-rs
svm install 0.8.25 svm install 0.8.16
svm use 0.8.25 svm use 0.8.16
``` ```
### Install foundry (for tests) ### Install foundry (for tests)

View File

@@ -1,14 +0,0 @@
title: Serai Documentation
description: Documentation for the Serai protocol.
theme: just-the-docs
url: https://docs.serai.exchange
callouts:
warning:
title: Warning
color: red
definition:
title: Definition
color: blue

View File

@@ -1,19 +0,0 @@
---
title: Automatic Market Makers
layout: default
nav_order: 2
---
# Automatic Market Makers
*text on how AMMs work*
Serai uses a symmetric liquidity pool with the `xy=k` formula.
Concentrated liquidity would presumably offer less slippage on swaps, and there are
[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420).
Unfortunately, it effectively requires active management of provided liquidity.
This disenfranchises small liquidity providers who may not have the knowledge
and resources necessary to perform such management. Since Serai is expected to
have a community-bootstrapped start, starting with concentrated liquidity would
accordingly be contradictory.
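For concreteness, a minimal sketch (not protocol code, and ignoring fees) of a swap against an `xy=k` pool: the product of the reserves is held constant, with rounding favoring the pool:

```
fn swap_output(x_reserve: u128, y_reserve: u128, x_in: u128) -> u128 {
  let k = x_reserve * y_reserve;
  // The y reserve which preserves x * y = k after receiving x_in
  let new_y = k.div_ceil(x_reserve + x_in);
  y_reserve - new_y
}
```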

View File

@@ -1,7 +0,0 @@
---
title: Cross-Chain Architecture
layout: default
nav_order: 3
---
# Cross-Chain Architecture

View File

@@ -1,6 +0,0 @@
---
title: Genesis
layout: default
nav_order: 1
parent: Economics
---

View File

@@ -1,45 +0,0 @@
---
title: Economics
layout: default
nav_order: 4
has_children: true
---
# Economics
Serai's economics change depending on which of three eras is currently
occurring.
## Genesis Era
The network starts with the "Genesis" era, where the goal of the network is to
attract the liquidity necessary to facilitate swaps. This period will last for
30 days and will let anyone add liquidity to the protocol. Only with its
conclusion will SRI start being distributed.
After the Genesis era, the network enters the "Pre-Economic Security" era.
## Pre-Economic Security
{: .definition-title }
> Definition: Economic Security
>
> Economic security is derived from it being unprofitable to misbehave.
> This is achieved by the economic penalty which is presumed to occur upon
> misbehavior exceeding the value which would presumably be gained.
> Accordingly, rational actors would behave properly, causing the protocol to
> maintain its integrity.
>
> For Serai specifically, the stake required to produce unintended signatures
> must exceed the value accessible via producing unintended signatures.
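In other words, with hypothetical names, the condition reduces to a simple inequality:

```
// Economic security holds when misbehaving costs more than it could yield
fn economically_secure(slashable_stake_value: u64, value_reachable_by_misbehaving: u64) -> bool {
  slashable_stake_value > value_reachable_by_misbehaving
}
```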
With liquidity provided, and swaps enabled, the goal is to have validators stake
sufficiently for economic security to be achieved. This is primarily via
offering freshly minted, staked SRI to would-be validators who decide to swap
external coins for their stake.
## Post-Economic Security
Having achieved economic security, the protocol changes its economics one last
time (barring future upgrades to the protocol) to a 'normal' state of
operations.

View File

@@ -1,6 +0,0 @@
---
title: Post-Economic Security
layout: default
nav_order: 3
parent: Economics
---

View File

@@ -1,6 +0,0 @@
---
title: Pre-Economic Security
layout: default
nav_order: 2
parent: Economics
---

View File

@@ -1,32 +0,0 @@
---
title: Home
layout: home
nav_order: 1
---
{: .warning }
This documentation site is still under active development and may have missing
sections, errors, and typos. Even once this documentation site is 'complete', it
may become out-of-date (as Serai is an evolving protocol yet to release) or have
minor errors.
# Serai
Serai is a fairly launched cross-chain decentralized exchange, integrating
Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR).
The Serai mainnet has yet to launch, and until then, all details are subject to
change.
Prior to the Serai mainnet launching, SRI, Serai's native coin, will not
exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale,
no developers' tax/fund, and no airdrop for out-of-mainnet activity.
Out-of-mainnet activity includes:
- Being a community member (such as on Discord or on Twitter)
- Participating in testnets
- Contributing to the GitHub
None of these will be awarded any airdrop. All distributions of SRI will happen
on-chain per the protocol's defined rules, based on on-chain activity.

View File

@@ -1,21 +0,0 @@
---
title: Coordinator
layout: default
nav_order: 3
parent: Infrastructure
---
# Coordinator
The coordinator is a local service which communicates with other validators'
coordinators. It provides a verifiable broadcast layer for various consensus
messages, such as agreement on external blockchains, key generation and signing
protocols, and the latest Serai block.
The verifiable broadcast layer is implemented via a blockchain, referred to as a
Tributary, which is agreed upon using Tendermint consensus. This consensus is
not provided by Tendermint Core/CometBFT, as used (historically/presently) in
the Cosmos SDK, but by our own implementation, designed to be used as a
library and not as another daemon. Tributaries are ephemeral, only used by the
current validators, and deleted upon the next epoch. All of the results from it
are verifiable via the external network and the Serai blockchain alone.
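As an interface, the verifiable broadcast layer can be thought of roughly as
follows. This is a hypothetical sketch for intuition only; the names do not
match the actual Tributary code:
```rust
/// A rough sketch of a Tributary-style verifiable broadcast layer.
/// Hypothetical and illustrative only.
trait VerifiableBroadcast {
  type Transaction;

  /// Broadcast a consensus message (a key gen share, a signing share, ...).
  fn publish(&mut self, tx: Self::Transaction);

  /// Yield the next message in the order agreed upon via Tendermint
  /// consensus, identical across all honest validators.
  fn next_agreed(&mut self) -> Self::Transaction;
}
```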

View File

@@ -1,6 +0,0 @@
---
title: Infrastructure
layout: default
nav_order: 6
has_children: true
---

View File

@@ -1,29 +0,0 @@
---
title: Message Queue
layout: default
nav_order: 1
parent: Infrastructure
---
# Message Queue
The Message Queue is a microservice to authenticate and relay messages between
services. It offers just three functions:
1) Queue a message.
2) Receive the next message.
3) Acknowledge a message, removing it from the queue.
This ensures messages are delivered between services, with their order
preserved. This also ensures that if a service reboots while handling a message,
it'll still handle the message once rebooted (and the message will not be lost).
The Message Queue also aims to offer increased liveness and performance.
If services directly communicated, the rate at which one service could operate
would always be bottlenecked by the service it communicates with. If the
receiving service ever went offline, the sending service wouldn't be able to
deliver messages until the receiver came back online, halting its own work.
Since the Message Queue is a dedicated microservice without complex logic, it's
much less likely to go offline or suffer from degraded performance.
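Those three functions map naturally onto a small interface. The following is a
hypothetical sketch for intuition, not the service's actual API:
```rust
// Hypothetical identifier types
type ServiceId = u64;
type MessageId = u64;

/// A sketch of the Message Queue's three functions.
trait MessageQueue {
  /// 1) Queue a message for a given service.
  fn queue(&mut self, to: ServiceId, msg: Vec<u8>);

  /// 2) Receive the next message addressed to us. Until acknowledged, the
  ///    same message keeps being returned, so a service which reboots
  ///    mid-handling will re-receive (and still handle) it.
  fn next(&mut self) -> (MessageId, Vec<u8>);

  /// 3) Acknowledge a message, removing it from the queue.
  fn ack(&mut self, id: MessageId);
}
```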

View File

@@ -1,21 +0,0 @@
---
title: Processor
layout: default
nav_order: 2
parent: Infrastructure
---
# Processor
The processor performs several important tasks with regard to the external
network. Each of them is documented in the following sections.
## Key Generation
## Scanning
## Signing Batches
## Planning Transactions
## Cosigning
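Purely as a hypothetical framing (the subsections above are still to be
written, and none of these names come from the codebase), the five tasks could
be sketched as:
```rust
/// A hypothetical framing of the processor's tasks. Illustrative only.
trait Processor {
  /// Run the distributed key generation protocol for the external network.
  fn generate_key(&mut self);
  /// Scan an external block for relevant outputs/events.
  fn scan_block(&mut self, block: &[u8]);
  /// Sign a `Batch` reporting scanned events to Serai.
  fn sign_batch(&mut self);
  /// Plan transactions on the external network (e.g. fulfilling burns).
  fn plan_transactions(&mut self);
  /// Cosign a Serai block.
  fn cosign(&mut self, block_hash: [u8; 32]);
}
```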

View File

@@ -1,6 +0,0 @@
---
title: Serai
layout: default
nav_order: 4
parent: Infrastructure
---

View File

@@ -1,6 +0,0 @@
---
title: Integrating with Serai
layout: default
nav_order: 7
has_children: true
---

View File

(binary image changed; 1.1 KiB before and after)

View File

@@ -1,44 +0,0 @@
---
title: Protocol Changes
layout: default
nav_order: 5
---
# Protocol Changes
The protocol has no central authority nor organization nor actors (such as
liquidity providers/validators) who can compel new protocol rules. The Serai
protocol is as written, with all granted functionality and declared rules
present.
Validators are explicitly granted the ability to signal for two things to occur:
### 1) Halt another validator set.
This will presumably occur if another validator set turns malicious, and is the
expected incident response, applying an economic penalty ideally of greater
value than the damage wrought. Halting a validator set prevents further
publication of `Batch`s, preventing improper actions on the Serai blockchain,
and preventing validators from unstaking (as unstaking only occurs once future
validator sets have accepted responsibility, and accepting responsibility
requires `Batch` publication). This effectively burns the malicious validators'
stake.
### 2) Retire the protocol.
A supermajority of validators may favor a signal (an opaque 32-byte ID). A
common signal gaining sufficient favor will cause the protocol to stop producing
blocks in two weeks.
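For intuition, a stake-weighted favor check might look like the following.
This is a hypothetical sketch assuming the usual BFT two-thirds threshold; the
actual on-chain logic may differ:
```rust
use std::collections::HashMap;

/// An opaque 32-byte signal ID, as described above.
type Signal = [u8; 32];

/// Whether a signal has gained supermajority favor, assuming a (hypothetical)
/// strict greater-than-two-thirds-of-stake threshold.
fn has_supermajority(favor: &HashMap<Signal, u64>, signal: &Signal, total_stake: u64) -> bool {
  let favoring = favor.get(signal).copied().unwrap_or(0);
  // Widen to u128 so the comparison can't overflow
  3 * u128::from(favoring) > 2 * u128::from(total_stake)
}
```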
Nodes will presumably, as individual entities, hard fork to new consensus rules.
These rules will presumably remove the rule to stop producing blocks in two
weeks; they may declare new validators, and they may declare entirely new
functionality.
While nodes individually hard fork, across every hard fork the state of the
various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`)
remains intact (unless the new rules modify such state). These coins can still
be burned with instructions (unless the new rules prevent that) and if a
validator set doesn't send `XYZ` as expected, they can be halted (effectively
burning their `SRI` stake). Accordingly, every node decides if and how to
participate in the future, with the abilities and powers they declare
themselves to have.

View File

@@ -1,6 +0,0 @@
---
title: Running a Validator
layout: default
nav_order: 8
has_children: true
---

View File

@@ -1,20 +1,17 @@
-# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT)
-FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic
+FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder

 # Move to a Debian package snapshot
 RUN rm -rf /etc/apt/sources.list.d/debian.sources && \
   rm -rf /var/lib/apt/lists/* && \
-  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \
+  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \
   apt update

 # Install dependencies
-RUN apt update && apt upgrade && apt install clang -y
+RUN apt install clang -y

 # Add the wasm toolchain
 RUN rustup target add wasm32-unknown-unknown

-FROM deterministic
-
 # Add files for build
 ADD patches /serai/patches
 ADD common /serai/common
@@ -33,8 +30,3 @@ ADD Cargo.lock /serai
 ADD AGPL-3.0 /serai

 WORKDIR /serai
-
-# Build the runtime, copying it to the volume if it exists
-CMD cargo build --release -p serai-runtime && \
-  mkdir -p /volume && \
-  cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm

Some files were not shown because too many files have changed in this diff.