mirror of https://github.com/serai-dex/serai.git
synced 2025-12-08 12:19:24 +00:00

Compare commits: 1 commit (rocksdb-sn...10s-tender)

Commit 466af2bc19

@@ -42,8 +42,8 @@ runs:
       shell: bash
       run: |
         cargo install svm-rs
-        svm install 0.8.25
-        svm use 0.8.25
+        svm install 0.8.16
+        svm use 0.8.16

     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43

.github/workflows/pages.yml (vendored, 90 lines deleted)
@@ -1,90 +0,0 @@
-# MIT License
-#
-# Copyright (c) 2022 just-the-docs
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to deal
-# in the Software without restriction, including without limitation the rights
-# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-# copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in all
-# copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-# SOFTWARE.
-
-# This workflow uses actions that are not certified by GitHub.
-# They are provided by a third-party and are governed by
-# separate terms of service, privacy policy, and support
-# documentation.
-
-# Sample workflow for building and deploying a Jekyll site to GitHub Pages
-name: Deploy Jekyll site to Pages
-
-on:
-  push:
-    branches:
-      - "develop"
-    paths:
-      - "docs/**"
-
-  # Allows you to run this workflow manually from the Actions tab
-  workflow_dispatch:
-
-# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
-permissions:
-  contents: read
-  pages: write
-  id-token: write
-
-# Allow one concurrent deployment
-concurrency:
-  group: "pages"
-  cancel-in-progress: true
-
-jobs:
-  # Build job
-  build:
-    runs-on: ubuntu-latest
-    defaults:
-      run:
-        working-directory: docs
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v3
-      - name: Setup Ruby
-        uses: ruby/setup-ruby@v1
-        with:
-          bundler-cache: true
-          cache-version: 0
-          working-directory: "${{ github.workspace }}/docs"
-      - name: Setup Pages
-        id: pages
-        uses: actions/configure-pages@v3
-      - name: Build with Jekyll
-        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
-        env:
-          JEKYLL_ENV: production
-      - name: Upload artifact
-        uses: actions/upload-pages-artifact@v1
-        with:
-          path: "docs/_site/"
-
-  # Deployment job
-  deploy:
-    environment:
-      name: github-pages
-      url: ${{ steps.deployment.outputs.page_url }}
-    runs-on: ubuntu-latest
-    needs: build
-    steps:
-      - name: Deploy to GitHub Pages
-        id: deployment
-        uses: actions/deploy-pages@v2

Cargo.lock (generated, 648 changed lines)
File diff suppressed because it is too large.

Cargo.toml
@@ -3,7 +3,6 @@ resolver = "2"
 members = [
   # Version patches
   "patches/zstd",
-  "patches/rocksdb",
   "patches/proc-macro-crate",

   # std patches
@@ -113,8 +112,6 @@ dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "a

 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
-# Needed for WAL compression
-rocksdb = { path = "patches/rocksdb" }
 # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
 proc-macro-crate = { path = "patches/proc-macro-crate" }


README.md
@@ -5,16 +5,13 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
 experience. Funds are stored in an economically secured threshold-multisig
 wallet.

-[Getting Started](spec/Getting%20Started.md)
+[Getting Started](docs/Getting%20Started.md)

 ### Layout

 - `audits`: Audits for various parts of Serai.

-- `spec`: The specification of the Serai protocol, both internally and as
-  networked.
-
-- `docs`: User-facing documentation on the Serai protocol.
+- `docs`: Documentation on the Serai protocol.

 - `common`: Crates containing utilities common to a variety of areas under
   Serai, none neatly fitting under another category.

coins/ethereum/.gitignore (vendored)
@@ -1,7 +1,3 @@
-# Solidity build outputs
+# solidity build outputs
 cache
 artifacts
-
-# Auto-generated ABI files
-src/abi/schnorr.rs
-src/abi/router.rs

coins/ethereum/Cargo.toml
@@ -30,9 +30,6 @@ ethers-core = { version = "2", default-features = false }
 ethers-providers = { version = "2", default-features = false }
 ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

-[build-dependencies]
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
 [dev-dependencies]
 rand_core = { version = "0.6", default-features = false, features = ["std"] }


coins/ethereum/build.rs
@@ -1,20 +1,6 @@
-use std::process::Command;
-
-use ethers_contract::Abigen;
-
 fn main() {
-  println!("cargo:rerun-if-changed=contracts/*");
-  println!("cargo:rerun-if-changed=artifacts/*");
+  println!("cargo:rerun-if-changed=contracts");
+  println!("cargo:rerun-if-changed=artifacts");

-  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
-    .unwrap()
-    .lines()
-  {
-    if let Some(version) = line.strip_prefix("Version: ") {
-      let version = version.split('+').next().unwrap();
-      assert_eq!(version, "0.8.25");
-    }
-  }
-
   #[rustfmt::skip]
   let args = [
@@ -22,21 +8,8 @@ fn main() {
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
     "--optimize",
-    "./contracts/Schnorr.sol", "./contracts/Router.sol",
+    "./contracts/Schnorr.sol"
   ];
-  assert!(Command::new("solc").args(args).status().unwrap().success());

-  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/schnorr.rs")
-    .unwrap();
-
-  Abigen::new("Router", "./artifacts/Router.abi")
-    .unwrap()
-    .generate()
-    .unwrap()
-    .write_to_file("./src/abi/router.rs")
-    .unwrap();
+  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
 }

coins/ethereum/contracts/Router.sol (deleted)
@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-import "./Schnorr.sol";
-
-contract Router is Schnorr {
-  // Contract initializer
-  // TODO: Replace with a MuSig of the genesis validators
-  address public initializer;
-
-  // Nonce is incremented for each batch of transactions executed
-  uint256 public nonce;
-
-  // fixed parity for the public keys used in this contract
-  uint8 constant public KEY_PARITY = 27;
-
-  // current public key's x-coordinate
-  // note: this key must always use the fixed parity defined above
-  bytes32 public seraiKey;
-
-  struct OutInstruction {
-    address to;
-    uint256 value;
-    bytes data;
-  }
-
-  struct Signature {
-    bytes32 c;
-    bytes32 s;
-  }
-
-  // success is a uint256 representing a bitfield of transaction successes
-  event Executed(uint256 nonce, bytes32 batch, uint256 success);
-
-  // error types
-  error NotInitializer();
-  error AlreadyInitialized();
-  error InvalidKey();
-  error TooManyTransactions();
-
-  constructor() {
-    initializer = msg.sender;
-  }
-
-  // initSeraiKey can be called by the contract initializer to set the first
-  // public key, only if the public key has yet to be set.
-  function initSeraiKey(bytes32 _seraiKey) external {
-    if (msg.sender != initializer) revert NotInitializer();
-    if (seraiKey != 0) revert AlreadyInitialized();
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    seraiKey = _seraiKey;
-  }
-
-  // updateSeraiKey validates the given Schnorr signature against the current public key,
-  // and if successful, updates the contract's public key to the given one.
-  function updateSeraiKey(
-    bytes32 _seraiKey,
-    Signature memory sig
-  ) public {
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-    seraiKey = _seraiKey;
-  }
-
-  // execute accepts a list of transactions to execute as well as a Schnorr signature.
-  // if signature verification passes, the given transactions are executed.
-  // if signature verification fails, this function will revert.
-  function execute(
-    OutInstruction[] calldata transactions,
-    Signature memory sig
-  ) public {
-    if (transactions.length > 256) revert TooManyTransactions();
-
-    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
-    // This prevents re-entrancy from causing double spends yet does allow
-    // out-of-order execution via re-entrancy
-    nonce++;
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-
-    uint256 successes;
-    for(uint256 i = 0; i < transactions.length; i++) {
-      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
-      assembly {
-        successes := or(successes, shl(i, success))
-      }
-    }
-    emit Executed(nonce, message, successes);
-  }
-}
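
Note on the deleted Router above: execute packs one success bit per call into the uint256 emitted in Executed, via shl(i, success). A minimal, self-contained sketch of decoding that bitfield off-chain (helper name and layout interpretation are mine, not from the repo):

// Illustrative helper: returns whether transaction `i` in a batch of up to
// 256 succeeded, given the `success` bitfield from the `Executed` event,
// taken as the big-endian bytes of a uint256.
fn call_succeeded(successes: [u8; 32], i: usize) -> bool {
  assert!(i < 256);
  // Bit `i` (value 2^i) lives in byte `31 - (i / 8)` of the big-endian word.
  (successes[31 - (i / 8)] >> (i % 8)) & 1 == 1
}

fn main() {
  // Example: calls 0 and 3 succeeded -> bitfield 0b1001 = 9.
  let mut successes = [0u8; 32];
  successes[31] = 0b1001;
  assert!(call_succeeded(successes, 0));
  assert!(!call_succeeded(successes, 1));
  assert!(call_succeeded(successes, 3));
}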

coins/ethereum/contracts/Schnorr.sol
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: AGPLv3
+//SPDX-License-Identifier: AGPLv3
 pragma solidity ^0.8.0;

 // see https://github.com/noot/schnorr-verify for implementation details
@@ -7,32 +7,29 @@ contract Schnorr {
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

-  error InvalidSOrA();
-  error InvalidSignature();
-
   // parity := public key y-coord parity (27 or 28)
   // px := public key x-coord
-  // message := 32-byte hash of the message
-  // c := schnorr signature challenge
+  // message := 32-byte message
   // s := schnorr signature
+  // e := schnorr signature challenge
   function verify(
     uint8 parity,
     bytes32 px,
     bytes32 message,
-    bytes32 c,
-    bytes32 s
+    bytes32 s,
+    bytes32 e
   ) public view returns (bool) {
     // ecrecover = (m, v, r, s);
-    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
+    bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
+    bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));

-    if (sa == 0) revert InvalidSOrA();
+    require(sp != 0);
     // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
+    // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
     // check if they're zero.
-    address R = ecrecover(sa, parity, px, ca);
-    if (R == address(0)) revert InvalidSignature();
-    return c == keccak256(
+    address R = ecrecover(sp, parity, px, ep);
+    require(R != address(0), "ecrecover failed");
+    return e == keccak256(
       abi.encodePacked(R, uint8(parity), px, block.chainid, message)
     );
   }
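
Note on the ecrecover trick both versions of verify rely on (derivation mine, not from the repo): for secp256k1, ecrecover(h, v, r, s') returns the address of the point Q = r^{-1}(s'R' - hG), where R' is the curve point with x-coordinate r and y-parity v. The contract calls it with r = px (so R' = A, the public key), h = -s*px, and s' taken as the negated challenge times px, so:

  \mathrm{ecrecover}(-s\,p_x,\; v,\; p_x,\; -e\,p_x)
    = \mathrm{addr}\!\big(p_x^{-1}\,((-e\,p_x)A - (-s\,p_x)G)\big)
    = \mathrm{addr}(sG - eA)
    = \mathrm{addr}(R),

since an honest Schnorr signature satisfies R = sG - eA. The contract then only has to recompute the challenge (named e on one side of this diff, c on the other) as keccak256(addr(R) || parity || px || chainid || message) and compare it to the supplied value.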

coins/ethereum/src/abi/mod.rs (deleted)
@@ -1,6 +0,0 @@
-#[rustfmt::skip]
-#[allow(clippy::all)]
-pub(crate) mod schnorr;
-#[rustfmt::skip]
-#[allow(clippy::all)]
-pub(crate) mod router;

coins/ethereum/src/contract.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
+use thiserror::Error;
+use eyre::{eyre, Result};
+
+use ethers_providers::{Provider, Http};
+use ethers_contract::abigen;
+
+use crate::crypto::ProcessedSignature;
+
+#[derive(Error, Debug)]
+pub enum EthereumError {
+  #[error("failed to verify Schnorr signature")]
+  VerificationError,
+}
+
+abigen!(Schnorr, "./artifacts/Schnorr.abi");
+
+pub async fn call_verify(
+  contract: &Schnorr<Provider<Http>>,
+  params: &ProcessedSignature,
+) -> Result<()> {
+  if contract
+    .verify(
+      params.parity + 27,
+      params.px.to_bytes().into(),
+      params.message,
+      params.s.to_bytes().into(),
+      params.e.to_bytes().into(),
+    )
+    .call()
+    .await?
+  {
+    Ok(())
+  } else {
+    Err(eyre!(EthereumError::VerificationError))
+  }
+}
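
A sketch of how this new API is meant to compose, going by the tests later in this diff. The function itself is hypothetical and the endpoint and contract address are placeholders, not anything from the repo:

// Hypothetical end-to-end wiring of contract.rs + crypto.rs.
use std::{convert::TryFrom, sync::Arc};

use k256::{ProjectivePoint, Scalar, U256};
use ethers_core::types::H160;
use ethers_providers::{Provider, Http};
use ethereum_serai::{crypto, contract::{Schnorr, call_verify}};

#[allow(non_snake_case)]
async fn verify_on_chain(
  contract_address: H160,
  message_hash: [u8; 32],
  R: &ProjectivePoint,
  s: Scalar,
  group_key: &ProjectivePoint,
  chain_id: U256,
) -> eyre::Result<()> {
  // Placeholder endpoint; in the tests this is an Anvil instance.
  let provider = Provider::<Http>::try_from("http://localhost:8545")?;
  let contract = Schnorr::new(contract_address, Arc::new(provider));
  // Massage the FROST signature into the (parity, px, message, s, e) form
  // the contract's verify expects.
  let processed =
    crypto::process_signature_for_contract(message_hash, R, s, group_key, chain_id);
  call_verify(&contract, &processed).await
}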

coins/ethereum/src/crypto.rs
@@ -1,54 +1,50 @@
 use sha3::{Digest, Keccak256};

-use group::ff::PrimeField;
+use group::Group;
 use k256::{
   elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
+    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
   },
-  ProjectivePoint, Scalar, U256,
+  AffinePoint, ProjectivePoint, Scalar, U256,
 };

-use frost::{
-  algorithm::{Hram, SchnorrSignature},
-  curve::Secp256k1,
-};
+use frost::{algorithm::Hram, curve::Secp256k1};

-pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
+pub fn keccak256(data: &[u8]) -> [u8; 32] {
   Keccak256::digest(data).into()
 }

-pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub fn hash_to_scalar(data: &[u8]) -> Scalar {
+  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
+}
+
+pub fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
-  // Last 20 bytes of the hash of the concatenated x and y coordinates
-  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
+  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
 }

-#[allow(non_snake_case)]
-pub struct PublicKey {
-  pub A: ProjectivePoint,
-  pub px: Scalar,
-  pub parity: u8,
-}
-
-impl PublicKey {
-  #[allow(non_snake_case)]
-  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
-    let affine = A.to_affine();
-    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
-    if parity != 27 {
-      None?;
-    }
-
-    let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
-    // Return None if a reduction would occur
-    if x_coord_scalar.to_repr() != x_coord {
-      None?;
-    }
-
-    Some(PublicKey { A, px: x_coord_scalar, parity })
-  }
-}
+pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
+  if r.is_zero().into() || s.is_zero().into() {
+    return None;
+  }
+
+  #[allow(non_snake_case)]
+  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
+  #[allow(non_snake_case)]
+  if let Some(R) = Option::<AffinePoint>::from(R) {
+    #[allow(non_snake_case)]
+    let R = ProjectivePoint::from(R);
+
+    let r = r.invert().unwrap();
+    let u1 = ProjectivePoint::GENERATOR * (-message * r);
+    let u2 = R * (s * r);
+    let key: ProjectivePoint = u1 + u2;
+    if !bool::from(key.is_identity()) {
+      return Some(address(&key));
+    }
+  }
+
+  None
+}

 #[derive(Clone, Default)]
@@ -59,33 +55,53 @@ impl Hram<Secp256k1> for EthereumHram {
     let a_encoded_point = A.to_encoded_point(true);
     let mut a_encoded = a_encoded_point.as_ref().to_owned();
     a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
-    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
     let mut data = address(R).to_vec();
     data.append(&mut a_encoded);
-    data.extend(m);
+    data.append(&mut m.to_vec());
     Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
   }
 }

-pub struct Signature {
-  pub(crate) c: Scalar,
-  pub(crate) s: Scalar,
-}
-impl Signature {
-  pub fn new(
-    public_key: &PublicKey,
-    chain_id: U256,
-    m: &[u8],
-    signature: SchnorrSignature<Secp256k1>,
-  ) -> Option<Signature> {
-    let c = EthereumHram::hram(
-      &signature.R,
-      &public_key.A,
-      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-    );
-    if !signature.verify(public_key.A, c) {
-      None?;
-    }
-    Some(Signature { c, s: signature.s })
+pub struct ProcessedSignature {
+  pub s: Scalar,
+  pub px: Scalar,
+  pub parity: u8,
+  pub message: [u8; 32],
+  pub e: Scalar,
+}
+
+#[allow(non_snake_case)]
+pub fn preprocess_signature_for_ecrecover(
+  m: [u8; 32],
+  R: &ProjectivePoint,
+  s: Scalar,
+  A: &ProjectivePoint,
+  chain_id: U256,
+) -> (Scalar, Scalar) {
+  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
+  let sr = processed_sig.s.mul(&processed_sig.px).negate();
+  let er = processed_sig.e.mul(&processed_sig.px).negate();
+  (sr, er)
+}
+
+#[allow(non_snake_case)]
+pub fn process_signature_for_contract(
+  m: [u8; 32],
+  R: &ProjectivePoint,
+  s: Scalar,
+  A: &ProjectivePoint,
+  chain_id: U256,
+) -> ProcessedSignature {
+  let encoded_pk = A.to_encoded_point(true);
+  let px = &encoded_pk.as_ref()[1 .. 33];
+  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
+  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
+  ProcessedSignature {
+    s,
+    px: px_scalar,
+    parity: &encoded_pk.as_ref()[0] - 2,
+    #[allow(non_snake_case)]
+    message: m,
+    e,
   }
 }
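
Note on how this Rust side lines up with the Solidity verify: EthereumHram hashes address(R) || a_encoded || m, where a_encoded is the 33-byte compressed key with its SEC1 prefix bumped to 27/28 and m is chain_id || keccak256(message), so the preimage matches the contract's abi.encodePacked(R, uint8(parity), px, block.chainid, message). A sketch of that shared layout (this helper is illustrative, not from the crate):

// Illustrative only: the byte layout of the challenge preimage shared by
// EthereumHram (Rust) and the contract's abi.encodePacked (Solidity).
fn challenge_preimage(
  r_address: [u8; 20],    // address(R)
  parity: u8,             // 27 or 28, the bumped SEC1 prefix byte
  px: [u8; 32],           // public key x-coordinate
  chain_id: [u8; 32],     // big-endian chain ID
  message_hash: [u8; 32], // keccak256 of the actual message
) -> Vec<u8> {
  let mut data = Vec::with_capacity(20 + 1 + 32 + 32 + 32);
  data.extend_from_slice(&r_address);
  data.push(parity);
  data.extend_from_slice(&px);
  data.extend_from_slice(&chain_id);
  data.extend_from_slice(&message_hash);
  data // 117 bytes; both sides keccak256 this and reduce it to a scalar
}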

coins/ethereum/src/lib.rs
@@ -1,16 +1,2 @@
-use thiserror::Error;
-
+pub mod contract;
 pub mod crypto;
-
-pub(crate) mod abi;
-pub mod schnorr;
-pub mod router;
-
-#[cfg(test)]
-mod tests;
-
-#[derive(Error, Debug)]
-pub enum Error {
-  #[error("failed to verify Schnorr signature")]
-  InvalidSignature,
-}

coins/ethereum/src/router.rs (deleted)
@@ -1,30 +0,0 @@
-pub use crate::abi::router::*;
-
-/*
-use crate::crypto::{ProcessedSignature, PublicKey};
-use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
-use eyre::Result;
-use std::{convert::From, fs::File, sync::Arc};
-
-pub async fn router_update_public_key<M: Middleware + 'static>(
-  contract: &Router<M>,
-  public_key: &PublicKey,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-
-pub async fn router_execute<M: Middleware + 'static>(
-  contract: &Router<M>,
-  txs: Vec<Rtransaction>,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.execute(txs, signature.into()).send();
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-*/

coins/ethereum/src/schnorr.rs (deleted)
@@ -1,34 +0,0 @@
-use eyre::{eyre, Result};
-
-use group::ff::PrimeField;
-
-use ethers_providers::{Provider, Http};
-
-use crate::{
-  Error,
-  crypto::{keccak256, PublicKey, Signature},
-};
-pub use crate::abi::schnorr::*;
-
-pub async fn call_verify(
-  contract: &Schnorr<Provider<Http>>,
-  public_key: &PublicKey,
-  message: &[u8],
-  signature: &Signature,
-) -> Result<()> {
-  if contract
-    .verify(
-      public_key.parity,
-      public_key.px.to_repr().into(),
-      keccak256(message),
-      signature.c.to_repr().into(),
-      signature.s.to_repr().into(),
-    )
-    .call()
-    .await?
-  {
-    Ok(())
-  } else {
-    Err(eyre!(Error::InvalidSignature))
-  }
-}

coins/ethereum/src/tests/crypto.rs (deleted)
@@ -1,132 +0,0 @@
-use rand_core::OsRng;
-
-use sha2::Sha256;
-use sha3::{Digest, Keccak256};
-
-use group::Group;
-use k256::{
-  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
-  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
-  U256, Scalar, AffinePoint, ProjectivePoint,
-};
-
-use frost::{
-  curve::Secp256k1,
-  algorithm::{Hram, IetfSchnorr},
-  tests::{algorithm_machines, sign},
-};
-
-use crate::{crypto::*, tests::key_gen};
-
-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
-}
-
-pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-
-  None
-}
-
-#[test]
-fn test_ecrecover() {
-  let private = SigningKey::random(&mut OsRng);
-  let public = VerifyingKey::from(&private);
-
-  // Sign the signature
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let (sig, recovery_id) = private
-    .as_nonzero_scalar()
-    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
-    .unwrap();
-
-  // Sanity check the signature verifies
-  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
-  {
-    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
-  }
-
-  // Perform the ecrecover
-  assert_eq!(
-    ecrecover(
-      hash_to_scalar(MESSAGE),
-      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
-      *sig.r(),
-      *sig.s()
-    )
-    .unwrap(),
-    address(&ProjectivePoint::from(public.as_affine()))
-  );
-}
-
-// Run the sign test with the EthereumHram
-#[test]
-fn test_signing() {
-  let (keys, _) = key_gen();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let _sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
-}
-
-#[allow(non_snake_case)]
-pub fn preprocess_signature_for_ecrecover(
-  R: ProjectivePoint,
-  public_key: &PublicKey,
-  chain_id: U256,
-  m: &[u8],
-  s: Scalar,
-) -> (u8, Scalar, Scalar) {
-  let c = EthereumHram::hram(
-    &R,
-    &public_key.A,
-    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-  );
-  let sa = -(s * public_key.px);
-  let ca = -(c * public_key.px);
-  (public_key.parity, sa, ca)
-}
-
-#[test]
-fn test_ecrecover_hack() {
-  let (keys, public_key) = key_gen();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let chain_id = U256::ONE;
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-
-  let (parity, sa, ca) =
-    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
-  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
-  assert_eq!(q, address(&sig.R));
-}

coins/ethereum/src/tests/mod.rs (deleted)
@@ -1,92 +0,0 @@
-use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
-
-use rand_core::OsRng;
-
-use group::ff::PrimeField;
-use k256::{Scalar, ProjectivePoint};
-use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
-
-use ethers_core::{
-  types::{H160, Signature as EthersSignature},
-  abi::Abi,
-};
-use ethers_contract::ContractFactory;
-use ethers_providers::{Middleware, Provider, Http};
-
-use crate::crypto::PublicKey;
-
-mod crypto;
-mod schnorr;
-mod router;
-
-pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
-  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
-  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
-
-  let mut offset = Scalar::ZERO;
-  while PublicKey::new(group_key).is_none() {
-    offset += Scalar::ONE;
-    group_key += ProjectivePoint::GENERATOR;
-  }
-  for keys in keys.values_mut() {
-    *keys = keys.offset(offset);
-  }
-  let public_key = PublicKey::new(group_key).unwrap();
-
-  (keys, public_key)
-}
-
-// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
-// to fund the deployer, not create/pass a wallet
-// TODO: Deterministic deployments across chains
-pub async fn deploy_contract(
-  chain_id: u32,
-  client: Arc<Provider<Http>>,
-  wallet: &k256::ecdsa::SigningKey,
-  name: &str,
-) -> eyre::Result<H160> {
-  let abi: Abi =
-    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
-
-  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
-  let hex_bin =
-    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
-  let bin = hex::decode(hex_bin).unwrap();
-  let factory = ContractFactory::new(abi, bin.into(), client.clone());
-
-  let mut deployment_tx = factory.deploy(())?.tx;
-  deployment_tx.set_chain_id(chain_id);
-  deployment_tx.set_gas(1_000_000);
-  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
-  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
-  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
-
-  let sig_hash = deployment_tx.sighash();
-  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
-
-  // EIP-155 v
-  let mut v = u64::from(rid.to_byte());
-  assert!((v == 0) || (v == 1));
-  v += u64::from((chain_id * 2) + 35);
-
-  let r = sig.r().to_repr();
-  let r_ref: &[u8] = r.as_ref();
-  let s = sig.s().to_repr();
-  let s_ref: &[u8] = s.as_ref();
-  let deployment_tx =
-    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
-
-  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
-
-  let mut receipt;
-  while {
-    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
-    receipt.is_none()
-  } {
-    tokio::time::sleep(Duration::from_secs(6)).await;
-  }
-  let receipt = receipt.unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  Ok(receipt.contract_address.unwrap())
-}
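
Note on key_gen's retry loop above (notation mine): each failed attempt replaces the group key A with A + oG for an incremented offset o, and keys.offset(o) shifts every participant's view of the shared secret to match, so the same threshold of signers still works, now under a key with even y-parity and a canonical x-coordinate, as PublicKey::new requires:

  A' = A + o\,G, \qquad a' = a + o, \qquad A' = a'\,G.

Since roughly half of all points have even parity and almost all x-coordinates are already below the curve order, the loop terminates after a couple of iterations in expectation.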

coins/ethereum/src/tests/router.rs (deleted)
@@ -1,109 +0,0 @@
-use std::{convert::TryFrom, sync::Arc, collections::HashMap};
-
-use rand_core::OsRng;
-
-use group::ff::PrimeField;
-use frost::{
-  curve::Secp256k1,
-  Participant, ThresholdKeys,
-  algorithm::IetfSchnorr,
-  tests::{algorithm_machines, sign},
-};
-
-use ethers_core::{
-  types::{H160, U256, Bytes},
-  abi::AbiEncode,
-  utils::{Anvil, AnvilInstance},
-};
-use ethers_providers::{Middleware, Provider, Http};
-
-use crate::{
-  crypto::{keccak256, PublicKey, EthereumHram, Signature},
-  router::{self, *},
-  tests::{key_gen, deploy_contract},
-};
-
-async fn setup_test() -> (
-  u32,
-  AnvilInstance,
-  Router<Provider<Http>>,
-  HashMap<Participant, ThresholdKeys<Secp256k1>>,
-  PublicKey,
-) {
-  let anvil = Anvil::new().spawn();
-
-  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
-  let wallet = anvil.keys()[0].clone().into();
-  let client = Arc::new(provider);
-
-  let contract_address =
-    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
-  let contract = Router::new(contract_address, client.clone());
-
-  let (keys, public_key) = key_gen();
-
-  // Set the key to the threshold keys
-  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = pending_tx.await.unwrap().unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  (chain_id, anvil, contract, keys, public_key)
-}
-
-#[tokio::test]
-async fn test_deploy_contract() {
-  setup_test().await;
-}
-
-pub fn hash_and_sign(
-  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
-  public_key: &PublicKey,
-  chain_id: U256,
-  message: &[u8],
-) -> Signature {
-  let hashed_message = keccak256(message);
-
-  let mut chain_id_bytes = [0; 32];
-  chain_id.to_big_endian(&mut chain_id_bytes);
-  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, keys),
-    full_message,
-  );
-
-  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
-}
-
-#[tokio::test]
-async fn test_router_execute() {
-  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
-
-  let to = H160([0u8; 20]);
-  let value = U256([0u64; 4]);
-  let data = Bytes::from([0]);
-  let tx = OutInstruction { to, value, data: data.clone() };
-
-  let nonce_call = contract.nonce();
-  let nonce = nonce_call.call().await.unwrap();
-
-  let encoded =
-    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
-  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
-
-  let tx = contract
-    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
-    .gas(300_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
-  assert!(receipt.status == Some(1.into()));
-
-  println!("gas used: {:?}", receipt.cumulative_gas_used);
-  println!("logs: {:?}", receipt.logs);
-}

coins/ethereum/src/tests/schnorr.rs (deleted)
@@ -1,67 +0,0 @@
-use std::{convert::TryFrom, sync::Arc};
-
-use rand_core::OsRng;
-
-use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
-
-use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
-use ethers_providers::{Middleware, Provider, Http};
-
-use frost::{
-  curve::Secp256k1,
-  algorithm::IetfSchnorr,
-  tests::{algorithm_machines, sign},
-};
-
-use crate::{
-  crypto::*,
-  schnorr::*,
-  tests::{key_gen, deploy_contract},
-};
-
-async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
-  let anvil = Anvil::new().spawn();
-
-  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
-  let wallet = anvil.keys()[0].clone().into();
-  let client = Arc::new(provider);
-
-  let contract_address =
-    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
-  let contract = Schnorr::new(contract_address, client.clone());
-  (chain_id, anvil, contract)
-}
-
-#[tokio::test]
-async fn test_deploy_contract() {
-  setup_test().await;
-}
-
-#[tokio::test]
-async fn test_ecrecover_hack() {
-  let (chain_id, _anvil, contract) = setup_test().await;
-  let chain_id = U256::from(chain_id);
-
-  let (keys, public_key) = key_gen();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
-
-  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
-  // Test an invalid signature fails
-  let mut sig = sig;
-  sig.s += Scalar::ONE;
-  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
-}

coins/ethereum/tests/contract.rs (new file, 128 lines)
@@ -0,0 +1,128 @@
+use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};
+
+use rand_core::OsRng;
+
+use ::k256::{
+  elliptic_curve::{bigint::ArrayEncoding, PrimeField},
+  U256,
+};
+
+use ethers_core::{
+  types::Signature,
+  abi::Abi,
+  utils::{keccak256, Anvil, AnvilInstance},
+};
+use ethers_contract::ContractFactory;
+use ethers_providers::{Middleware, Provider, Http};
+
+use frost::{
+  curve::Secp256k1,
+  Participant,
+  algorithm::IetfSchnorr,
+  tests::{key_gen, algorithm_machines, sign},
+};
+
+use ethereum_serai::{
+  crypto,
+  contract::{Schnorr, call_verify},
+};
+
+// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
+// to fund the deployer, not create/pass a wallet
+pub async fn deploy_schnorr_verifier_contract(
+  chain_id: u32,
+  client: Arc<Provider<Http>>,
+  wallet: &k256::ecdsa::SigningKey,
+) -> eyre::Result<Schnorr<Provider<Http>>> {
+  let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();
+
+  let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
+  let hex_bin =
+    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
+  let bin = hex::decode(hex_bin).unwrap();
+  let factory = ContractFactory::new(abi, bin.into(), client.clone());
+
+  let mut deployment_tx = factory.deploy(())?.tx;
+  deployment_tx.set_chain_id(chain_id);
+  deployment_tx.set_gas(500_000);
+  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
+  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
+  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
+
+  let sig_hash = deployment_tx.sighash();
+  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
+
+  // EIP-155 v
+  let mut v = u64::from(rid.to_byte());
+  assert!((v == 0) || (v == 1));
+  v += u64::from((chain_id * 2) + 35);
+
+  let r = sig.r().to_repr();
+  let r_ref: &[u8] = r.as_ref();
+  let s = sig.s().to_repr();
+  let s_ref: &[u8] = s.as_ref();
+  let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });
+
+  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
+
+  let mut receipt;
+  while {
+    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
+    receipt.is_none()
+  } {
+    tokio::time::sleep(Duration::from_secs(6)).await;
+  }
+  let receipt = receipt.unwrap();
+  assert!(receipt.status == Some(1.into()));
+
+  let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
+  Ok(contract)
+}
+
+async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
+  let anvil = Anvil::new().spawn();
+
+  let provider =
+    Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
+  let chain_id = provider.get_chainid().await.unwrap().as_u32();
+  let wallet = anvil.keys()[0].clone().into();
+  let client = Arc::new(provider);
+
+  (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
+}
+
+#[tokio::test]
+async fn test_deploy_contract() {
+  deploy_test_contract().await;
+}
+
+#[tokio::test]
+async fn test_ecrecover_hack() {
+  let (chain_id, _anvil, contract) = deploy_test_contract().await;
+  let chain_id = U256::from(chain_id);
+
+  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
+  let group_key = keys[&Participant::new(1).unwrap()].group_key();
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
+
+  let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );
+  let mut processed_sig =
+    crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);
+
+  call_verify(&contract, &processed_sig).await.unwrap();
+
+  // test invalid signature fails
+  processed_sig.message[0] = 0;
+  assert!(call_verify(&contract, &processed_sig).await.is_err());
+}
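
Note on the EIP-155 arithmetic used by both deployment helpers in this diff (worked example mine):

// EIP-155: v = recovery_id + chain_id * 2 + 35, so a recovery id of 0 or 1
// on chain 31337 (Anvil's default) yields 62709 or 62710.
fn eip155_v(recovery_id: u64, chain_id: u64) -> u64 {
  assert!(recovery_id <= 1);
  recovery_id + (chain_id * 2) + 35
}

fn main() {
  assert_eq!(eip155_v(0, 1), 37); // mainnet, even y-parity
  assert_eq!(eip155_v(1, 31337), 62710); // Anvil, odd y-parity
}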

coins/ethereum/tests/crypto.rs (new file, 87 lines)
@@ -0,0 +1,87 @@
+use k256::{
+  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
+  ProjectivePoint, Scalar, U256,
+};
+use frost::{curve::Secp256k1, Participant};
+
+use ethereum_serai::crypto::*;
+
+#[test]
+fn test_ecrecover() {
+  use rand_core::OsRng;
+  use sha2::Sha256;
+  use sha3::{Digest, Keccak256};
+  use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};
+
+  let private = SigningKey::random(&mut OsRng);
+  let public = VerifyingKey::from(&private);
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let (sig, recovery_id) = private
+    .as_nonzero_scalar()
+    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
+    .unwrap();
+  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
+  {
+    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
+  }
+
+  assert_eq!(
+    ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
+      .unwrap(),
+    address(&ProjectivePoint::from(public.as_affine()))
+  );
+}
+
+#[test]
+fn test_signing() {
+  use frost::{
+    algorithm::IetfSchnorr,
+    tests::{algorithm_machines, key_gen, sign},
+  };
+  use rand_core::OsRng;
+
+  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
+  let _group_key = keys[&Participant::new(1).unwrap()].group_key();
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let _sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
+}
+
+#[test]
+fn test_ecrecover_hack() {
+  use frost::{
+    algorithm::IetfSchnorr,
+    tests::{algorithm_machines, key_gen, sign},
+  };
+  use rand_core::OsRng;
+
+  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
+  let group_key = keys[&Participant::new(1).unwrap()].group_key();
+  let group_key_encoded = group_key.to_encoded_point(true);
+  let group_key_compressed = group_key_encoded.as_ref();
+  let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+  let chain_id = U256::ONE;
+
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );
+
+  let (sr, er) =
+    preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
+  let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
+  assert_eq!(q, address(&sig.R));
+}

coins/ethereum/tests/mod.rs (new file, 2 lines)
@@ -0,0 +1,2 @@
+mod contract;
+mod crypto;

coins/monero/src/ringct/bulletproofs/original.rs (path inferred)
@@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D
 use group::{ff::Field, Group};
 use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};

-use multiexp::{BatchVerifier, multiexp};
+use multiexp::BatchVerifier;

 use crate::{Commitment, ringct::bulletproofs::core::*};

@@ -17,20 +17,7 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs"));

 static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
 pub(crate) fn IP12() -> Scalar {
-  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
+  *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N()))
 }
-
-pub(crate) fn hadamard_fold(
-  l: &[EdwardsPoint],
-  r: &[EdwardsPoint],
-  a: Scalar,
-  b: Scalar,
-) -> Vec<EdwardsPoint> {
-  let mut res = Vec::with_capacity(l.len() / 2);
-  for i in 0 .. l.len() {
-    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
-  }
-  res
-}

 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -70,7 +57,7 @@ impl OriginalStruct {
     let mut cache = hash_to_scalar(&y.to_bytes());
     let z = cache;

-    let l0 = aL - z;
+    let l0 = &aL - z;
     let l1 = sL;

     let mut zero_twos = Vec::with_capacity(MN);
@@ -82,12 +69,12 @@ impl OriginalStruct {
     }

     let yMN = ScalarVector::powers(y, MN);
-    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
-    let r1 = yMN * &sR;
+    let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
+    let r1 = yMN * sR;

     let (T1, T2, x, mut taux) = {
-      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
-      let t2 = l1.clone().inner_product(&r1);
+      let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
+      let t2 = inner_product(&l1, &r1);

       let mut tau1 = Scalar::random(&mut *rng);
       let mut tau2 = Scalar::random(&mut *rng);
@@ -113,10 +100,10 @@ impl OriginalStruct {
       taux += zpow[i + 2] * gamma;
     }

-    let l = l0 + &(l1 * x);
-    let r = r0 + &(r1 * x);
+    let l = &l0 + &(l1 * x);
+    let r = &r0 + &(r1 * x);

-    let t = l.clone().inner_product(&r);
+    let t = inner_product(&l, &r);

     let x_ip =
       hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
@@ -139,8 +126,8 @@ impl OriginalStruct {
     let (aL, aR) = a.split();
     let (bL, bR) = b.split();

-    let cL = aL.clone().inner_product(&bR);
-    let cR = aR.clone().inner_product(&bL);
+    let cL = inner_product(&aL, &bR);
+    let cR = inner_product(&aR, &bL);

     let (G_L, G_R) = G_proof.split_at(aL.len());
     let (H_L, H_R) = H_proof.split_at(aL.len());
@@ -153,8 +140,8 @@ impl OriginalStruct {
     let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
     let winv = w.invert().unwrap();

-    a = (aL * w) + &(aR * winv);
-    b = (bL * winv) + &(bR * w);
+    a = (aL * w) + (aR * winv);
+    b = (bL * winv) + (bR * w);

     if a.len() != 1 {
       G_proof = hadamard_fold(G_L, G_R, winv, w);
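
Several hunks here swap the method call a.inner_product(&b) for a free function inner_product(&a, &b). Both compute the plain inner product; a minimal illustration over integers (mine, not the crate's Scalar version):

// Minimal illustration of the inner product these hunks call:
// <a, b> = sum_i a_i * b_i, with both vectors the same length.
fn inner_product(a: &[u64], b: &[u64]) -> u64 {
  assert_eq!(a.len(), b.len());
  a.iter().zip(b).map(|(a, b)| a * b).sum()
}

fn main() {
  // <(1, 2, 3), (4, 5, 6)> = 4 + 10 + 18 = 32
  assert_eq!(inner_product(&[1, 2, 3], &[4, 5, 6]), 32);
}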
@@ -112,7 +112,7 @@ impl AggregateRangeStatement {
     let mut d = ScalarVector::new(mn);
     for j in 1 ..= V.len() {
       z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
-      d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
+      d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1]));
     }

     let mut ascending_y = ScalarVector(vec![y]);

@@ -124,8 +124,7 @@ impl AggregateRangeStatement {
     let mut descending_y = ascending_y.clone();
     descending_y.0.reverse();

-    let d_descending_y = d.clone() * &descending_y;
+    let d_descending_y = d.mul_vec(&descending_y);
-    let d_descending_y_plus_z = d_descending_y + z;

     let y_mn_plus_one = descending_y[0] * y;

@@ -136,9 +135,9 @@ impl AggregateRangeStatement {

     let neg_z = -z;
     let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
-    for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
+    for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() {
       A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
-      A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
+      A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i)));
     }
     A_terms.push((y_mn_plus_one, commitment_accum));
     A_terms.push((

@@ -146,14 +145,7 @@ impl AggregateRangeStatement {
       Generators::g(),
     ));

-    (
-      y,
-      d_descending_y_plus_z,
-      y_mn_plus_one,
-      z,
-      ScalarVector(z_pow),
-      A + multiexp_vartime(&A_terms),
-    )
+    (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
   }

   pub(crate) fn prove<R: RngCore + CryptoRng>(

@@ -199,7 +191,7 @@ impl AggregateRangeStatement {
       a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
     }

-    let a_r = a_l.clone() - Scalar::ONE;
+    let a_r = a_l.sub(Scalar::ONE);

     let alpha = Scalar::random(&mut *rng);

@@ -217,11 +209,11 @@ impl AggregateRangeStatement {
     // Multiply by INV_EIGHT per earlier commentary
     A.0 *= crate::INV_EIGHT();

-    let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
+    let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) =
       Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);

-    let a_l = a_l - z;
+    let a_l = a_l.sub(z);
-    let a_r = a_r + &d_descending_y_plus_z;
+    let a_r = a_r.add_vec(&d_descending_y).add(z);
     let mut alpha = alpha;
     for j in 1 ..= witness.gammas.len() {
       alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
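For orientation, `compute_A_hat` accumulates, for each index i, the terms (−z)·G_i and (d ∘ y_descending + z)_i·H_i, then adds y^(mn+1) times the commitment accumulator and a final term on g, evaluating everything in a single `multiexp_vartime` added onto A. The two sides differ only in plumbing: the left materializes and returns `d_descending_y_plus_z`, while the right returns `d_descending_y` and re-applies the `+ z` at each use site (`.add(z)` in the loop here, `.add_vec(&d_descending_y).add(z)` in `prove`).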
@@ -3,7 +3,8 @@
 use group::Group;
 use dalek_ff_group::{Scalar, EdwardsPoint};

-pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
+mod scalar_vector;
+pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product};
 mod point_vector;
 pub(crate) use point_vector::PointVector;
coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs (new file, 114 lines)
@@ -0,0 +1,114 @@
+use core::{
+  borrow::Borrow,
+  ops::{Index, IndexMut},
+};
+use std_shims::vec::Vec;
+
+use zeroize::Zeroize;
+
+use group::ff::Field;
+use dalek_ff_group::Scalar;
+
+#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
+pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
+
+impl Index<usize> for ScalarVector {
+  type Output = Scalar;
+  fn index(&self, index: usize) -> &Scalar {
+    &self.0[index]
+  }
+}
+
+impl IndexMut<usize> for ScalarVector {
+  fn index_mut(&mut self, index: usize) -> &mut Scalar {
+    &mut self.0[index]
+  }
+}
+
+impl ScalarVector {
+  pub(crate) fn new(len: usize) -> Self {
+    ScalarVector(vec![Scalar::ZERO; len])
+  }
+
+  pub(crate) fn add(&self, scalar: impl Borrow<Scalar>) -> Self {
+    let mut res = self.clone();
+    for val in &mut res.0 {
+      *val += scalar.borrow();
+    }
+    res
+  }
+
+  pub(crate) fn sub(&self, scalar: impl Borrow<Scalar>) -> Self {
+    let mut res = self.clone();
+    for val in &mut res.0 {
+      *val -= scalar.borrow();
+    }
+    res
+  }
+
+  pub(crate) fn mul(&self, scalar: impl Borrow<Scalar>) -> Self {
+    let mut res = self.clone();
+    for val in &mut res.0 {
+      *val *= scalar.borrow();
+    }
+    res
+  }
+
+  pub(crate) fn add_vec(&self, vector: &Self) -> Self {
+    debug_assert_eq!(self.len(), vector.len());
+    let mut res = self.clone();
+    for (i, val) in res.0.iter_mut().enumerate() {
+      *val += vector.0[i];
+    }
+    res
+  }
+
+  pub(crate) fn mul_vec(&self, vector: &Self) -> Self {
+    debug_assert_eq!(self.len(), vector.len());
+    let mut res = self.clone();
+    for (i, val) in res.0.iter_mut().enumerate() {
+      *val *= vector.0[i];
+    }
+    res
+  }
+
+  pub(crate) fn inner_product(&self, vector: &Self) -> Scalar {
+    self.mul_vec(vector).sum()
+  }
+
+  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
+    debug_assert!(len != 0);
+
+    let mut res = Vec::with_capacity(len);
+    res.push(Scalar::ONE);
+    res.push(x);
+    for i in 2 .. len {
+      res.push(res[i - 1] * x);
+    }
+    res.truncate(len);
+    ScalarVector(res)
+  }
+
+  pub(crate) fn sum(mut self) -> Scalar {
+    self.0.drain(..).sum()
+  }
+
+  pub(crate) fn len(&self) -> usize {
+    self.0.len()
+  }
+
+  pub(crate) fn split(mut self) -> (Self, Self) {
+    debug_assert!(self.len() > 1);
+    let r = self.0.split_off(self.0.len() / 2);
+    debug_assert_eq!(self.len(), r.len());
+    (self, ScalarVector(r))
+  }
+}
+
+pub(crate) fn weighted_inner_product(
+  a: &ScalarVector,
+  b: &ScalarVector,
+  y: &ScalarVector,
+) -> Scalar {
+  a.inner_product(&b.mul_vec(y))
+}
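A quick sketch of how this new API composes; the block below is illustrative only (not part of the diff), using small scalars so the sums are checkable by hand:

  use dalek_ff_group::Scalar;

  let a = ScalarVector(vec![Scalar::from(1u64), Scalar::from(2u64)]);
  let b = ScalarVector(vec![Scalar::from(3u64), Scalar::from(4u64)]);
  // powers(x, len) yields [1, x, x^2, ...]; here [1, 2]
  let y = ScalarVector::powers(Scalar::from(2u64), 2);

  // inner_product sums the element-wise product: 1*3 + 2*4 = 11
  assert_eq!(a.inner_product(&b), Scalar::from(11u64));
  // weighted_inner_product(a, b, y) = <a, b ∘ y> = 1*3*1 + 2*4*2 = 19
  assert_eq!(weighted_inner_product(&a, &b, &y), Scalar::from(19u64));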
@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};

 use zeroize::{Zeroize, ZeroizeOnDrop};

-use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
+use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
 use group::{
   ff::{Field, PrimeField},
   GroupEncoding,

@@ -12,7 +12,8 @@ use group::{
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use crate::ringct::bulletproofs::plus::{
-  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
+  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product,
+  transcript::*,
 };

 // Figure 1

@@ -218,7 +219,7 @@ impl WipStatement {
       .zip(g_bold.0.iter().copied())
       .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
       .collect::<Vec<_>>();
-    P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
+    P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g));
     P_terms.push((witness.alpha, h));
     debug_assert_eq!(multiexp(&P_terms), P);
     P_terms.zeroize();

@@ -257,13 +258,14 @@ impl WipStatement {
       let d_l = Scalar::random(&mut *rng);
       let d_r = Scalar::random(&mut *rng);

-      let c_l = a1.clone().weighted_inner_product(&b2, &y);
+      let c_l = weighted_inner_product(&a1, &b2, &y);
-      let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);
+      let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y);

       // TODO: Calculate these with a batch inversion
       let y_inv_n_hat = y_n_hat.invert().unwrap();

-      let mut L_terms = (a1.clone() * y_inv_n_hat)
+      let mut L_terms = a1
+        .mul(y_inv_n_hat)
         .0
         .drain(..)
         .zip(g_bold2.0.iter().copied())

@@ -275,7 +277,8 @@ impl WipStatement {
       L_vec.push(L);
       L_terms.zeroize();

-      let mut R_terms = (a2.clone() * y_n_hat)
+      let mut R_terms = a2
+        .mul(y_n_hat)
         .0
         .drain(..)
         .zip(g_bold1.0.iter().copied())

@@ -291,8 +294,8 @@ impl WipStatement {
       (e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
         Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);

-      a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
+      a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e));
-      b = (b1 * inv_e) + &(b2 * e);
+      b = b1.mul(inv_e).add_vec(&b2.mul(e));
       alpha += (d_l * e_square) + (d_r * inv_e_square);

       debug_assert_eq!(g_bold.len(), a.len());
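In scalar terms, each round of this weighted-inner-product argument replaces the witness halves with a ← a₁·e + a₂·(ŷⁿ·e⁻¹) and b ← b₁·e⁻¹ + b₂·e, and the blinding with α ← α + d_L·e² + d_R·e⁻², halving the vectors until one element remains. Both sides compute exactly that; the hunks only swap the clone-heavy operator overloads for the explicit `.mul`/`.add_vec` methods and the free `weighted_inner_product`.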
@@ -1,17 +1,85 @@
-use core::{
-  borrow::Borrow,
-  ops::{Index, IndexMut, Add, Sub, Mul},
-};
+use core::ops::{Add, Sub, Mul, Index};
 use std_shims::vec::Vec;

 use zeroize::{Zeroize, ZeroizeOnDrop};

 use group::ff::Field;
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use multiexp::multiexp;

 #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
 pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
+
+macro_rules! math_op {
+  ($Op: ident, $op: ident, $f: expr) => {
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<Scalar> for ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: Scalar) -> ScalarVector {
+        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
+      }
+    }
+
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<Scalar> for &ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: Scalar) -> ScalarVector {
+        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
+      }
+    }
+
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<ScalarVector> for ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: ScalarVector) -> ScalarVector {
+        debug_assert_eq!(self.len(), b.len());
+        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
+      }
+    }
+
+    #[allow(clippy::redundant_closure_call)]
+    impl $Op<&ScalarVector> for &ScalarVector {
+      type Output = ScalarVector;
+      fn $op(self, b: &ScalarVector) -> ScalarVector {
+        debug_assert_eq!(self.len(), b.len());
+        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
+      }
+    }
+  };
+}
+math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
+math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
+math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);
+
+impl ScalarVector {
+  pub(crate) fn new(len: usize) -> ScalarVector {
+    ScalarVector(vec![Scalar::ZERO; len])
+  }
+
+  pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
+    debug_assert!(len != 0);
+
+    let mut res = Vec::with_capacity(len);
+    res.push(Scalar::ONE);
+    for i in 1 .. len {
+      res.push(res[i - 1] * x);
+    }
+    ScalarVector(res)
+  }
+
+  pub(crate) fn sum(mut self) -> Scalar {
+    self.0.drain(..).sum()
+  }
+
+  pub(crate) fn len(&self) -> usize {
+    self.0.len()
+  }
+
+  pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
+    let (l, r) = self.0.split_at(self.0.len() / 2);
+    (ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
+  }
+}

 impl Index<usize> for ScalarVector {
   type Output = Scalar;

@@ -19,120 +87,28 @@ impl Index<usize> for ScalarVector {
     &self.0[index]
   }
 }
-impl IndexMut<usize> for ScalarVector {
-  fn index_mut(&mut self, index: usize) -> &mut Scalar {
-    &mut self.0[index]
-  }
-}

-impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn add(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s += scalar.borrow();
-    }
-    self
-  }
-}
-impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn sub(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s -= scalar.borrow();
-    }
-    self
-  }
-}
-impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
-  type Output = ScalarVector;
-  fn mul(mut self, scalar: S) -> ScalarVector {
-    for s in &mut self.0 {
-      *s *= scalar.borrow();
-    }
-    self
-  }
-}
-
-impl Add<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn add(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s += o;
-    }
-    self
-  }
-}
-impl Sub<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn sub(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s -= o;
-    }
-    self
-  }
-}
-impl Mul<&ScalarVector> for ScalarVector {
-  type Output = ScalarVector;
-  fn mul(mut self, other: &ScalarVector) -> ScalarVector {
-    debug_assert_eq!(self.len(), other.len());
-    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
-      *s *= o;
-    }
-    self
-  }
-}
+pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
+  (a * b).sum()
+}

 impl Mul<&[EdwardsPoint]> for &ScalarVector {
   type Output = EdwardsPoint;
   fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
     debug_assert_eq!(self.len(), b.len());
-    let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
-    let res = multiexp(&multiexp_args);
-    multiexp_args.zeroize();
-    res
+    multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>())
   }
 }

-impl ScalarVector {
-  pub(crate) fn new(len: usize) -> Self {
-    ScalarVector(vec![Scalar::ZERO; len])
-  }
-
-  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
-    debug_assert!(len != 0);
-
-    let mut res = Vec::with_capacity(len);
-    res.push(Scalar::ONE);
-    res.push(x);
-    for i in 2 .. len {
-      res.push(res[i - 1] * x);
-    }
-    res.truncate(len);
-    ScalarVector(res)
-  }
-
-  pub(crate) fn len(&self) -> usize {
-    self.0.len()
-  }
-
-  pub(crate) fn sum(mut self) -> Scalar {
-    self.0.drain(..).sum()
-  }
-
-  pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
-    (self * vector).sum()
-  }
-
-  pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
-    (self * vector * y).sum()
-  }
-
-  pub(crate) fn split(mut self) -> (Self, Self) {
-    debug_assert!(self.len() > 1);
-    let r = self.0.split_off(self.0.len() / 2);
-    debug_assert_eq!(self.len(), r.len());
-    (self, ScalarVector(r))
-  }
-}
+pub(crate) fn hadamard_fold(
+  l: &[EdwardsPoint],
+  r: &[EdwardsPoint],
+  a: Scalar,
+  b: Scalar,
+) -> Vec<EdwardsPoint> {
+  let mut res = Vec::with_capacity(l.len() / 2);
+  for i in 0 .. l.len() {
+    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
+  }
+  res
+}
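A small sanity-check sketch of the right side's operators and `hadamard_fold` (illustrative only, not part of the diff):

  use group::Group;
  use dalek_ff_group::{Scalar, EdwardsPoint};

  let a = ScalarVector(vec![Scalar::from(1u64), Scalar::from(2u64)]);
  let b = ScalarVector(vec![Scalar::from(3u64), Scalar::from(4u64)]);

  // math_op!-generated element-wise ops: [1+3, 2+4], and <a, b> = 11
  assert_eq!((&a + &b).0, vec![Scalar::from(4u64), Scalar::from(6u64)]);
  assert_eq!(inner_product(&a, &b), Scalar::from(11u64));

  // hadamard_fold maps each pair (l[i], r[i]) to a*l[i] + b*r[i],
  // folding split generator halves into one vector
  let g = EdwardsPoint::generator();
  let folded = hadamard_fold(&[g], &[g], Scalar::from(2u64), Scalar::from(3u64));
  assert_eq!(folded, vec![g * Scalar::from(5u64)]);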
@@ -199,7 +199,6 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     l: Participant,
     addendum: ClsagAddendum,
   ) -> Result<(), FrostError> {
-    // TODO: This check is faulty if two shares are additive inverses of each other
     if self.image.is_identity().into() {
       self.transcript.domain_separate(b"CLSAG");
       self.input().transcript(&mut self.transcript);
@@ -9,6 +9,7 @@ use dalek_ff_group::{Scalar, EdwardsPoint};
 use crate::ringct::bulletproofs::plus::{
   ScalarVector, PointVector, GeneratorsList, Generators,
   weighted_inner_product::{WipStatement, WipWitness},
+  weighted_inner_product,
 };

 #[test]

@@ -67,7 +68,7 @@ fn test_weighted_inner_product() {
   #[allow(non_snake_case)]
   let P = g_bold.multiexp(&a) +
     h_bold.multiexp(&b) +
-    (g * a.clone().weighted_inner_product(&b, &y_vec)) +
+    (g * weighted_inner_product(&a, &b, &y_vec)) +
     (h * alpha);

   let statement = WipStatement::new(generators, P, y);
@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
     .unwrap();
   let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();

-  // TODO: Needs https://github.com/monero-project/monero/pull/9260
+  // TODO: Needs https://github.com/monero-project/monero/pull/8882
   // let fee_rate = daemon_rpc
   //   .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
   //   .await

@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
   let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
   let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);

-  // TODO: Needs https://github.com/monero-project/monero/pull/9260
+  // TODO: Needs https://github.com/monero-project/monero/pull/8882
   // runner::check_weight_and_fee(&tx, fee_rate);

   match spec {
@@ -18,7 +18,7 @@ workspace = true

 [dependencies]
 parity-db = { version = "0.4", default-features = false, optional = true }
-rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }
+rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }

 [features]
 parity-db = ["dep:parity-db"]
@@ -1,77 +1,42 @@
-use std::{sync::Arc, collections::HashSet};
+use std::sync::Arc;

-use rocksdb::{
-  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
-  Transaction as RocksTransaction, Options, OptimisticTransactionDB, SnapshotWithThreadMode,
-};
+use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB};

 use crate::*;

-pub struct Transaction<'a, T: ThreadMode> {
-  dirtied_keys: HashSet<Vec<u8>>,
-  txn: RocksTransaction<'a, OptimisticTransactionDB<T>>,
-  snapshot: SnapshotWithThreadMode<'a, OptimisticTransactionDB<T>>,
-  db: &'a OptimisticTransactionDB<T>,
-}
-
-impl<T: ThreadMode> Get for Transaction<'_, T> {
+impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    if self.dirtied_keys.contains(key.as_ref()) {
-      return self.txn.get(key).expect("couldn't read from RocksDB via transaction");
-    }
-    self.snapshot.get(key).expect("couldn't read from RocksDB via snapshot")
+    self.get(key).expect("couldn't read from RocksDB via transaction")
   }
 }
-impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
+impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
-    self.dirtied_keys.insert(key.as_ref().to_vec());
-    self.txn.put(key, value).expect("couldn't write to RocksDB via transaction")
+    Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
   }
   fn del(&mut self, key: impl AsRef<[u8]>) {
-    self.dirtied_keys.insert(key.as_ref().to_vec());
-    self.txn.delete(key).expect("couldn't delete from RocksDB via transaction")
+    self.delete(key).expect("couldn't delete from RocksDB via transaction")
   }
   fn commit(self) {
-    self.txn.commit().expect("couldn't commit to RocksDB via transaction");
-    self.db.flush_wal(true).expect("couldn't flush RocksDB WAL");
-    self.db.flush().expect("couldn't flush RocksDB");
+    Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
   }
 }

-impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
+impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
+    TransactionDB::get(self, key).expect("couldn't read from RocksDB")
   }
 }
-impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
+impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
-  type Transaction<'a> = Transaction<'a, T>;
+  type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
   fn txn(&mut self) -> Self::Transaction<'_> {
-    let mut opts = WriteOptions::default();
-    opts.set_sync(true);
-    Transaction {
-      dirtied_keys: HashSet::new(),
-      txn: self.transaction_opt(&opts, &Default::default()),
-      snapshot: self.snapshot(),
-      db: &**self,
-    }
+    self.transaction()
   }
 }

-pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
+pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
 pub fn new_rocksdb(path: &str) -> RocksDB {
   let mut options = Options::default();
   options.create_if_missing(true);
-  options.set_compression_type(DBCompressionType::Zstd);
-  options.set_wal_compression_type(DBCompressionType::Zstd);
-  // 10 MB
-  options.set_max_total_wal_size(10 * 1024 * 1024);
-  options.set_wal_size_limit_mb(10);
-
-  options.set_log_level(LogLevel::Warn);
-  // 1 MB
-  options.set_max_log_file_size(1024 * 1024);
-  options.set_recycle_log_file_num(1);
-
-  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
+  options.set_compression_type(DBCompressionType::Lz4);
+  Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
 }
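The design difference is worth spelling out. The left wrapper gave each transaction read-your-writes on top of a stable snapshot (reads hit the snapshot unless the transaction itself dirtied the key) and made `commit` durable by explicitly flushing the WAL and memtables; the right side reverts to a plain `TransactionDB`, whose `Transaction::get` already observes the transaction's own uncommitted writes. Either way the crate's `Db`/`DbTxn`/`Get` surface is identical. A minimal usage sketch (illustrative, assuming the traits as defined in this crate):

  let mut db = new_rocksdb("/tmp/serai-db-example");
  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // a transaction sees its own uncommitted writes
  assert_eq!(txn.get(b"key"), Some(b"value".to_vec()));
  txn.commit();
  assert_eq!(db.get(b"key"), Some(b"value".to_vec()));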
@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
 http-body-util = { version = "0.1", default-features = false }
 tokio = { version = "1", default-features = false }

-hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
+hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

 zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }
@@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
 ) {
   let mut tributaries = HashMap::new();
   'outer: loop {
-    // TODO: Create a better async flow for this
-    tokio::time::sleep(core::time::Duration::from_millis(100)).await;
+    // TODO: Create a better async flow for this, as this does still hammer this task
+    tokio::task::yield_now().await;

     match tributary_event.try_recv() {
       Ok(event) => match event {
@@ -290,81 +290,6 @@ impl LibP2p {
       IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
     }

-    // Find and connect to peers
-    let (pending_p2p_connections_send, mut pending_p2p_connections_recv) =
-      tokio::sync::mpsc::unbounded_channel();
-    let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel();
-    tokio::spawn({
-      let pending_p2p_connections_send = pending_p2p_connections_send.clone();
-      async move {
-        loop {
-          // TODO: Add better peer management logic?
-          {
-            let connect = |addr: Multiaddr| {
-              log::info!("found peer from substrate: {addr}");
-
-              let protocols = addr.iter().filter_map(|piece| match piece {
-                // Drop PeerIds from the Substrate P2p network
-                Protocol::P2p(_) => None,
-                // Use our own TCP port
-                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
-                other => Some(other),
-              });
-
-              let mut new_addr = Multiaddr::empty();
-              for protocol in protocols {
-                new_addr.push(protocol);
-              }
-              let addr = new_addr;
-              log::debug!("transformed found peer: {addr}");
-
-              // TODO: Check this isn't a duplicate
-              to_dial_send.send(addr).unwrap();
-            };
-
-            // TODO: We should also connect to random peers from random nets as needed for
-            // cosigning
-            let mut to_retry = vec![];
-            while let Some(network) = pending_p2p_connections_recv.recv().await {
-              if let Ok(mut nodes) = serai.p2p_validators(network).await {
-                // If there's an insufficient amount of nodes known, connect to all yet add it
-                // back and break
-                if nodes.len() < 3 {
-                  log::warn!(
-                    "insufficient amount of P2P nodes known for {:?}: {}",
-                    network,
-                    nodes.len()
-                  );
-                  to_retry.push(network);
-                  for node in nodes {
-                    connect(node);
-                  }
-                  continue;
-                }
-
-                // Randomly select up to 5
-                for _ in 0 .. 5 {
-                  if !nodes.is_empty() {
-                    let to_connect = nodes.swap_remove(
-                      usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
-                        .unwrap(),
-                    );
-                    connect(to_connect);
-                  }
-                }
-              }
-            }
-            for to_retry in to_retry {
-              pending_p2p_connections_send.send(to_retry).unwrap();
-            }
-          }
-          // Sleep 60 seconds before moving to the next iteration
-          tokio::time::sleep(core::time::Duration::from_secs(60)).await;
-        }
-      }
-    });
-
-    // Manage the actual swarm
     tokio::spawn({
       let mut time_of_last_p2p_message = Instant::now();

@@ -396,8 +321,66 @@ impl LibP2p {

       async move {
         let mut set_for_genesis = HashMap::new();
-        let mut connected_peers = 0;
+        let mut pending_p2p_connections = vec![];
+        // Run this task ad-infinitum
         loop {
+          // Handle pending P2P connections
+          // TODO: Break this out onto its own task with better peer management logic?
+          {
+            let mut connect = |addr: Multiaddr| {
+              log::info!("found peer from substrate: {addr}");
+
+              let protocols = addr.iter().filter_map(|piece| match piece {
+                // Drop PeerIds from the Substrate P2p network
+                Protocol::P2p(_) => None,
+                // Use our own TCP port
+                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
+                other => Some(other),
+              });
+
+              let mut new_addr = Multiaddr::empty();
+              for protocol in protocols {
+                new_addr.push(protocol);
+              }
+              let addr = new_addr;
+              log::debug!("transformed found peer: {addr}");
+
+              if let Err(e) = swarm.dial(addr) {
+                log::warn!("dialing peer failed: {e:?}");
+              }
+            };
+
+            while let Some(network) = pending_p2p_connections.pop() {
+              if let Ok(mut nodes) = serai.p2p_validators(network).await {
+                // If there's an insufficient amount of nodes known, connect to all yet add it back
+                // and break
+                if nodes.len() < 3 {
+                  log::warn!(
+                    "insufficient amount of P2P nodes known for {:?}: {}",
+                    network,
+                    nodes.len()
+                  );
+                  pending_p2p_connections.push(network);
+                  for node in nodes {
+                    connect(node);
+                  }
+                  break;
+                }
+
+                // Randomly select up to 5
+                for _ in 0 .. 5 {
+                  if !nodes.is_empty() {
+                    let to_connect = nodes.swap_remove(
+                      usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
+                        .unwrap(),
+                    );
+                    connect(to_connect);
+                  }
+                }
+              }
+            }
+          }

           let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);
           tokio::select! {
             biased;

@@ -409,7 +392,7 @@ impl LibP2p {
               let topic = topic_for_set(set);
               if subscribe {
                 log::info!("subscribing to p2p messages for {set:?}");
-                pending_p2p_connections_send.send(set.network).unwrap();
+                pending_p2p_connections.push(set.network);
                 set_for_genesis.insert(genesis, set);
                 swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();
               } else {

@@ -438,28 +421,14 @@ impl LibP2p {
               Some(SwarmEvent::Dialing { connection_id, .. }) => {
                 log::debug!("dialing to peer in connection ID {}", &connection_id);
               }
               Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => {
-                if &peer_id == swarm.local_peer_id() {
-                  log::warn!("established a libp2p connection to ourselves");
-                  swarm.close_connection(connection_id);
-                  continue;
-                }
-
-                connected_peers += 1;
                 log::debug!(
-                  "connection established to peer {} in connection ID {}, connected peers: {}",
+                  "connection established to peer {} in connection ID {}",
                   &peer_id,
                   &connection_id,
-                  connected_peers,
                 );
+                swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id)
               }
-              Some(SwarmEvent::ConnectionClosed { peer_id, .. }) => {
-                connected_peers -= 1;
-                log::debug!(
-                  "connection with peer {peer_id} closed, connected peers: {}",
-                  connected_peers,
-                );
-              }
               Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
                 GsEvent::Message { propagation_source, message, .. },
               ))) => {

@@ -471,14 +440,6 @@ impl LibP2p {
                 }
               }

-              // Handle peers to dial
-              addr = to_dial_recv.recv() => {
-                let addr = addr.expect("received address was None (sender dropped?)");
-                if let Err(e) = swarm.dial(addr) {
-                  log::warn!("dialing peer failed: {e:?}");
-                }
-              }
-
               // If it's been >80s since we've published a message, publish a KeepAlive since we're
               // still an active service
               // This is useful when we have no active tributaries and accordingly aren't sending
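The structural change here: the left branch had moved peer discovery onto its own task, with two unbounded mpsc channels, one carrying networks needing peers and one carrying addresses back for dialing (necessary because `Swarm::dial` requires mutable access to the swarm, which only the swarm task holds), and it additionally tracked a connected-peer count and guarded against dialing itself. The right branch runs the same discovery logic inline at the top of the swarm loop, so it can call `swarm.dial` directly, at the cost of awaiting `serai.p2p_validators` inside the event loop.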
@@ -41,9 +41,8 @@ enum HasEvents {

 create_db!(
   SubstrateCosignDb {
-    ScanCosignFrom: () -> u64,
     IntendedCosign: () -> (u64, Option<u64>),
-    BlockHasEventsCache: (block: u64) -> HasEvents,
+    BlockHasEvents: (block: u64) -> HasEvents,
     LatestCosignedBlock: () -> u64,
   }
 );

@@ -86,7 +85,7 @@ async fn block_has_events(
   serai: &Serai,
   block: u64,
 ) -> Result<HasEvents, SeraiError> {
-  let cached = BlockHasEventsCache::get(txn, block);
+  let cached = BlockHasEvents::get(txn, block);
   match cached {
     None => {
       let serai = serai.as_of(

@@ -108,8 +107,8 @@ async fn block_has_events(

       let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

-      BlockHasEventsCache::set(txn, block, &has_events);
+      BlockHasEvents::set(txn, block, &has_events);
-      Ok(has_events)
+      Ok(HasEvents::Yes)
     }
     Some(code) => Ok(code),
   }

@@ -136,7 +135,6 @@ async fn potentially_cosign_block(
   if (block_has_events == HasEvents::No) &&
     (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
   {
-    log::debug!("automatically co-signing next block ({block}) since it has no events");
     LatestCosignedBlock::set(txn, &block);
   }

@@ -180,7 +178,7 @@ async fn potentially_cosign_block(
   which should be cosigned). Accordingly, it is necessary to call multiple times even if
   `latest_number` doesn't change.
 */
-async fn advance_cosign_protocol_inner(
+pub async fn advance_cosign_protocol(
   db: &mut impl Db,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   serai: &Serai,

@@ -205,23 +203,16 @@ async fn advance_cosign_protocol_inner(
   let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
   // If we've never triggered a cosign, don't skip any cosigns based on proximity
   if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
-    window_end_exclusive = 1;
+    window_end_exclusive = 0;
   }

-  // The consensus rules for this are `last_intended_to_cosign_block + 1`
-  let scan_start_block = last_intended_to_cosign_block + 1;
-  // As a practical optimization, we don't re-scan old blocks since old blocks are independent to
-  // new state
-  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));
-
   // Check all blocks within the window to see if they should be cosigned
   // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
   // do cosign them
   // We only perform this check if we haven't already marked a block as skipped since the cosign
   // the skipped block will cause will cosign all other blocks within this window
   if skipped_block.is_none() {
-    let window_end_inclusive = window_end_exclusive - 1;
-    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
+    for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
       if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
         skipped_block = Some(b);
         log::debug!("skipping cosigning {b} due to proximity to prior cosign");

@@ -236,7 +227,7 @@ async fn advance_cosign_protocol_inner(
   // A list of sets which are cosigning, along with a boolean of if we're in the set
   let mut cosigning = vec![];

-  for block in scan_start_block ..= latest_number {
+  for block in (last_intended_to_cosign_block + 1) ..= latest_number {
     let actual_block = serai
       .finalized_block_by_number(block)
       .await?

@@ -285,11 +276,6 @@ async fn advance_cosign_protocol_inner(

       break;
     }
-
-    // If this TX is committed, always start future scanning from the next block
-    ScanCosignFrom::set(&mut txn, &(block + 1));
-    // Since we're scanning *from* the next block, tidy the cache
-    BlockHasEventsCache::del(&mut txn, block);
   }

   if let Some((number, hash)) = to_cosign {

@@ -311,22 +297,3 @@ async fn advance_cosign_protocol_inner(

   Ok(())
 }
-
-pub async fn advance_cosign_protocol(
-  db: &mut impl Db,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  serai: &Serai,
-  latest_number: u64,
-) -> Result<(), SeraiError> {
-  loop {
-    let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
-    // Only scan 1000 blocks at a time to limit a massive txn from forming
-    let scan_to = latest_number.min(scan_from + 1000);
-    advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
-    // If we didn't limit the scan_to, break
-    if scan_to == latest_number {
-      break;
-    }
-  }
-  Ok(())
-}
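On the left, the `advance_cosign_protocol` wrapper chunks the work so no single database transaction spans more than 1000 blocks: with `ScanCosignFrom` starting at 1 and `latest_number = 2500`, the inner function is invoked with `scan_to` of 1001, then roughly 2002, then 2500, at which point `scan_to == latest_number` and the loop exits. The `ScanCosignFrom` marker (plus the per-block cache cleanup) is what lets each call resume where the last one stopped instead of re-scanning from the last intended cosign.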
@@ -11,7 +11,10 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use serai_client::{
   SeraiError, Block, Serai, TemporalSerai,
   primitives::{BlockHash, NetworkId},
-  validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
+  validator_sets::{
+    primitives::{ValidatorSet, amortize_excess_key_shares},
+    ValidatorSetsEvent,
+  },
   in_instructions::InInstructionsEvent,
   coins::CoinsEvent,
 };

@@ -66,7 +69,12 @@ async fn handle_new_set<D: Db>(
     let set_participants =
       serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

-    set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
+    let mut set_data = set_participants
+      .into_iter()
+      .map(|(k, w)| (k, u16::try_from(w).unwrap()))
+      .collect::<Vec<_>>();
+    amortize_excess_key_shares(&mut set_data);
+    set_data
   };

   let time = if let Ok(time) = block.time() {
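The right branch additionally runs the participant weights through `amortize_excess_key_shares` before treating them as key-share counts; going by its name and placement, it trims excess shares across participants so the set's total stays within the protocol's bound, with the exact rule living in serai_client's validator-sets primitives rather than in this file.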
@@ -1,5 +1,5 @@
 use core::{marker::PhantomData, fmt::Debug};
-use std::{sync::Arc, io, collections::VecDeque};
+use std::{sync::Arc, io};

 use async_trait::async_trait;

@@ -194,7 +194,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     );
     let blockchain = Arc::new(RwLock::new(blockchain));

-    let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
+    let to_rebroadcast = Arc::new(RwLock::new(vec![]));
     // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
     // P2P layer
     let p2p_meta_task_handle = Arc::new(

@@ -207,7 +207,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
           for msg in to_rebroadcast {
             p2p.broadcast(genesis, msg).await;
           }
-          tokio::time::sleep(core::time::Duration::from_secs(60)).await;
+          tokio::time::sleep(core::time::Duration::from_secs(1)).await;
         }
       }
     })

@@ -218,15 +218,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
       TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };

     let TendermintHandle { synced_block, synced_block_result, messages, machine } =
-      TendermintMachine::new(
-        db.clone(),
-        network.clone(),
-        genesis,
-        block_number,
-        start_time,
-        proposal,
-      )
-      .await;
+      TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
     tokio::spawn(machine.run());

     Some(Self {
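Net effect of these hunks plus the tendermint/mod.rs changes below: the left branch rebroadcasts a bounded `VecDeque` of recent consensus messages once a minute, while the right branch rebroadcasts an unbounded `Vec` every second, clearing it whenever a valid block is added. The left side's bound of 6 × the validator count follows from its own comment: ideally 3 × validators messages per round, so the deque holds roughly the two most recent rounds.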
@@ -1,8 +1,5 @@
 use core::ops::Deref;
-use std::{
-  sync::Arc,
-  collections::{VecDeque, HashMap},
-};
+use std::{sync::Arc, collections::HashMap};

 use async_trait::async_trait;

@@ -271,25 +268,46 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
   pub(crate) validators: Arc<Validators>,
   pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,

-  pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>,
+  pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,

   pub(crate) p2p: P,
 }

-pub const BLOCK_PROCESSING_TIME: u32 = 999;
+pub const BLOCK_PROCESSING_TIME: u32 = 1000;
-pub const LATENCY_TIME: u32 = 1667;
+pub const LATENCY_TIME: u32 = 3000;
 pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);

+#[test]
+fn assert_target_block_time() {
+  use serai_db::MemDb;
+
+  #[derive(Clone, Debug)]
+  pub struct DummyP2p;
+
+  #[async_trait::async_trait]
+  impl P2p for DummyP2p {
+    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
+      unimplemented!()
+    }
+  }
+
+  // Type paremeters don't matter here since we only need to call the block_time()
+  // and it only relies on the constants of the trait implementation. block_time() is in seconds,
+  // TARGET_BLOCK_TIME is in milliseconds.
+  assert_eq!(
+    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
+    TARGET_BLOCK_TIME / 1000
+  )
+}

 #[async_trait]
 impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
-  type Db = D;
-
   type ValidatorId = [u8; 32];
   type SignatureScheme = Arc<Validators>;
   type Weights = Arc<Validators>;
   type Block = TendermintBlock;

-  // These are in milliseconds and create a six-second block time.
+  // These are in milliseconds and create a ten-second block time.
   // The block time is the latency on message delivery (where a message is some piece of data
   // embedded in a transaction) times three plus the block processing time, hence why it should be
   // kept low.

@@ -307,28 +325,19 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
   }

   async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
-    let mut to_broadcast = vec![TENDERMINT_MESSAGE];
-    to_broadcast.extend(msg.encode());
-
     // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
     // until the block it's trying to build is complete
     // If the P2P layer drops a message before all nodes obtained access, or a node had an
     // intermittent failure, this will ensure reconcilliation
+    // Resolves halts caused by timing discrepancies, which technically are violations of
+    // Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing
+    // environments
     // This is atrocious if there's no content-based deduplication protocol for messages actively
     // being gossiped
     // LibP2p, as used by Serai, is configured to content-based deduplicate
-    {
-      let mut to_rebroadcast_lock = self.to_rebroadcast.write().await;
-      to_rebroadcast_lock.push_back(to_broadcast.clone());
-      // We should have, ideally, 3 * validators messages within a round
-      // Therefore, this should keep the most recent 2-rounds
-      // TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
-      // messages
-      while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
-        to_rebroadcast_lock.pop_front();
-      }
-    }
+    let mut to_broadcast = vec![TENDERMINT_MESSAGE];
+    to_broadcast.extend(msg.encode());
+    self.to_rebroadcast.write().await.push(to_broadcast.clone());

     self.p2p.broadcast(self.genesis, to_broadcast).await
   }

@@ -434,7 +443,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
   }

   // Since we've added a valid block, clear to_rebroadcast
-  *self.to_rebroadcast.write().await = VecDeque::new();
+  *self.to_rebroadcast.write().await = vec![];

   Some(TendermintBlock(
     self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
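The constants match the branch names: the left values give 999 + 3 × 1667 = 6000 ms, a six-second block time, while the right values give 1000 + 3 × 3000 = 10000 ms, the ten-second block time `10s-tender` is named for. The inlined `assert_target_block_time` test (moved here from the standalone test file deleted below) pins `block_time()`, in seconds, to `TARGET_BLOCK_TIME / 1000`.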
@@ -1,6 +1,3 @@
-#[cfg(test)]
-mod tendermint;
-
 mod transaction;
 pub use transaction::*;
@@ -1,28 +0,0 @@
-use tendermint::ext::Network;
-use crate::{
-  P2p, TendermintTx,
-  tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
-};
-
-#[test]
-fn assert_target_block_time() {
-  use serai_db::MemDb;
-
-  #[derive(Clone, Debug)]
-  pub struct DummyP2p;
-
-  #[async_trait::async_trait]
-  impl P2p for DummyP2p {
-    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
-      unimplemented!()
-    }
-  }
-
-  // Type paremeters don't matter here since we only need to call the block_time()
-  // and it only relies on the constants of the trait implementation. block_time() is in seconds,
-  // TARGET_BLOCK_TIME is in milliseconds.
-  assert_eq!(
-    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
-    TARGET_BLOCK_TIME / 1000
-  )
-}
@@ -27,7 +27,5 @@ futures-util = { version = "0.3", default-features = false, features = ["std", "
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
 tokio = { version = "1", default-features = false, features = ["time"] }

-serai-db = { path = "../../../common/db", version = "0.1", default-features = false }
-
 [dev-dependencies]
 tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }
@@ -3,9 +3,6 @@ use std::{
   collections::{HashSet, HashMap},
 };

-use parity_scale_codec::Encode;
-use serai_db::{Get, DbTxn, Db};
-
 use crate::{
   time::CanonicalInstant,
   ext::{RoundNumber, BlockNumber, Block, Network},
@@ -15,9 +12,6 @@ use crate::{
 };

 pub(crate) struct BlockData<N: Network> {
-  db: N::Db,
-  genesis: [u8; 32],
-
   pub(crate) number: BlockNumber,
   pub(crate) validator_id: Option<N::ValidatorId>,
   pub(crate) proposal: Option<N::Block>,
@@ -38,17 +32,12 @@ pub(crate) struct BlockData<N: Network> {

 impl<N: Network> BlockData<N> {
   pub(crate) fn new(
-    db: N::Db,
-    genesis: [u8; 32],
     weights: Arc<N::Weights>,
     number: BlockNumber,
     validator_id: Option<N::ValidatorId>,
     proposal: Option<N::Block>,
   ) -> BlockData<N> {
     BlockData {
-      db,
-      genesis,
-
       number,
       validator_id,
       proposal,
@@ -139,35 +128,12 @@ impl<N: Network> BlockData<N> {
     // 27, 33, 41, 46, 60, 64
     self.round_mut().step = data.step();

-    // Only return a message if we're actually a current validator and haven't prior posted a
-    // message
-    let round_number = self.round().number;
-    let step = data.step();
-    let res = self.validator_id.map(|validator_id| Message {
+    // Only return a message if we're actually a current validator
+    self.validator_id.map(|validator_id| Message {
       sender: validator_id,
       block: self.number,
-      round: round_number,
+      round: self.round().number,
       data,
-    });
-
-    if res.is_some() {
-      let mut txn = self.db.txn();
-      let key = [
-        b"tendermint-machine_already_sent_message".as_ref(),
-        &self.genesis,
-        &self.number.0.to_le_bytes(),
-        &round_number.0.to_le_bytes(),
-        &step.encode(),
-      ]
-      .concat();
-      // If we've already sent a message, return
-      if txn.get(&key).is_some() {
-        None?;
-      }
-      txn.put(&key, []);
-      txn.commit();
-    }
-
-    res
+    })
   }
 }
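The removed block above deduplicated outbound messages with a DB-backed flag keyed by genesis, block, round, and step. A rough standalone sketch of that idempotence pattern over a plain in-memory map (the actual code uses serai_db transactions; the names here are illustrative):

```rust
use std::collections::HashMap;

/// Hypothetical stand-in for a persistent key-value store.
struct Db(HashMap<Vec<u8>, Vec<u8>>);

impl Db {
  /// Returns true the first time a given (genesis, block, round, step) tuple
  /// is seen, and false on every repeat, mirroring the removed guard.
  fn first_send(&mut self, genesis: [u8; 32], block: u64, round: u32, step: u8) -> bool {
    let key = [
      b"tendermint-machine_already_sent_message".as_ref(),
      &genesis,
      &block.to_le_bytes(),
      &round.to_le_bytes(),
      &[step],
    ]
    .concat();
    if self.0.contains_key(&key) {
      return false;
    }
    self.0.insert(key, vec![]);
    true
  }
}

fn main() {
  let mut db = Db(HashMap::new());
  assert!(db.first_send([0; 32], 5, 0, 1));
  assert!(!db.first_send([0; 32], 5, 0, 1)); // duplicate suppressed
}
```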
@@ -212,9 +212,6 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode
 /// Trait representing the distributed system Tendermint is providing consensus over.
 #[async_trait]
 pub trait Network: Sized + Send + Sync {
-  /// The database used to back this.
-  type Db: serai_db::Db;
-
   // Type used to identify validators.
   type ValidatorId: ValidatorId;
   /// Signature scheme used by validators.
@@ -231,9 +231,6 @@ pub enum SlashEvent {

 /// A machine executing the Tendermint protocol.
 pub struct TendermintMachine<N: Network> {
-  db: N::Db,
-  genesis: [u8; 32],
-
   network: N,
   signer: <N::SignatureScheme as SignatureScheme>::Signer,
   validators: N::SignatureScheme,
@@ -325,8 +322,6 @@ impl<N: Network + 'static> TendermintMachine<N> {

     // Create the new block
     self.block = BlockData::new(
-      self.db.clone(),
-      self.genesis,
       self.weights.clone(),
       BlockNumber(self.block.number.0 + 1),
       self.signer.validator_id().await,
@@ -375,9 +370,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
   /// the machine itself. The machine should have `run` called from an asynchronous task.
   #[allow(clippy::new_ret_no_self)]
   pub async fn new(
-    db: N::Db,
     network: N,
-    genesis: [u8; 32],
     last_block: BlockNumber,
     last_time: u64,
     proposal: N::Block,
@@ -416,9 +409,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
     let validator_id = signer.validator_id().await;
     // 01-10
     let mut machine = TendermintMachine {
-      db: db.clone(),
-      genesis,
-
       network,
       signer,
       validators,
@@ -430,8 +420,6 @@ impl<N: Network + 'static> TendermintMachine<N> {
       synced_block_result_send,

       block: BlockData::new(
-        db,
-        genesis,
         weights,
         BlockNumber(last_block.0 + 1),
         validator_id,
@@ -10,8 +10,6 @@ use parity_scale_codec::{Encode, Decode};
 use futures_util::sink::SinkExt;
 use tokio::{sync::RwLock, time::sleep};

-use serai_db::MemDb;
-
 use tendermint_machine::{
   ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
   SlashEvent, TendermintMachine, TendermintHandle,
@@ -113,8 +111,6 @@ struct TestNetwork(

 #[async_trait]
 impl Network for TestNetwork {
-  type Db = MemDb;
-
   type ValidatorId = TestValidatorId;
   type SignatureScheme = TestSignatureScheme;
   type Weights = TestWeights;
@@ -174,9 +170,7 @@ impl TestNetwork {
       let i = u16::try_from(i).unwrap();
       let TendermintHandle { messages, synced_block, synced_block_result, machine } =
         TendermintMachine::new(
-          MemDb::new(),
           TestNetwork(i, arc.clone()),
-          [0; 32],
           BlockNumber(1),
           start_time,
           TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },
docs/.gitignore (vendored, 7 lines)
@@ -1,7 +0,0 @@
-_site/
-.sass-cache/
-.jekyll-cache/
-.jekyll-metadata
-
-.bundle/
-vendor/
@@ -1 +0,0 @@
-3.1
@@ -1,4 +0,0 @@
-source 'https://rubygems.org'
-
-gem "jekyll", "~> 4.3.3"
-gem "just-the-docs", "0.8.1"
@@ -1,82 +0,0 @@
-GEM
-  remote: https://rubygems.org/
-  specs:
-    addressable (2.8.6)
-      public_suffix (>= 2.0.2, < 6.0)
-    colorator (1.1.0)
-    concurrent-ruby (1.2.3)
-    em-websocket (0.5.3)
-      eventmachine (>= 0.12.9)
-      http_parser.rb (~> 0)
-    eventmachine (1.2.7)
-    ffi (1.16.3)
-    forwardable-extended (2.6.0)
-    google-protobuf (3.25.3-x86_64-linux)
-    http_parser.rb (0.8.0)
-    i18n (1.14.4)
-      concurrent-ruby (~> 1.0)
-    jekyll (4.3.3)
-      addressable (~> 2.4)
-      colorator (~> 1.0)
-      em-websocket (~> 0.5)
-      i18n (~> 1.0)
-      jekyll-sass-converter (>= 2.0, < 4.0)
-      jekyll-watch (~> 2.0)
-      kramdown (~> 2.3, >= 2.3.1)
-      kramdown-parser-gfm (~> 1.0)
-      liquid (~> 4.0)
-      mercenary (>= 0.3.6, < 0.5)
-      pathutil (~> 0.9)
-      rouge (>= 3.0, < 5.0)
-      safe_yaml (~> 1.0)
-      terminal-table (>= 1.8, < 4.0)
-      webrick (~> 1.7)
-    jekyll-include-cache (0.2.1)
-      jekyll (>= 3.7, < 5.0)
-    jekyll-sass-converter (3.0.0)
-      sass-embedded (~> 1.54)
-    jekyll-seo-tag (2.8.0)
-      jekyll (>= 3.8, < 5.0)
-    jekyll-watch (2.2.1)
-      listen (~> 3.0)
-    just-the-docs (0.8.1)
-      jekyll (>= 3.8.5)
-      jekyll-include-cache
-      jekyll-seo-tag (>= 2.0)
-      rake (>= 12.3.1)
-    kramdown (2.4.0)
-      rexml
-    kramdown-parser-gfm (1.1.0)
-      kramdown (~> 2.0)
-    liquid (4.0.4)
-    listen (3.9.0)
-      rb-fsevent (~> 0.10, >= 0.10.3)
-      rb-inotify (~> 0.9, >= 0.9.10)
-    mercenary (0.4.0)
-    pathutil (0.16.2)
-      forwardable-extended (~> 2.6)
-    public_suffix (5.0.4)
-    rake (13.1.0)
-    rb-fsevent (0.11.2)
-    rb-inotify (0.10.1)
-      ffi (~> 1.0)
-    rexml (3.2.6)
-    rouge (4.2.0)
-    safe_yaml (1.0.5)
-    sass-embedded (1.63.6)
-      google-protobuf (~> 3.23)
-      rake (>= 13.0.0)
-    terminal-table (3.0.2)
-      unicode-display_width (>= 1.1.1, < 3)
-    unicode-display_width (2.5.0)
-    webrick (1.8.1)
-
-PLATFORMS
-  x86_64-linux
-
-DEPENDENCIES
-  jekyll (~> 4.3.3)
-  just-the-docs (= 0.8.1)
-
-BUNDLED WITH
-   2.2.5
@@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly

 ```
 cargo install svm-rs
-svm install 0.8.25
-svm use 0.8.25
+svm install 0.8.16
+svm use 0.8.16
 ```

 ### Install Solidity Compiler Version Manager

 ```
 cargo install svm-rs
-svm install 0.8.25
-svm use 0.8.25
+svm install 0.8.16
+svm use 0.8.16
 ```

 ### Install foundry (for tests)
@@ -1,14 +0,0 @@
-title: Serai Documentation
-description: Documentation for the Serai protocol.
-theme: just-the-docs
-
-url: https://docs.serai.exchange
-
-callouts:
-  warning:
-    title: Warning
-    color: red
-
-  definition:
-    title: Definition
-    color: blue
@@ -1,19 +0,0 @@
----
-title: Automatic Market Makers
-layout: default
-nav_order: 2
----
-
-# Automatic Market Makers
-
-*text on how AMMs work*
-
-Serai uses a symmetric liquidity pool with the `xy=k` formula.
-
-Concentrated liquidity would presumably offer less slippage on swaps, and there are
-[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420).
-Unfortunately, it effectively requires active management of provided liquidity.
-This disenfranchises small liquidity providers who may not have the knowledge
-and resources necessary to perform such management. Since Serai is expected to
-have a community-bootstrapped start, starting with concentrated liquidity would
-accordingly be contradictory.
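A worked example of the `xy=k` rule the deleted page referenced: a swap of `dx` into the pool must keep the product of reserves constant, so the output is `dy = y - k/(x + dx)`. A hypothetical, fee-less sketch (real pools also charge a fee and handle precision more carefully):

```rust
/// Fee-less constant-product (xy = k) swap: given pool reserves and an input
/// amount, return the output amount which keeps x * y invariant.
fn constant_product_out(x_reserve: u128, y_reserve: u128, dx: u128) -> u128 {
  let k = x_reserve * y_reserve;
  // New x reserve after depositing dx
  let new_x = x_reserve + dx;
  // y must shrink so new_x * new_y >= k; rounding up favors the pool
  let new_y = k.div_ceil(new_x);
  y_reserve - new_y
}

fn main() {
  // A 100 BTC : 10_000 XMR pool; swapping 10 BTC in yields only 909 XMR,
  // noticeably worse than the 100:1 spot price. That gap is the slippage
  // which concentrated liquidity aims to reduce.
  assert_eq!(constant_product_out(100, 10_000, 10), 909);
}
```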
@@ -1,7 +0,0 @@
----
-title: Cross-Chain Architecture
-layout: default
-nav_order: 3
----
-
-# Cross-Chain Architecture
@@ -1,6 +0,0 @@
----
-title: Genesis
-layout: default
-nav_order: 1
-parent: Economics
----
@@ -1,45 +0,0 @@
----
-title: Economics
-layout: default
-nav_order: 4
-has_children: true
----
-
-# Economics
-
-Serai's economics change depending on which of three eras is currently
-occurring.
-
-## Genesis Era
-
-The network starts with the "Genesis" era, where the goal of the network is to
-attract the liquidity necessary to facilitate swaps. This period will last for
-30 days and will let anyone add liquidity to the protocol. Only with its
-conclusion will SRI start being distributed.
-
-After the Genesis era, the network enters the "Pre-Economic Security" era.
-
-## Pre-Economic Security
-
-{: .definition-title }
-> Definition: Economic Security
->
-> Economic security is derived from it being unprofitable to misbehave.
-> This holds when the economic penalty presumed to occur upon misbehavior
-> exceeds the value which would presumably be gained.
-> Accordingly, rational actors would behave properly, causing the protocol to
-> maintain its integrity.
->
-> For Serai specifically, the stake required to produce unintended signatures
-> must exceed the value accessible via producing unintended signatures.
-
-With liquidity provided, and swaps enabled, the goal is to have validators stake
-sufficiently for economic security to be achieved. This is primarily via
-offering freshly minted, staked SRI to would-be validators who decide to swap
-external coins for their stake.
-
-## Post-Economic Security
-
-Having achieved economic security, the protocol changes its economics one last
-time (barring future upgrades to the protocol) to a 'normal' state of
-operations.
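Restating the deleted page's definition of economic security as a single inequality (symbols mine, not the document's): with $S$ the stake slashed upon misbehavior and $V$ the value extractable by misbehaving,

```latex
S_{\text{slashed}} \;>\; V_{\text{extractable}}
\qquad\Longrightarrow\qquad
\text{misbehavior is unprofitable for rational actors.}
```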
@@ -1,6 +0,0 @@
----
-title: Post-Economic Security
-layout: default
-nav_order: 3
-parent: Economics
----
@@ -1,6 +0,0 @@
----
-title: Pre-Economic Security
-layout: default
-nav_order: 2
-parent: Economics
----
@@ -1,32 +0,0 @@
----
-title: Home
-layout: home
-nav_order: 1
----
-
-{: .warning }
-This documentation site is still under active development and may have missing
-sections, errors, and typos. Even once this documentation site is 'complete', it
-may become out-of-date (as Serai is an evolving protocol yet to release) or have
-minor errors.
-
-# Serai
-
-Serai is a fairly launched cross-chain decentralized exchange, integrating
-Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR).
-
-The Serai mainnet has yet to launch, and until then, all details are subject to
-change.
-
-Prior to the Serai mainnet launching, SRI, Serai's native coin, will not
-exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale,
-no developers' tax/fund, and no airdrop for out-of-mainnet activity.
-
-Out-of-mainnet activity includes:
-
-- Being a community member (such as on Discord or on Twitter)
-- Participating in testnets
-- Contributing to the GitHub
-
-None of these will be awarded any airdrop. All distributions of SRI will happen
-on-chain per the protocols' defined rules, based on on-chain activity.
@@ -1,21 +0,0 @@
----
-title: Coordinator
-layout: default
-nav_order: 3
-parent: Infrastructure
----
-
-# Coordinator
-
-The coordinator is a local service which communicates with other validators'
-coordinators. It provides a verifiable broadcast layer for various consensus
-messages, such as agreement on external blockchains, key generation and signing
-protocols, and the latest Serai block.
-
-The verifiable broadcast layer is implemented via a blockchain, referred to as a
-Tributary, which is agreed upon using Tendermint consensus. This consensus is
-not that offered by Tendermint Core/CometBFT, as used in the Cosmos SDK
-(historically/presently), but our own implementation designed to be used as a
-library and not as another daemon. Tributaries are ephemeral, only used by the
-current validators, and deleted upon the next epoch. All of the results from it
-are verifiable via the external network and the Serai blockchain alone.
@@ -1,6 +0,0 @@
----
-title: Infrastructure
-layout: default
-nav_order: 6
-has_children: true
----
@@ -1,29 +0,0 @@
----
-title: Message Queue
-layout: default
-nav_order: 1
-parent: Infrastructure
----
-
-# Message Queue
-
-The Message Queue is a microservice to authenticate and relay messages between
-services. It offers just three functions:
-
-1) Queue a message.
-
-2) Receive the next message.
-
-3) Acknowledge a message, removing it from the queue.
-
-This ensures messages are delivered between services, with their order
-preserved. This also ensures that if a service reboots while handling a message,
-it'll still handle the message once rebooted (and the message will not be lost).
-
-The Message Queue also aims to offer increased liveliness and performance.
-If services directly communicated, the rate at which one service could operate
-would always be bottlenecked by the service it communicates with. If the
-receiving service ever went offline, the sending service wouldn't be able to
-deliver messages until the receiver came back online, halting its own work. By
-defining a dedicated microservice, with a lack of complex logic, it's much less
-likely to go offline or suffer from degraded performance.
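A minimal in-memory sketch of the three-function surface the deleted page described (illustrative names, not the service's actual RPC; the real Message Queue authenticates callers and persists messages to disk):

```rust
use std::collections::VecDeque;

/// Toy message queue: queue, receive next, acknowledge.
struct MessageQueue {
  messages: VecDeque<(u64, Vec<u8>)>,
  next_id: u64,
}

impl MessageQueue {
  fn new() -> Self {
    MessageQueue { messages: VecDeque::new(), next_id: 0 }
  }

  /// 1) Queue a message.
  fn queue(&mut self, msg: Vec<u8>) {
    self.messages.push_back((self.next_id, msg));
    self.next_id += 1;
  }

  /// 2) Receive the next message, without removing it. If the receiving
  /// service reboots mid-handling, it receives the same message again,
  /// so the message is not lost.
  fn next(&self) -> Option<(u64, &[u8])> {
    self.messages.front().map(|(id, msg)| (*id, msg.as_slice()))
  }

  /// 3) Acknowledge a message, removing it from the queue.
  fn ack(&mut self, id: u64) {
    if self.messages.front().map(|(front_id, _)| *front_id) == Some(id) {
      self.messages.pop_front();
    }
  }
}

fn main() {
  let mut queue = MessageQueue::new();
  queue.queue(b"batch 0".to_vec());
  queue.queue(b"batch 1".to_vec());
  let (id, msg) = queue.next().unwrap();
  assert_eq!(msg, b"batch 0".as_slice());
  queue.ack(id);
  // Order is preserved: the next message is only reachable after the ack
  assert_eq!(queue.next().unwrap().1, b"batch 1".as_slice());
}
```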
@@ -1,21 +0,0 @@
----
-title: Processor
-layout: default
-nav_order: 2
-parent: Infrastructure
----
-
-# Processor
-
-The processor performs several important tasks with regards to the external
-network. Each of them is documented in the following sections.
-
-## Key Generation
-
-## Scanning
-
-## Signing Batches
-
-## Planning Transactions
-
-## Cosigning
@@ -1,6 +0,0 @@
----
-title: Serai
-layout: default
-nav_order: 4
-parent: Infrastructure
----
@@ -1,6 +0,0 @@
----
-title: Integrating with Serai
-layout: default
-nav_order: 7
-has_children: true
----
(binary image changed; 1.1 KiB before and after)
@@ -1,44 +0,0 @@
----
-title: Protocol Changes
-layout: default
-nav_order: 5
----
-
-# Protocol Changes
-
-The protocol has no central authority nor organization nor actors (such as
-liquidity providers/validators) who can compel new protocol rules. The Serai
-protocol is as-written with all granted functionality and declared rules
-present.
-
-Validators are explicitly granted the ability to signal for two things to occur:
-
-### 1) Halt another validator set.
-
-This will presumably occur if another validator set turns malicious and is the
-expected incident response in order to apply an economic penalty of ideally
-greater value than damage wreaked. Halting a validator set prevents further
-publication of `Batch`s, preventing improper actions on the Serai blockchain,
-and preventing validators from unstaking (as unstaking only occurs once future
-validator sets have accepted responsibility, and accepting responsibility
-requires `Batch` publication). This effectively burns the malicious validators'
-stake.
-
-### 2) Retire the protocol.
-
-A supermajority of validators may favor a signal (an opaque 32-byte ID). A
-common signal gaining sufficient favor will cause the protocol to stop producing
-blocks in two weeks.
-
-Nodes will presumably, as individual entities, hard fork to new consensus rules.
-These rules presumably will remove the rule to stop producing blocks in two
-weeks, they may declare new validators, and they may declare new functionality
-entirely.
-
-While nodes individually hard fork, across every hard fork the state of the
-various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`)
-remains intact (unless the new rules modify such state). These coins can still
-be burned with instructions (unless the new rules prevent that) and if a
-validator set doesn't send `XYZ` as expected, they can be halted (effectively
-burning their `SRI` stake). Accordingly, every node decides if and how to
-participate going forward, with the abilities and powers they declare themselves to have.
@@ -1,6 +0,0 @@
----
-title: Running a Validator
-layout: default
-nav_order: 8
-has_children: true
----
@@ -1,20 +1,17 @@
-# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT)
-FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic
+FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder

 # Move to a Debian package snapshot
 RUN rm -rf /etc/apt/sources.list.d/debian.sources && \
   rm -rf /var/lib/apt/lists/* && \
-  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \
+  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \
   apt update

 # Install dependencies
-RUN apt update && apt upgrade && apt install clang -y
+RUN apt install clang -y

 # Add the wasm toolchain
 RUN rustup target add wasm32-unknown-unknown

-FROM deterministic
-
 # Add files for build
 ADD patches /serai/patches
 ADD common /serai/common
@@ -33,8 +30,3 @@ ADD Cargo.lock /serai
 ADD AGPL-3.0 /serai

 WORKDIR /serai

-# Build the runtime, copying it to the volume if it exists
-CMD cargo build --release -p serai-runtime && \
-  mkdir -p /volume && \
-  cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm
@@ -43,7 +43,8 @@ CMD ["/run.sh"]
     network.label()
   );

-  let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin;
+  let run =
+    os(Os::Debian, "RUN mkdir /volume && chown bitcoin:bitcoin /volume", "bitcoin") + &run_bitcoin;
   let res = setup + &run;

   let mut bitcoin_path = orchestration_path.to_path_buf();
@@ -55,9 +55,12 @@ CMD ["/run.sh"]
     network.label(),
   );

-  let run =
-    crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") +
-    &run_monero;
+  let run = crate::os(
+    os,
+    &("RUN mkdir /volume && chown monero /volume\r\n".to_string() +
+      if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }),
+    "monero",
+  ) + &run_monero;
   let res = setup + &run;

   let mut monero_path = orchestration_path.to_path_buf();
@@ -11,7 +11,7 @@ pub fn coordinator(
   orchestration_path: &Path,
   network: Network,
   coordinator_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 ) {
   let db = network.db();
   let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" };
@@ -27,16 +27,13 @@ pub fn coordinator(
 RUN apt install -y ca-certificates
 "#;

-  #[rustfmt::skip]
-  const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error";
-
   let env_vars = [
     ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
     ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())),
-    ("DB_PATH", "/volume/coordinator-db".to_string()),
+    ("DB_PATH", "./coordinator-db".to_string()),
     ("SERAI_KEY", hex::encode(serai_key.to_repr())),
     ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())),
-    ("RUST_LOG", DEFAULT_RUST_LOG.to_string()),
+    ("RUST_LOG", "serai_coordinator=debug,tributary_chain=debug,tendermint=debug".to_string()),
   ];
   let mut env_vars_str = String::new();
   for (env_var, value) in env_vars {
@@ -2,14 +2,7 @@
 // TODO: Generate keys for a validator and the infra

 use core::ops::Deref;
-use std::{
-  collections::{HashSet, HashMap},
-  env,
-  path::PathBuf,
-  io::Write,
-  fs,
-  process::{Stdio, Command},
-};
+use std::{collections::HashSet, env, path::PathBuf, io::Write, fs, process::Command};

 use zeroize::Zeroizing;

@@ -96,12 +89,8 @@ ENV LD_PRELOAD=libmimalloc.so

 RUN apk update && apk upgrade

-RUN adduser --system --shell /sbin/nologin --disabled-password {user}
-RUN addgroup {user}
-RUN addgroup {user} {user}
-
-# Make the /volume directory and transfer it to the user
-RUN mkdir /volume && chown {user}:{user} /volume
+# System user (not a human), shell of nologin, no password assigned
+RUN adduser -S -s /sbin/nologin -D {user}

 {additional_root}

@@ -121,10 +110,7 @@ RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload

 RUN apt update && apt upgrade -y && apt autoremove -y && apt clean

-RUN useradd --system --user-group --create-home --shell /sbin/nologin {user}
-
-# Make the /volume directory and transfer it to the user
-RUN mkdir /volume && chown {user}:{user} /volume
+RUN useradd --system --create-home --shell /sbin/nologin {user}

 {additional_root}

@@ -143,7 +129,7 @@ fn build_serai_service(release: bool, features: &str, package: &str) -> String {

   format!(
     r#"
-FROM rust:1.77-slim-bookworm as builder
+FROM rust:1.76-slim-bookworm as builder

 COPY --from=mimalloc-debian libmimalloc.so /usr/lib
 RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload
@@ -213,55 +199,6 @@ fn orchestration_path(network: Network) -> PathBuf {
   orchestration_path
 }

-type InfrastructureKeys =
-  HashMap<&'static str, (Zeroizing<<Ristretto as Ciphersuite>::F>, <Ristretto as Ciphersuite>::G)>;
-fn infrastructure_keys(network: Network) -> InfrastructureKeys {
-  // Generate entropy for the infrastructure keys
-
-  let entropy = if network == Network::Dev {
-    // Don't use actual entropy if this is a dev environment
-    Zeroizing::new([0; 32])
-  } else {
-    let path = home::home_dir()
-      .unwrap()
-      .join(".serai")
-      .join(network.label())
-      .join("infrastructure_keys_entropy");
-    // Check if there's existing entropy
-    if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) {
-      assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes");
-      let mut res = Zeroizing::new([0; 32]);
-      res.copy_from_slice(entropy.as_ref());
-      res
-    } else {
-      // If there isn't, generate fresh entropy
-      let mut res = Zeroizing::new([0; 32]);
-      OsRng.fill_bytes(res.as_mut());
-      fs::write(&path, &res).unwrap();
-      res
-    }
-  };
-
-  let mut transcript =
-    RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript");
-  transcript.append_message(b"network", network.label().as_bytes());
-  transcript.append_message(b"entropy", entropy);
-  let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys"));
-
-  let mut key_pair = || {
-    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut rng));
-    let public = Ristretto::generator() * key.deref();
-    (key, public)
-  };
-
-  HashMap::from([
-    ("coordinator", key_pair()),
-    ("bitcoin", key_pair()),
-    ("ethereum", key_pair()),
-    ("monero", key_pair()),
-  ])
-}
-
 fn dockerfiles(network: Network) {
   let orchestration_path = orchestration_path(network);

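Both the removed helper above and its replacement in the next hunk derive every infrastructure key from one 32-byte entropy value, with domain separation coming from transcript labels rather than separate entropy per key. A compiled-down sketch of that pattern, using the same crates the orchestrator imports (the transcript label strings here are illustrative, not the orchestrator's):

```rust
use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

/// Derive a per-service key deterministically from shared entropy: bind the
/// entropy into a transcript, then seed a ChaCha20 RNG with a label-specific
/// seed so distinct labels yield independent keys.
fn derive_key(entropy: [u8; 32], label: &'static [u8]) -> <Ristretto as Ciphersuite>::F {
  let mut transcript = RecommendedTranscript::new(b"Example Infrastructure Keys Transcript");
  transcript.append_message(b"entropy", entropy);
  let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(label));
  <Ristretto as Ciphersuite>::F::random(&mut rng)
}

fn main() {
  // Same entropy + same label => same key (reproducible across runs)
  assert_eq!(derive_key([1; 32], b"coordinator"), derive_key([1; 32], b"coordinator"));
  // Different label => a distinct, domain-separated key
  assert!(derive_key([1; 32], b"coordinator") != derive_key([1; 32], b"bitcoin"));
}
```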
@@ -272,11 +209,28 @@ fn dockerfiles(network: Network) {
     monero_wallet_rpc(&orchestration_path);
   }

-  let mut infrastructure_keys = infrastructure_keys(network);
-  let coordinator_key = infrastructure_keys.remove("coordinator").unwrap();
-  let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap();
-  let ethereum_key = infrastructure_keys.remove("ethereum").unwrap();
-  let monero_key = infrastructure_keys.remove("monero").unwrap();
+  // TODO: Generate infra keys in key_gen, yet service entropy here?
+
+  // Generate entropy for the infrastructure keys
+  let mut entropy = Zeroizing::new([0; 32]);
+  // Only use actual entropy if this isn't a development environment
+  if network != Network::Dev {
+    OsRng.fill_bytes(entropy.as_mut());
+  }
+  let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Transcript");
+  transcript.append_message(b"entropy", entropy);
+  let mut new_rng = |label| ChaCha20Rng::from_seed(transcript.rng_seed(label));
+
+  let mut message_queue_keys_rng = new_rng(b"message_queue_keys");
+  let mut key_pair = || {
+    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut message_queue_keys_rng));
+    let public = Ristretto::generator() * key.deref();
+    (key, public)
+  };
+  let coordinator_key = key_pair();
+  let bitcoin_key = key_pair();
+  let ethereum_key = key_pair();
+  let monero_key = key_pair();

   message_queue(
     &orchestration_path,
@@ -287,9 +241,10 @@ fn dockerfiles(network: Network) {
     monero_key.1,
   );

-  let new_entropy = || {
+  let mut processor_entropy_rng = new_rng(b"processor_entropy");
+  let mut new_entropy = || {
     let mut res = Zeroizing::new([0; 32]);
-    OsRng.fill_bytes(res.as_mut());
+    processor_entropy_rng.fill_bytes(res.as_mut());
     res
   };
   processor(
@@ -321,9 +276,9 @@ fn dockerfiles(network: Network) {
     Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(*serai_key_repr).unwrap())
   };

-  coordinator(&orchestration_path, network, coordinator_key.0, &serai_key);
+  coordinator(&orchestration_path, network, coordinator_key.0, serai_key);

-  serai(&orchestration_path, network, &serai_key);
+  serai(&orchestration_path, network);
 }

 fn key_gen(network: Network) {
@@ -370,87 +325,6 @@ fn start(network: Network, services: HashSet<String>) {
       _ => panic!("starting unrecognized service"),
     };

-    // If we're building the Serai service, first build the runtime
-    let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label());
-    if name == "serai" {
-      // Check if it's built by checking if the volume has the expected runtime file
-      let built = || {
-        if let Ok(path) = Command::new("docker")
-          .arg("volume")
-          .arg("inspect")
-          .arg("-f")
-          .arg("{{ .Mountpoint }}")
-          .arg(&serai_runtime_volume)
-          .output()
-        {
-          if let Ok(path) = String::from_utf8(path.stdout) {
-            if let Ok(iter) = std::fs::read_dir(PathBuf::from(path.trim())) {
-              for item in iter.flatten() {
-                if item.file_name() == "serai.wasm" {
-                  return true;
-                }
-              }
-            }
-          }
-        }
-        false
-      };
-
-      if !built() {
-        let mut repo_path = env::current_exe().unwrap();
-        repo_path.pop();
-        if repo_path.as_path().ends_with("deps") {
-          repo_path.pop();
-        }
-        assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release"));
-        repo_path.pop();
-        assert!(repo_path.as_path().ends_with("target"));
-        repo_path.pop();
-
-        // Build the image to build the runtime
-        if !Command::new("docker")
-          .current_dir(&repo_path)
-          .arg("build")
-          .arg("-f")
-          .arg("orchestration/runtime/Dockerfile")
-          .arg(".")
-          .arg("-t")
-          .arg(format!("serai-{}-runtime-img", network.label()))
-          .spawn()
-          .unwrap()
-          .wait()
-          .unwrap()
-          .success()
-        {
-          panic!("failed to build runtime image");
-        }
-
-        // Run the image, building the runtime
-        println!("Building the Serai runtime");
-        let container_name = format!("serai-{}-runtime", network.label());
-        let _ =
-          Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait();
-        let _ = Command::new("docker")
-          .arg("run")
-          .arg("--name")
-          .arg(container_name)
-          .arg("--volume")
-          .arg(format!("{serai_runtime_volume}:/volume"))
-          .arg(format!("serai-{}-runtime-img", network.label()))
-          .spawn();
-
-        // Wait until it's built
-        let mut ticks = 0;
-        while !built() {
-          std::thread::sleep(core::time::Duration::from_secs(60));
-          ticks += 1;
-          if ticks > 6 * 60 {
-            panic!("couldn't build the runtime after 6 hours")
-          }
-        }
-      }
-    }
-
     // Build it
     println!("Building {service}");
     docker::build(&orchestration_path(network), network, name);
@@ -461,10 +335,6 @@ fn start(network: Network, services: HashSet<String>) {
         .arg("container")
         .arg("inspect")
        .arg(&docker_name)
-        // Use null for all IO to silence 'container does not exist'
-        .stdin(Stdio::null())
-        .stdout(Stdio::null())
-        .stderr(Stdio::null())
        .status()
        .unwrap()
        .success()
@@ -476,53 +346,25 @@ fn start(network: Network, services: HashSet<String>) {
      let command = command.arg("create").arg("--name").arg(&docker_name);
      let command = command.arg("--network").arg("serai");
      let command = command.arg("--restart").arg("always");
-      let command = command.arg("--log-opt").arg("max-size=100m");
-      let command = command.arg("--log-opt").arg("max-file=3");
-      let command = if network == Network::Dev {
-        command
-      } else {
-        // Assign a persistent volume if this isn't for Dev
-        command.arg("--volume").arg(volume)
-      };
      let command = match name {
        "bitcoin" => {
-          // Expose the RPC for tests
          if network == Network::Dev {
            command.arg("-p").arg("8332:8332")
          } else {
-            command
+            command.arg("--volume").arg(volume)
          }
        }
        "monero" => {
-          // Expose the RPC for tests
          if network == Network::Dev {
            command.arg("-p").arg("18081:18081")
          } else {
-            command
+            command.arg("--volume").arg(volume)
          }
        }
        "monero-wallet-rpc" => {
          assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev");
-          // Expose the RPC for tests
          command.arg("-p").arg("18082:18082")
        }
-        "coordinator" => {
-          if network == Network::Dev {
-            command
-          } else {
-            // Publish the port
-            command.arg("-p").arg("30563:30563")
-          }
-        }
-        "serai" => {
-          let command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime"));
-          if network == Network::Dev {
-            command
-          } else {
-            // Publish the port
-            command.arg("-p").arg("30333:30333")
-          }
-        }
        _ => command,
      };
      assert!(
@@ -546,10 +388,10 @@ Serai Orchestrator v0.0.1

 Commands:
   key_gen *network*
-    Generate a key for the validator.
+    Generates a key for the validator.

   setup *network*
-    Generate the Dockerfiles for every Serai service.
+    Generate infrastructure keys and the Dockerfiles for every Serai service.

   start *network* [service1, service2...]
     Start the specified services for the specified network ("dev" or "testnet").
@@ -20,8 +20,8 @@ pub fn message_queue(
     ("BITCOIN_KEY", hex::encode(bitcoin_key.to_bytes())),
     ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())),
     ("MONERO_KEY", hex::encode(monero_key.to_bytes())),
-    ("DB_PATH", "/volume/message-queue-db".to_string()),
-    ("RUST_LOG", "info,serai_message_queue=trace".to_string()),
+    ("DB_PATH", "./message-queue-db".to_string()),
+    ("RUST_LOG", "serai_message_queue=trace".to_string()),
   ];
   let mut env_vars_str = String::new();
   for (env_var, value) in env_vars {
@@ -40,15 +40,15 @@ RUN apt install -y ca-certificates
   };

   let env_vars = [
-    ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
+    ("MESSAGE_QUEUE_RPC", format!("serai-{}-message_queue", network.label())),
     ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())),
     ("ENTROPY", hex::encode(entropy.as_ref())),
     ("NETWORK", coin.to_string()),
     ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")),
     ("NETWORK_RPC_HOSTNAME", hostname),
     ("NETWORK_RPC_PORT", format!("{port}")),
-    ("DB_PATH", "/volume/processor-db".to_string()),
-    ("RUST_LOG", "info,serai_processor=debug".to_string()),
+    ("DB_PATH", "./processor-db".to_string()),
+    ("RUST_LOG", "serai_processor=debug".to_string()),
   ];
   let mut env_vars_str = String::new();
   for (env_var, value) in env_vars {
@@ -1,26 +1,14 @@
 use std::{path::Path};

-use zeroize::Zeroizing;
-use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
-
 use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};

-pub fn serai(
-  orchestration_path: &Path,
-  network: Network,
-  serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-) {
+pub fn serai(orchestration_path: &Path, network: Network) {
   // Always builds in release for performance reasons
   let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node");
   let setup_fast_epoch =
     mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node");

-  let env_vars = [("KEY", hex::encode(serai_key.to_repr()))];
-  let mut env_vars_str = String::new();
-  for (env_var, value) in env_vars {
-    env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
-  }
+  // TODO: Review the ports exposed here

   let run_serai = format!(
     r#"
 # Copy the Serai binary and relevant license
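The `env_vars_str` line removed above double-escapes braces so that `format!` emits shell parameter expansion, letting the container override each variable at runtime while keeping a baked-in default. A tiny self-contained check of what it renders (the variable and value are hypothetical):

```rust
fn main() {
  let (env_var, value) = ("KEY", "abc");
  // {{ and }} are literal braces; {env_var} and {value} are interpolated
  let rendered = format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
  // Shell parameter expansion: use $KEY if already set, else default to "abc"
  assert_eq!(rendered, r#"KEY=${KEY:="abc"} "#);
}
```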
@@ -28,12 +16,12 @@ COPY --from=builder --chown=serai /serai/bin/serai-node /bin/
 COPY --from=builder --chown=serai /serai/AGPL-3.0 .

 # Run the Serai node
-EXPOSE 30333 9944
+EXPOSE 30333 9615 9933 9944

 ADD /orchestration/{}/serai/run.sh /
-CMD {env_vars_str} "/run.sh"
+CMD ["/run.sh"]
 "#,
-    network.label(),
+    network.label()
   );

   let run = os(Os::Debian, "", "serai") + &run_serai;
@@ -1,3 +1,3 @@
 #!/bin/sh

-serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator
+exit 1
@@ -1,26 +0,0 @@
-[package]
-name = "rocksdb"
-version = "0.21.0"
-description = "rocksdb which patches to the latest update"
-license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/patches/rocksdb"
-authors = ["Luke Parker <lukeparker5132@gmail.com>"]
-keywords = []
-edition = "2021"
-rust-version = "1.70"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[dependencies]
-rocksdb = { version = "0.22", default-features = false }
-
-[features]
-jemalloc = []
-snappy = ["rocksdb/snappy"]
-lz4 = ["rocksdb/lz4"]
-zstd = ["rocksdb/zstd"]
-zlib = ["rocksdb/zlib"]
-bzip2 = ["rocksdb/bzip2"]
-default = ["snappy", "lz4", "zstd", "zlib", "bzip2"]
@@ -1 +0,0 @@
-pub use rocksdb::*;
Some files were not shown because too many files have changed in this diff.