Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-08 20:29:23 +00:00)

Compare commits: 10s-tender...testnet-2 (89 commits)
| SHA1 |
|---|
| 2ba6d77ee7 |
| 67a0ff825b |
| 6518379981 |
| 0c6ab50e35 |
| f73ce37e18 |
| 973dcf065e |
| 8f5aaa8492 |
| 93ba8d840a |
| 485e454680 |
| c3b6abf020 |
| f3ccf1cab0 |
| 0deee0ec6b |
| 6b428948d4 |
| 6986257d4f |
| a3c37cba21 |
| b5f2ff1397 |
| c84931c6ae |
| 63abf2d022 |
| a62d2d05ad |
| 967cc16748 |
| ab4b8cc2d5 |
| 387ccbad3a |
| 26cdfdd824 |
| 68e77384ac |
| 68da88c1f3 |
| 2b481ab71e |
| 05e6d81948 |
| e426cd00bd |
| 09e3881b7d |
| 10124ac4a8 |
| 1987983f88 |
| fcad402186 |
| ab4d79628d |
| 93be7a3067 |
| 63521f6a96 |
| 3d855c75be |
| 07df9aa035 |
| bc44fbdbac |
| 4cacce5e55 |
| 7408e26781 |
| 1f92e1cbda |
| 333a9571b8 |
| b7d49af1d5 |
| 5ea3b1bf97 |
| 2a31d8552e |
| bca3728a10 |
| 4914420a37 |
| f11a08c436 |
| 35b58a45bd |
| af9b1ad5f9 |
| e5afcda76b |
| 08c7c1b413 |
| bdf5a66e95 |
| e861859dec |
| 6658d95c85 |
| 2f07d04d88 |
| e0259f2fe5 |
| fab7a0a7cb |
| 84cee06ac1 |
| c706d8664a |
| 1f2b9376f9 |
| 13b147cbf6 |
| 4a6496a90b |
| 9662d94bf9 |
| 233164cefd |
| 442d8c02fc |
| d1be9eaa2d |
| c32d3413ba |
| a3a009a7e9 |
| 0889627e60 |
| ace41c79fd |
| f7d16b3fc5 |
| 157acc47ca |
| ae0ecf9efe |
| 6374d9987e |
| c93f6bf901 |
| 61a81e53e1 |
| 68dc872b88 |
| 89b237af7e |
| 2347bf5fd3 |
| 97f433c694 |
| 10f5ec51ca |
| 454bebaa77 |
| 0d569ff7a3 |
| 480acfd430 |
| e266bc2e32 |
| 6c8a0bfda6 |
| 06c23368f2 |
| 5629c94b8b |
@@ -42,8 +42,8 @@ runs:
     shell: bash
     run: |
       cargo install svm-rs
-      svm install 0.8.16
-      svm use 0.8.16
+      svm install 0.8.25
+      svm use 0.8.25

  # - name: Cache Rust
  #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/workflows/pages.yml (vendored, new file, 90 lines)
@@ -0,0 +1,90 @@
+# MIT License
+#
+# Copyright (c) 2022 just-the-docs
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# Sample workflow for building and deploying a Jekyll site to GitHub Pages
+name: Deploy Jekyll site to Pages
+
+on:
+  push:
+    branches:
+      - "develop"
+    paths:
+      - "docs/**"
+
+  # Allows you to run this workflow manually from the Actions tab
+  workflow_dispatch:
+
+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
+permissions:
+  contents: read
+  pages: write
+  id-token: write
+
+# Allow one concurrent deployment
+concurrency:
+  group: "pages"
+  cancel-in-progress: true
+
+jobs:
+  # Build job
+  build:
+    runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: docs
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+      - name: Setup Ruby
+        uses: ruby/setup-ruby@v1
+        with:
+          bundler-cache: true
+          cache-version: 0
+          working-directory: "${{ github.workspace }}/docs"
+      - name: Setup Pages
+        id: pages
+        uses: actions/configure-pages@v3
+      - name: Build with Jekyll
+        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+        env:
+          JEKYLL_ENV: production
+      - name: Upload artifact
+        uses: actions/upload-pages-artifact@v1
+        with:
+          path: "docs/_site/"
+
+  # Deployment job
+  deploy:
+    environment:
+      name: github-pages
+      url: ${{ steps.deployment.outputs.page_url }}
+    runs-on: ubuntu-latest
+    needs: build
+    steps:
+      - name: Deploy to GitHub Pages
+        id: deployment
+        uses: actions/deploy-pages@v2
Cargo.lock (generated, 763 lines changed): diff suppressed because it is too large.
@@ -3,6 +3,7 @@ resolver = "2"
 members = [
   # Version patches
   "patches/zstd",
+  "patches/rocksdb",
   "patches/proc-macro-crate",

   # std patches
@@ -112,6 +113,8 @@ dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "a

 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
+# Needed for WAL compression
+rocksdb = { path = "patches/rocksdb" }
 # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
 proc-macro-crate = { path = "patches/proc-macro-crate" }

@@ -5,13 +5,16 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
 experience. Funds are stored in an economically secured threshold-multisig
 wallet.

-[Getting Started](docs/Getting%20Started.md)
+[Getting Started](spec/Getting%20Started.md)

 ### Layout

 - `audits`: Audits for various parts of Serai.

-- `docs`: Documentation on the Serai protocol.
+- `spec`: The specification of the Serai protocol, both internally and as
+  networked.
+
+- `docs`: User-facing documentation on the Serai protocol.

 - `common`: Crates containing utilities common to a variety of areas under
   Serai, none neatly fitting under another category.
coins/ethereum/.gitignore (vendored, 6 lines changed)
@@ -1,3 +1,7 @@
-# solidity build outputs
+# Solidity build outputs
 cache
 artifacts
+
+# Auto-generated ABI files
+src/abi/schnorr.rs
+src/abi/router.rs
@@ -30,6 +30,9 @@ ethers-core = { version = "2", default-features = false }
 ethers-providers = { version = "2", default-features = false }
 ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }

+[build-dependencies]
+ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
+
 [dev-dependencies]
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

@@ -1,6 +1,20 @@
+use std::process::Command;
+
+use ethers_contract::Abigen;
+
 fn main() {
-  println!("cargo:rerun-if-changed=contracts");
-  println!("cargo:rerun-if-changed=artifacts");
+  println!("cargo:rerun-if-changed=contracts/*");
+  println!("cargo:rerun-if-changed=artifacts/*");
+
+  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
+    .unwrap()
+    .lines()
+  {
+    if let Some(version) = line.strip_prefix("Version: ") {
+      let version = version.split('+').next().unwrap();
+      assert_eq!(version, "0.8.25");
+    }
+  }

   #[rustfmt::skip]
   let args = [
@@ -8,8 +22,21 @@ fn main() {
     "-o", "./artifacts", "--overwrite",
     "--bin", "--abi",
     "--optimize",
-    "./contracts/Schnorr.sol"
+    "./contracts/Schnorr.sol", "./contracts/Router.sol",
   ];
-  assert!(std::process::Command::new("solc").args(args).status().unwrap().success());
+  assert!(Command::new("solc").args(args).status().unwrap().success());
+
+  Abigen::new("Schnorr", "./artifacts/Schnorr.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/schnorr.rs")
+    .unwrap();
+
+  Abigen::new("Router", "./artifacts/Router.abi")
+    .unwrap()
+    .generate()
+    .unwrap()
+    .write_to_file("./src/abi/router.rs")
+    .unwrap();
 }
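The build script now pins solc 0.8.25 and writes abigen bindings into src/abi/. A minimal sketch of consuming such a generated binding, assuming the module layout this change introduces; the RPC endpoint and contract address are placeholders, and the call mirrors Solidity's verify(parity, px, message, c, s):

use std::sync::Arc;

use ethers_providers::{Provider, Http};

// Re-exported abigen binding, per `pub use crate::abi::schnorr::*;` below
use ethereum_serai::schnorr::Schnorr;

async fn check_verify(addr: ethers_core::types::H160) -> eyre::Result<bool> {
  // Placeholder endpoint; any HTTP JSON-RPC node works for a view call
  let client = Arc::new(Provider::<Http>::try_from("http://127.0.0.1:8545")?);
  let contract = Schnorr::new(addr, client);
  // Zeroed arguments only exercise the generated binding's shape
  Ok(contract.verify(27, [0; 32], [0; 32], [0; 32], [0; 32]).call().await?)
}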
coins/ethereum/contracts/Router.sol (new file, 90 lines)
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: AGPLv3
+pragma solidity ^0.8.0;
+
+import "./Schnorr.sol";
+
+contract Router is Schnorr {
+  // Contract initializer
+  // TODO: Replace with a MuSig of the genesis validators
+  address public initializer;
+
+  // Nonce is incremented for each batch of transactions executed
+  uint256 public nonce;
+
+  // fixed parity for the public keys used in this contract
+  uint8 constant public KEY_PARITY = 27;
+
+  // current public key's x-coordinate
+  // note: this key must always use the fixed parity defined above
+  bytes32 public seraiKey;
+
+  struct OutInstruction {
+    address to;
+    uint256 value;
+    bytes data;
+  }
+
+  struct Signature {
+    bytes32 c;
+    bytes32 s;
+  }
+
+  // success is a uint256 representing a bitfield of transaction successes
+  event Executed(uint256 nonce, bytes32 batch, uint256 success);
+
+  // error types
+  error NotInitializer();
+  error AlreadyInitialized();
+  error InvalidKey();
+  error TooManyTransactions();
+
+  constructor() {
+    initializer = msg.sender;
+  }
+
+  // initSeraiKey can be called by the contract initializer to set the first
+  // public key, only if the public key has yet to be set.
+  function initSeraiKey(bytes32 _seraiKey) external {
+    if (msg.sender != initializer) revert NotInitializer();
+    if (seraiKey != 0) revert AlreadyInitialized();
+    if (_seraiKey == bytes32(0)) revert InvalidKey();
+    seraiKey = _seraiKey;
+  }
+
+  // updateSeraiKey validates the given Schnorr signature against the current public key,
+  // and if successful, updates the contract's public key to the given one.
+  function updateSeraiKey(
+    bytes32 _seraiKey,
+    Signature memory sig
+  ) public {
+    if (_seraiKey == bytes32(0)) revert InvalidKey();
+    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
+    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
+    seraiKey = _seraiKey;
+  }
+
+  // execute accepts a list of transactions to execute as well as a Schnorr signature.
+  // if signature verification passes, the given transactions are executed.
+  // if signature verification fails, this function will revert.
+  function execute(
+    OutInstruction[] calldata transactions,
+    Signature memory sig
+  ) public {
+    if (transactions.length > 256) revert TooManyTransactions();
+
+    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
+    // This prevents re-entrancy from causing double spends yet does allow
+    // out-of-order execution via re-entrancy
+    nonce++;
+    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
+
+    uint256 successes;
+    for(uint256 i = 0; i < transactions.length; i++) {
+      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
+      assembly {
+        successes := or(successes, shl(i, success))
+      }
+    }
+    emit Executed(nonce, message, successes);
+  }
+}
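Off-chain signers must reproduce the exact payload execute() verifies: keccak256(abi.encode("execute", nonce, transactions)). A hedged sketch using ethers' AbiEncode, where the (to, value, data) tuple stands in for the abigen-generated OutInstruction struct on the assumption both tokenize as the same ABI tuple (the test in src/tests/router.rs, further below, uses the struct directly):

use ethers_core::{
  abi::AbiEncode,
  types::{Bytes, H160, U256},
  utils::keccak256,
};

// Recompute the Router's execute() signing payload off-chain
fn execute_message(nonce: U256, outs: Vec<(H160, U256, Bytes)>) -> [u8; 32] {
  keccak256(("execute".to_string(), nonce, outs).encode())
}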
@@ -7,29 +7,32 @@ contract Schnorr {
   uint256 constant public Q =
     0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

+  error InvalidSOrA();
+  error InvalidSignature();
+
   // parity := public key y-coord parity (27 or 28)
   // px := public key x-coord
-  // message := 32-byte message
+  // message := 32-byte hash of the message
+  // c := schnorr signature challenge
   // s := schnorr signature
-  // e := schnorr signature challenge
   function verify(
     uint8 parity,
     bytes32 px,
     bytes32 message,
-    bytes32 s,
-    bytes32 e
+    bytes32 c,
+    bytes32 s
   ) public view returns (bool) {
     // ecrecover = (m, v, r, s);
-    bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q));
+    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
+    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));

-    require(sp != 0);
+    if (sa == 0) revert InvalidSOrA();
     // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to
-    // check if they're zero.will make me
-    address R = ecrecover(sp, parity, px, ep);
-    require(R != address(0), "ecrecover failed");
-    return e == keccak256(
+    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
+    // check if they're zero.
+    address R = ecrecover(sa, parity, px, ca);
+    if (R == address(0)) revert InvalidSignature();
+    return c == keccak256(
       abi.encodePacked(R, uint8(parity), px, block.chainid, message)
     );
   }
 }
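verify() reuses the ecrecover precompile as a cheap scalar-multiplication oracle. A sketch of the algebra, matching the preprocess_signature_for_ecrecover helper in src/tests/crypto.rs below: ecrecover(h, v, r, s') returns the address of r^-1 * (s'*R - h*G), where R is the point with x-coordinate r and y-parity v. Passing h = -(s*px), r = px (the key's x-coordinate), and s' = -(c*px) therefore recovers the address of s*G - c*A. For a valid Schnorr signature s = k + c*a over nonce R = k*G and key A = a*G, that is address(R), which the contract feeds back into the challenge hash and compares against c.

use k256::Scalar;

// Map a Schnorr signature (c, s) under key x-coordinate px onto the
// (message-hash, r, s) slots of ecrecover; the caller supplies the parity as v
fn ecrecover_args(px: Scalar, c: Scalar, s: Scalar) -> (Scalar, Scalar, Scalar) {
  (-(s * px), px, -(c * px))
}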
coins/ethereum/src/abi/mod.rs (new file, 6 lines)
@@ -0,0 +1,6 @@
+#[rustfmt::skip]
+#[allow(clippy::all)]
+pub(crate) mod schnorr;
+#[rustfmt::skip]
+#[allow(clippy::all)]
+pub(crate) mod router;
@@ -1,36 +0,0 @@
-use thiserror::Error;
-use eyre::{eyre, Result};
-
-use ethers_providers::{Provider, Http};
-use ethers_contract::abigen;
-
-use crate::crypto::ProcessedSignature;
-
-#[derive(Error, Debug)]
-pub enum EthereumError {
-  #[error("failed to verify Schnorr signature")]
-  VerificationError,
-}
-
-abigen!(Schnorr, "./artifacts/Schnorr.abi");
-
-pub async fn call_verify(
-  contract: &Schnorr<Provider<Http>>,
-  params: &ProcessedSignature,
-) -> Result<()> {
-  if contract
-    .verify(
-      params.parity + 27,
-      params.px.to_bytes().into(),
-      params.message,
-      params.s.to_bytes().into(),
-      params.e.to_bytes().into(),
-    )
-    .call()
-    .await?
-  {
-    Ok(())
-  } else {
-    Err(eyre!(EthereumError::VerificationError))
-  }
-}
@@ -1,50 +1,54 @@
 use sha3::{Digest, Keccak256};

-use group::Group;
+use group::ff::PrimeField;
 use k256::{
   elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint,
+    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
   },
-  AffinePoint, ProjectivePoint, Scalar, U256,
+  ProjectivePoint, Scalar, U256,
 };

-use frost::{algorithm::Hram, curve::Secp256k1};
+use frost::{
+  algorithm::{Hram, SchnorrSignature},
+  curve::Secp256k1,
+};

-pub fn keccak256(data: &[u8]) -> [u8; 32] {
+pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
   Keccak256::digest(data).into()
 }

-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
-}
-
-pub fn address(point: &ProjectivePoint) -> [u8; 20] {
+pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
   let encoded_point = point.to_encoded_point(false);
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap()
-}
-
-pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), v.into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-
-  None
-}
+  // Last 20 bytes of the hash of the concatenated x and y coordinates
+  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
+  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
+}
+
+#[allow(non_snake_case)]
+pub struct PublicKey {
+  pub A: ProjectivePoint,
+  pub px: Scalar,
+  pub parity: u8,
+}
+
+impl PublicKey {
+  #[allow(non_snake_case)]
+  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
+    let affine = A.to_affine();
+    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
+    if parity != 27 {
+      None?;
+    }
+
+    let x_coord = affine.x();
+    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
+    // Return None if a reduction would occur
+    if x_coord_scalar.to_repr() != x_coord {
+      None?;
+    }
+
+    Some(PublicKey { A, px: x_coord_scalar, parity })
+  }
+}

 #[derive(Clone, Default)]
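PublicKey::new only accepts keys with even y-parity (27) whose x-coordinate also round-trips through the scalar field, so an arbitrary group key needs normalizing before use. A sketch of the offsetting loop, written as if inside this crate (key_gen in src/tests/mod.rs, further below, does the same and also applies the accumulated offset to every participant's FROST key share):

use group::ff::Field;
use k256::{ProjectivePoint, Scalar};

use crate::crypto::PublicKey;

// Walk a group key by generator increments until PublicKey accepts it,
// returning the offset so key shares can be shifted to match
fn normalize(mut key: ProjectivePoint) -> (Scalar, PublicKey) {
  let mut offset = Scalar::ZERO;
  loop {
    if let Some(public_key) = PublicKey::new(key) {
      return (offset, public_key);
    }
    offset += Scalar::ONE;
    key += ProjectivePoint::GENERATOR;
  }
}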
@@ -55,53 +59,33 @@ impl Hram<Secp256k1> for EthereumHram {
     let a_encoded_point = A.to_encoded_point(true);
     let mut a_encoded = a_encoded_point.as_ref().to_owned();
     a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
+    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
     let mut data = address(R).to_vec();
     data.append(&mut a_encoded);
-    data.append(&mut m.to_vec());
+    data.extend(m);
     Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
   }
 }

-pub struct ProcessedSignature {
-  pub s: Scalar,
-  pub px: Scalar,
-  pub parity: u8,
-  pub message: [u8; 32],
-  pub e: Scalar,
-}
-
-#[allow(non_snake_case)]
-pub fn preprocess_signature_for_ecrecover(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> (Scalar, Scalar) {
-  let processed_sig = process_signature_for_contract(m, R, s, A, chain_id);
-  let sr = processed_sig.s.mul(&processed_sig.px).negate();
-  let er = processed_sig.e.mul(&processed_sig.px).negate();
-  (sr, er)
-}
-
-#[allow(non_snake_case)]
-pub fn process_signature_for_contract(
-  m: [u8; 32],
-  R: &ProjectivePoint,
-  s: Scalar,
-  A: &ProjectivePoint,
-  chain_id: U256,
-) -> ProcessedSignature {
-  let encoded_pk = A.to_encoded_point(true);
-  let px = &encoded_pk.as_ref()[1 .. 33];
-  let px_scalar = Scalar::reduce(U256::from_be_slice(px));
-  let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat());
-  ProcessedSignature {
-    s,
-    px: px_scalar,
-    parity: &encoded_pk.as_ref()[0] - 2,
-    message: m,
-    e,
-  }
-}
+pub struct Signature {
+  pub(crate) c: Scalar,
+  pub(crate) s: Scalar,
+}
+impl Signature {
+  pub fn new(
+    public_key: &PublicKey,
+    chain_id: U256,
+    m: &[u8],
+    signature: SchnorrSignature<Secp256k1>,
+  ) -> Option<Signature> {
+    let c = EthereumHram::hram(
+      &signature.R,
+      &public_key.A,
+      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
+    );
+    if !signature.verify(public_key.A, c) {
+      None?;
+    }
+    Some(Signature { c, s: signature.s })
+  }
+}
@@ -1,2 +1,16 @@
-pub mod contract;
+use thiserror::Error;
+
 pub mod crypto;
+
+pub(crate) mod abi;
+pub mod schnorr;
+pub mod router;
+
+#[cfg(test)]
+mod tests;
+
+#[derive(Error, Debug)]
+pub enum Error {
+  #[error("failed to verify Schnorr signature")]
+  InvalidSignature,
+}
coins/ethereum/src/router.rs (new file, 30 lines)
@@ -0,0 +1,30 @@
+pub use crate::abi::router::*;
+
+/*
+use crate::crypto::{ProcessedSignature, PublicKey};
+use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
+use eyre::Result;
+use std::{convert::From, fs::File, sync::Arc};
+
+pub async fn router_update_public_key<M: Middleware + 'static>(
+  contract: &Router<M>,
+  public_key: &PublicKey,
+  signature: &ProcessedSignature,
+) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
+  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
+  let pending_tx = tx.send().await?;
+  let receipt = pending_tx.await?;
+  Ok(receipt)
+}
+
+pub async fn router_execute<M: Middleware + 'static>(
+  contract: &Router<M>,
+  txs: Vec<Rtransaction>,
+  signature: &ProcessedSignature,
+) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
+  let tx = contract.execute(txs, signature.into()).send();
+  let pending_tx = tx.send().await?;
+  let receipt = pending_tx.await?;
+  Ok(receipt)
+}
+*/
coins/ethereum/src/schnorr.rs (new file, 34 lines)
@@ -0,0 +1,34 @@
+use eyre::{eyre, Result};
+
+use group::ff::PrimeField;
+
+use ethers_providers::{Provider, Http};
+
+use crate::{
+  Error,
+  crypto::{keccak256, PublicKey, Signature},
+};
+pub use crate::abi::schnorr::*;
+
+pub async fn call_verify(
+  contract: &Schnorr<Provider<Http>>,
+  public_key: &PublicKey,
+  message: &[u8],
+  signature: &Signature,
+) -> Result<()> {
+  if contract
+    .verify(
+      public_key.parity,
+      public_key.px.to_repr().into(),
+      keccak256(message),
+      signature.c.to_repr().into(),
+      signature.s.to_repr().into(),
+    )
+    .call()
+    .await?
+  {
+    Ok(())
+  } else {
+    Err(eyre!(Error::InvalidSignature))
+  }
+}
coins/ethereum/src/tests/crypto.rs (new file, 132 lines)
@@ -0,0 +1,132 @@
+use rand_core::OsRng;
+
+use sha2::Sha256;
+use sha3::{Digest, Keccak256};
+
+use group::Group;
+use k256::{
+  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
+  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
+  U256, Scalar, AffinePoint, ProjectivePoint,
+};
+
+use frost::{
+  curve::Secp256k1,
+  algorithm::{Hram, IetfSchnorr},
+  tests::{algorithm_machines, sign},
+};
+
+use crate::{crypto::*, tests::key_gen};
+
+pub fn hash_to_scalar(data: &[u8]) -> Scalar {
+  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
+}
+
+pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
+  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
+    return None;
+  }
+
+  #[allow(non_snake_case)]
+  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
+  #[allow(non_snake_case)]
+  if let Some(R) = Option::<AffinePoint>::from(R) {
+    #[allow(non_snake_case)]
+    let R = ProjectivePoint::from(R);
+
+    let r = r.invert().unwrap();
+    let u1 = ProjectivePoint::GENERATOR * (-message * r);
+    let u2 = R * (s * r);
+    let key: ProjectivePoint = u1 + u2;
+    if !bool::from(key.is_identity()) {
+      return Some(address(&key));
+    }
+  }
+
+  None
+}
+
+#[test]
+fn test_ecrecover() {
+  let private = SigningKey::random(&mut OsRng);
+  let public = VerifyingKey::from(&private);
+
+  // Sign the signature
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let (sig, recovery_id) = private
+    .as_nonzero_scalar()
+    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
+    .unwrap();
+
+  // Sanity check the signature verifies
+  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
+  {
+    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
+  }
+
+  // Perform the ecrecover
+  assert_eq!(
+    ecrecover(
+      hash_to_scalar(MESSAGE),
+      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
+      *sig.r(),
+      *sig.s()
+    )
+    .unwrap(),
+    address(&ProjectivePoint::from(public.as_affine()))
+  );
+}
+
+// Run the sign test with the EthereumHram
+#[test]
+fn test_signing() {
+  let (keys, _) = key_gen();
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let _sig =
+    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
+}
+
+#[allow(non_snake_case)]
+pub fn preprocess_signature_for_ecrecover(
+  R: ProjectivePoint,
+  public_key: &PublicKey,
+  chain_id: U256,
+  m: &[u8],
+  s: Scalar,
+) -> (u8, Scalar, Scalar) {
+  let c = EthereumHram::hram(
+    &R,
+    &public_key.A,
+    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
+  );
+  let sa = -(s * public_key.px);
+  let ca = -(c * public_key.px);
+  (public_key.parity, sa, ca)
+}
+
+#[test]
+fn test_ecrecover_hack() {
+  let (keys, public_key) = key_gen();
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+  let chain_id = U256::ONE;
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );
+
+  let (parity, sa, ca) =
+    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
+  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
+  assert_eq!(q, address(&sig.R));
+}
coins/ethereum/src/tests/mod.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
+use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
+
+use rand_core::OsRng;
+
+use group::ff::PrimeField;
+use k256::{Scalar, ProjectivePoint};
+use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
+
+use ethers_core::{
+  types::{H160, Signature as EthersSignature},
+  abi::Abi,
+};
+use ethers_contract::ContractFactory;
+use ethers_providers::{Middleware, Provider, Http};
+
+use crate::crypto::PublicKey;
+
+mod crypto;
+mod schnorr;
+mod router;
+
+pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
+  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
+  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
+
+  let mut offset = Scalar::ZERO;
+  while PublicKey::new(group_key).is_none() {
+    offset += Scalar::ONE;
+    group_key += ProjectivePoint::GENERATOR;
+  }
+  for keys in keys.values_mut() {
+    *keys = keys.offset(offset);
+  }
+  let public_key = PublicKey::new(group_key).unwrap();
+
+  (keys, public_key)
+}
+
+// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
+// to fund the deployer, not create/pass a wallet
+// TODO: Deterministic deployments across chains
+pub async fn deploy_contract(
+  chain_id: u32,
+  client: Arc<Provider<Http>>,
+  wallet: &k256::ecdsa::SigningKey,
+  name: &str,
+) -> eyre::Result<H160> {
+  let abi: Abi =
+    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
+
+  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
+  let hex_bin =
+    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
+  let bin = hex::decode(hex_bin).unwrap();
+  let factory = ContractFactory::new(abi, bin.into(), client.clone());
+
+  let mut deployment_tx = factory.deploy(())?.tx;
+  deployment_tx.set_chain_id(chain_id);
+  deployment_tx.set_gas(1_000_000);
+  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
+  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
+  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
+
+  let sig_hash = deployment_tx.sighash();
+  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
+
+  // EIP-155 v
+  let mut v = u64::from(rid.to_byte());
+  assert!((v == 0) || (v == 1));
+  v += u64::from((chain_id * 2) + 35);
+
+  let r = sig.r().to_repr();
+  let r_ref: &[u8] = r.as_ref();
+  let s = sig.s().to_repr();
+  let s_ref: &[u8] = s.as_ref();
+  let deployment_tx =
+    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
+
+  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
+
+  let mut receipt;
+  while {
+    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
+    receipt.is_none()
+  } {
+    tokio::time::sleep(Duration::from_secs(6)).await;
+  }
+  let receipt = receipt.unwrap();
+  assert!(receipt.status == Some(1.into()));
+
+  Ok(receipt.contract_address.unwrap())
+}
coins/ethereum/src/tests/router.rs (new file, 109 lines)
@@ -0,0 +1,109 @@
+use std::{convert::TryFrom, sync::Arc, collections::HashMap};
+
+use rand_core::OsRng;
+
+use group::ff::PrimeField;
+use frost::{
+  curve::Secp256k1,
+  Participant, ThresholdKeys,
+  algorithm::IetfSchnorr,
+  tests::{algorithm_machines, sign},
+};
+
+use ethers_core::{
+  types::{H160, U256, Bytes},
+  abi::AbiEncode,
+  utils::{Anvil, AnvilInstance},
+};
+use ethers_providers::{Middleware, Provider, Http};
+
+use crate::{
+  crypto::{keccak256, PublicKey, EthereumHram, Signature},
+  router::{self, *},
+  tests::{key_gen, deploy_contract},
+};
+
+async fn setup_test() -> (
+  u32,
+  AnvilInstance,
+  Router<Provider<Http>>,
+  HashMap<Participant, ThresholdKeys<Secp256k1>>,
+  PublicKey,
+) {
+  let anvil = Anvil::new().spawn();
+
+  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+  let chain_id = provider.get_chainid().await.unwrap().as_u32();
+  let wallet = anvil.keys()[0].clone().into();
+  let client = Arc::new(provider);
+
+  let contract_address =
+    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
+  let contract = Router::new(contract_address, client.clone());
+
+  let (keys, public_key) = key_gen();
+
+  // Set the key to the threshold keys
+  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
+  let pending_tx = tx.send().await.unwrap();
+  let receipt = pending_tx.await.unwrap().unwrap();
+  assert!(receipt.status == Some(1.into()));
+
+  (chain_id, anvil, contract, keys, public_key)
+}
+
+#[tokio::test]
+async fn test_deploy_contract() {
+  setup_test().await;
+}
+
+pub fn hash_and_sign(
+  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
+  public_key: &PublicKey,
+  chain_id: U256,
+  message: &[u8],
+) -> Signature {
+  let hashed_message = keccak256(message);
+
+  let mut chain_id_bytes = [0; 32];
+  chain_id.to_big_endian(&mut chain_id_bytes);
+  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, keys),
+    full_message,
+  );
+
+  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
+}
+
+#[tokio::test]
+async fn test_router_execute() {
+  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
+
+  let to = H160([0u8; 20]);
+  let value = U256([0u64; 4]);
+  let data = Bytes::from([0]);
+  let tx = OutInstruction { to, value, data: data.clone() };
+
+  let nonce_call = contract.nonce();
+  let nonce = nonce_call.call().await.unwrap();
+
+  let encoded =
+    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
+  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
+
+  let tx = contract
+    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
+    .gas(300_000);
+  let pending_tx = tx.send().await.unwrap();
+  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
+  assert!(receipt.status == Some(1.into()));
+
+  println!("gas used: {:?}", receipt.cumulative_gas_used);
+  println!("logs: {:?}", receipt.logs);
+}
coins/ethereum/src/tests/schnorr.rs (new file, 67 lines)
@@ -0,0 +1,67 @@
+use std::{convert::TryFrom, sync::Arc};
+
+use rand_core::OsRng;
+
+use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};
+
+use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
+use ethers_providers::{Middleware, Provider, Http};
+
+use frost::{
+  curve::Secp256k1,
+  algorithm::IetfSchnorr,
+  tests::{algorithm_machines, sign},
+};
+
+use crate::{
+  crypto::*,
+  schnorr::*,
+  tests::{key_gen, deploy_contract},
+};
+
+async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
+  let anvil = Anvil::new().spawn();
+
+  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
+  let chain_id = provider.get_chainid().await.unwrap().as_u32();
+  let wallet = anvil.keys()[0].clone().into();
+  let client = Arc::new(provider);
+
+  let contract_address =
+    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
+  let contract = Schnorr::new(contract_address, client.clone());
+  (chain_id, anvil, contract)
+}
+
+#[tokio::test]
+async fn test_deploy_contract() {
+  setup_test().await;
+}
+
+#[tokio::test]
+async fn test_ecrecover_hack() {
+  let (chain_id, _anvil, contract) = setup_test().await;
+  let chain_id = U256::from(chain_id);
+
+  let (keys, public_key) = key_gen();
+
+  const MESSAGE: &[u8] = b"Hello, World!";
+  let hashed_message = keccak256(MESSAGE);
+  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
+
+  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
+  let sig = sign(
+    &mut OsRng,
+    &algo,
+    keys.clone(),
+    algorithm_machines(&mut OsRng, &algo, &keys),
+    full_message,
+  );
+  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();
+
+  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
+  // Test an invalid signature fails
+  let mut sig = sig;
+  sig.s += Scalar::ONE;
+  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
+}
@@ -1,128 +0,0 @@
-use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File};
-
-use rand_core::OsRng;
-
-use ::k256::{
-  elliptic_curve::{bigint::ArrayEncoding, PrimeField},
-  U256,
-};
-
-use ethers_core::{
-  types::Signature,
-  abi::Abi,
-  utils::{keccak256, Anvil, AnvilInstance},
-};
-use ethers_contract::ContractFactory;
-use ethers_providers::{Middleware, Provider, Http};
-
-use frost::{
-  curve::Secp256k1,
-  Participant,
-  algorithm::IetfSchnorr,
-  tests::{key_gen, algorithm_machines, sign},
-};
-
-use ethereum_serai::{
-  crypto,
-  contract::{Schnorr, call_verify},
-};
-
-// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
-// to fund the deployer, not create/pass a wallet
-pub async fn deploy_schnorr_verifier_contract(
-  chain_id: u32,
-  client: Arc<Provider<Http>>,
-  wallet: &k256::ecdsa::SigningKey,
-) -> eyre::Result<Schnorr<Provider<Http>>> {
-  let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap();
-
-  let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap();
-  let hex_bin =
-    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
-  let bin = hex::decode(hex_bin).unwrap();
-  let factory = ContractFactory::new(abi, bin.into(), client.clone());
-
-  let mut deployment_tx = factory.deploy(())?.tx;
-  deployment_tx.set_chain_id(chain_id);
-  deployment_tx.set_gas(500_000);
-  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
-  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
-  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
-
-  let sig_hash = deployment_tx.sighash();
-  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
-
-  // EIP-155 v
-  let mut v = u64::from(rid.to_byte());
-  assert!((v == 0) || (v == 1));
-  v += u64::from((chain_id * 2) + 35);
-
-  let r = sig.r().to_repr();
-  let r_ref: &[u8] = r.as_ref();
-  let s = sig.s().to_repr();
-  let s_ref: &[u8] = s.as_ref();
-  let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v });
-
-  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
-
-  let mut receipt;
-  while {
-    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
-    receipt.is_none()
-  } {
-    tokio::time::sleep(Duration::from_secs(6)).await;
-  }
-  let receipt = receipt.unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone());
-  Ok(contract)
-}
-
-async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
-  let anvil = Anvil::new().spawn();
-
-  let provider =
-    Provider::<Http>::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64));
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
-  let wallet = anvil.keys()[0].clone().into();
-  let client = Arc::new(provider);
-
-  (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap())
-}
-
-#[tokio::test]
-async fn test_deploy_contract() {
-  deploy_test_contract().await;
-}
-
-#[tokio::test]
-async fn test_ecrecover_hack() {
-  let (chain_id, _anvil, contract) = deploy_test_contract().await;
-  let chain_id = U256::from(chain_id);
-
-  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
-  let group_key = keys[&Participant::new(1).unwrap()].group_key();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, crypto::EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-  let mut processed_sig =
-    crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id);
-
-  call_verify(&contract, &processed_sig).await.unwrap();
-
-  // test invalid signature fails
-  processed_sig.message[0] = 0;
-  assert!(call_verify(&contract, &processed_sig).await.is_err());
-}
@@ -1,87 +0,0 @@
-use k256::{
-  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint},
-  ProjectivePoint, Scalar, U256,
-};
-use frost::{curve::Secp256k1, Participant};
-
-use ethereum_serai::crypto::*;
-
-#[test]
-fn test_ecrecover() {
-  use rand_core::OsRng;
-  use sha2::Sha256;
-  use sha3::{Digest, Keccak256};
-  use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey};
-
-  let private = SigningKey::random(&mut OsRng);
-  let public = VerifyingKey::from(&private);
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let (sig, recovery_id) = private
-    .as_nonzero_scalar()
-    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
-    .unwrap();
-  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
-  {
-    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
-  }
-
-  assert_eq!(
-    ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s())
-      .unwrap(),
-    address(&ProjectivePoint::from(public.as_affine()))
-  );
-}
-
-#[test]
-fn test_signing() {
-  use frost::{
-    algorithm::IetfSchnorr,
-    tests::{algorithm_machines, key_gen, sign},
-  };
-  use rand_core::OsRng;
-
-  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
-  let _group_key = keys[&Participant::new(1).unwrap()].group_key();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let _sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
-}
-
-#[test]
-fn test_ecrecover_hack() {
-  use frost::{
-    algorithm::IetfSchnorr,
-    tests::{algorithm_machines, key_gen, sign},
-  };
-  use rand_core::OsRng;
-
-  let keys = key_gen::<_, Secp256k1>(&mut OsRng);
-  let group_key = keys[&Participant::new(1).unwrap()].group_key();
-  let group_key_encoded = group_key.to_encoded_point(true);
-  let group_key_compressed = group_key_encoded.as_ref();
-  let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33]));
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let chain_id = U256::ONE;
-
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-
-  let (sr, er) =
-    preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id);
-  let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap();
-  assert_eq!(q, address(&sig.R));
-}
@@ -1,2 +0,0 @@
-mod contract;
-mod crypto;
@@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D
 use group::{ff::Field, Group};
 use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};

-use multiexp::BatchVerifier;
+use multiexp::{BatchVerifier, multiexp};

 use crate::{Commitment, ringct::bulletproofs::core::*};

@@ -17,7 +17,20 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs"));

 static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
 pub(crate) fn IP12() -> Scalar {
-  *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N()))
+  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
+}
+
+pub(crate) fn hadamard_fold(
+  l: &[EdwardsPoint],
+  r: &[EdwardsPoint],
+  a: Scalar,
+  b: Scalar,
+) -> Vec<EdwardsPoint> {
+  let mut res = Vec::with_capacity(l.len() / 2);
+  for i in 0 .. l.len() {
+    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
+  }
+  res
 }

 #[derive(Clone, PartialEq, Eq, Debug)]
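hadamard_fold is the per-round generator fold of Bulletproofs' inner-product argument: after challenge w, the generator vector halves via G'_i = winv*G_L[i] + w*G_R[i] (H typically folds with the challenges swapped). A standalone sketch over dalek_ff_group types, folding a full vector rather than pre-split halves:

use dalek_ff_group::{EdwardsPoint, Scalar};

// One IPA round: fold a generator vector in half against challenge w
fn fold_generators(g: &[EdwardsPoint], w: Scalar, winv: Scalar) -> Vec<EdwardsPoint> {
  let (g_l, g_r) = g.split_at(g.len() / 2);
  g_l.iter().zip(g_r).map(|(l, r)| (*l * winv) + (*r * w)).collect()
}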
@@ -57,7 +70,7 @@ impl OriginalStruct {
     let mut cache = hash_to_scalar(&y.to_bytes());
     let z = cache;

-    let l0 = &aL - z;
+    let l0 = aL - z;
     let l1 = sL;

     let mut zero_twos = Vec::with_capacity(MN);
@@ -69,12 +82,12 @@ impl OriginalStruct {
     }

     let yMN = ScalarVector::powers(y, MN);
-    let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos);
-    let r1 = yMN * sR;
+    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
+    let r1 = yMN * &sR;

     let (T1, T2, x, mut taux) = {
-      let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0);
-      let t2 = inner_product(&l1, &r1);
+      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
+      let t2 = l1.clone().inner_product(&r1);

       let mut tau1 = Scalar::random(&mut *rng);
       let mut tau2 = Scalar::random(&mut *rng);
@@ -100,10 +113,10 @@ impl OriginalStruct {
       taux += zpow[i + 2] * gamma;
     }

-    let l = &l0 + &(l1 * x);
-    let r = &r0 + &(r1 * x);
+    let l = l0 + &(l1 * x);
+    let r = r0 + &(r1 * x);

-    let t = inner_product(&l, &r);
+    let t = l.clone().inner_product(&r);

     let x_ip =
       hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);
@@ -126,8 +139,8 @@ impl OriginalStruct {
       let (aL, aR) = a.split();
       let (bL, bR) = b.split();

-      let cL = inner_product(&aL, &bR);
-      let cR = inner_product(&aR, &bL);
+      let cL = aL.clone().inner_product(&bR);
+      let cR = aR.clone().inner_product(&bL);

       let (G_L, G_R) = G_proof.split_at(aL.len());
       let (H_L, H_R) = H_proof.split_at(aL.len());
@@ -140,8 +153,8 @@ impl OriginalStruct {
|
|||||||
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
|
let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
|
||||||
let winv = w.invert().unwrap();
|
let winv = w.invert().unwrap();
|
||||||
|
|
||||||
a = (aL * w) + (aR * winv);
|
a = (aL * w) + &(aR * winv);
|
||||||
b = (bL * winv) + (bR * w);
|
b = (bL * winv) + &(bR * w);
|
||||||
|
|
||||||
if a.len() != 1 {
|
if a.len() != 1 {
|
||||||
G_proof = hadamard_fold(G_L, G_R, winv, w);
|
G_proof = hadamard_fold(G_L, G_R, winv, w);
|
||||||
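These hunks repeatedly replace the free function inner_product(&a, &b) with a method a.inner_product(&b) consuming self; the quantity is unchanged, the sum of the element-wise products. A toy, std-only check of the definition (u64 stands in for Scalar):

// ip(a, b) = sum_i a[i] * b[i]
fn inner_product(a: &[u64], b: &[u64]) -> u64 {
  assert_eq!(a.len(), b.len());
  a.iter().zip(b).map(|(a, b)| a * b).sum()
}

fn main() {
  assert_eq!(inner_product(&[1, 2, 3], &[4, 5, 6]), 4 + 10 + 18);
}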
@@ -112,7 +112,7 @@ impl AggregateRangeStatement {
   let mut d = ScalarVector::new(mn);
   for j in 1 ..= V.len() {
     z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this
-    d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1]));
+    d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1]));
   }

   let mut ascending_y = ScalarVector(vec![y]);
@@ -124,7 +124,8 @@ impl AggregateRangeStatement {
   let mut descending_y = ascending_y.clone();
   descending_y.0.reverse();

-  let d_descending_y = d.mul_vec(&descending_y);
+  let d_descending_y = d.clone() * &descending_y;
+  let d_descending_y_plus_z = d_descending_y + z;

   let y_mn_plus_one = descending_y[0] * y;

@@ -135,9 +136,9 @@ impl AggregateRangeStatement {

   let neg_z = -z;
   let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2);
-  for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() {
+  for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() {
     A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i)));
-    A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i)));
+    A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i)));
   }
   A_terms.push((y_mn_plus_one, commitment_accum));
   A_terms.push((
@@ -145,7 +146,14 @@ impl AggregateRangeStatement {
     Generators::g(),
   ));

-  (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms))
+  (
+    y,
+    d_descending_y_plus_z,
+    y_mn_plus_one,
+    z,
+    ScalarVector(z_pow),
+    A + multiexp_vartime(&A_terms),
+  )
 }

 pub(crate) fn prove<R: RngCore + CryptoRng>(
@@ -191,7 +199,7 @@ impl AggregateRangeStatement {
     a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0);
   }

-  let a_r = a_l.sub(Scalar::ONE);
+  let a_r = a_l.clone() - Scalar::ONE;

   let alpha = Scalar::random(&mut *rng);

@@ -209,11 +217,11 @@ impl AggregateRangeStatement {
   // Multiply by INV_EIGHT per earlier commentary
   A.0 *= crate::INV_EIGHT();

-  let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) =
+  let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) =
     Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A);

-  let a_l = a_l.sub(z);
-  let a_r = a_r.add_vec(&d_descending_y).add(z);
+  let a_l = a_l - z;
+  let a_r = a_r + &d_descending_y_plus_z;
   let mut alpha = alpha;
   for j in 1 ..= witness.gammas.len() {
     alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one;
@@ -3,8 +3,7 @@
 use group::Group;
 use dalek_ff_group::{Scalar, EdwardsPoint};

-mod scalar_vector;
-pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product};
+pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector;
 mod point_vector;
 pub(crate) use point_vector::PointVector;

@@ -1,114 +0,0 @@
-use core::{
-  borrow::Borrow,
-  ops::{Index, IndexMut},
-};
-use std_shims::vec::Vec;
-
-use zeroize::Zeroize;
-
-use group::ff::Field;
-use dalek_ff_group::Scalar;
-
-#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
-pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
-
-impl Index<usize> for ScalarVector {
-  type Output = Scalar;
-  fn index(&self, index: usize) -> &Scalar {
-    &self.0[index]
-  }
-}
-
-impl IndexMut<usize> for ScalarVector {
-  fn index_mut(&mut self, index: usize) -> &mut Scalar {
-    &mut self.0[index]
-  }
-}
-
-impl ScalarVector {
-  pub(crate) fn new(len: usize) -> Self {
-    ScalarVector(vec![Scalar::ZERO; len])
-  }
-
-  pub(crate) fn add(&self, scalar: impl Borrow<Scalar>) -> Self {
-    let mut res = self.clone();
-    for val in &mut res.0 {
-      *val += scalar.borrow();
-    }
-    res
-  }
-
-  pub(crate) fn sub(&self, scalar: impl Borrow<Scalar>) -> Self {
-    let mut res = self.clone();
-    for val in &mut res.0 {
-      *val -= scalar.borrow();
-    }
-    res
-  }
-
-  pub(crate) fn mul(&self, scalar: impl Borrow<Scalar>) -> Self {
-    let mut res = self.clone();
-    for val in &mut res.0 {
-      *val *= scalar.borrow();
-    }
-    res
-  }
-
-  pub(crate) fn add_vec(&self, vector: &Self) -> Self {
-    debug_assert_eq!(self.len(), vector.len());
-    let mut res = self.clone();
-    for (i, val) in res.0.iter_mut().enumerate() {
-      *val += vector.0[i];
-    }
-    res
-  }
-
-  pub(crate) fn mul_vec(&self, vector: &Self) -> Self {
-    debug_assert_eq!(self.len(), vector.len());
-    let mut res = self.clone();
-    for (i, val) in res.0.iter_mut().enumerate() {
-      *val *= vector.0[i];
-    }
-    res
-  }
-
-  pub(crate) fn inner_product(&self, vector: &Self) -> Scalar {
-    self.mul_vec(vector).sum()
-  }
-
-  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
-    debug_assert!(len != 0);
-
-    let mut res = Vec::with_capacity(len);
-    res.push(Scalar::ONE);
-    res.push(x);
-    for i in 2 .. len {
-      res.push(res[i - 1] * x);
-    }
-    res.truncate(len);
-    ScalarVector(res)
-  }
-
-  pub(crate) fn sum(mut self) -> Scalar {
-    self.0.drain(..).sum()
-  }
-
-  pub(crate) fn len(&self) -> usize {
-    self.0.len()
-  }
-
-  pub(crate) fn split(mut self) -> (Self, Self) {
-    debug_assert!(self.len() > 1);
-    let r = self.0.split_off(self.0.len() / 2);
-    debug_assert_eq!(self.len(), r.len());
-    (self, ScalarVector(r))
-  }
-}
-
-pub(crate) fn weighted_inner_product(
-  a: &ScalarVector,
-  b: &ScalarVector,
-  y: &ScalarVector,
-) -> Scalar {
-  a.inner_product(&b.mul_vec(y))
-}
@@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng};

 use zeroize::{Zeroize, ZeroizeOnDrop};

-use multiexp::{multiexp, multiexp_vartime, BatchVerifier};
+use multiexp::{BatchVerifier, multiexp, multiexp_vartime};
 use group::{
   ff::{Field, PrimeField},
   GroupEncoding,
@@ -12,8 +12,7 @@ use group::{
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use crate::ringct::bulletproofs::plus::{
-  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product,
-  transcript::*,
+  ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*,
 };

 // Figure 1
@@ -219,7 +218,7 @@ impl WipStatement {
       .zip(g_bold.0.iter().copied())
       .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied()))
       .collect::<Vec<_>>();
-    P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g));
+    P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g));
     P_terms.push((witness.alpha, h));
     debug_assert_eq!(multiexp(&P_terms), P);
     P_terms.zeroize();
@@ -258,14 +257,13 @@ impl WipStatement {
       let d_l = Scalar::random(&mut *rng);
       let d_r = Scalar::random(&mut *rng);

-      let c_l = weighted_inner_product(&a1, &b2, &y);
-      let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y);
+      let c_l = a1.clone().weighted_inner_product(&b2, &y);
+      let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y);

       // TODO: Calculate these with a batch inversion
       let y_inv_n_hat = y_n_hat.invert().unwrap();

-      let mut L_terms = a1
-        .mul(y_inv_n_hat)
+      let mut L_terms = (a1.clone() * y_inv_n_hat)
         .0
         .drain(..)
         .zip(g_bold2.0.iter().copied())
@@ -277,8 +275,7 @@ impl WipStatement {
       L_vec.push(L);
       L_terms.zeroize();

-      let mut R_terms = a2
-        .mul(y_n_hat)
+      let mut R_terms = (a2.clone() * y_n_hat)
         .0
         .drain(..)
         .zip(g_bold1.0.iter().copied())
@@ -294,8 +291,8 @@ impl WipStatement {
       (e, inv_e, e_square, inv_e_square, g_bold, h_bold) =
         Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat);

-      a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e));
-      b = b1.mul(inv_e).add_vec(&b2.mul(e));
+      a = (a1 * e) + &(a2 * (y_n_hat * inv_e));
+      b = (b1 * inv_e) + &(b2 * e);
       alpha += (d_l * e_square) + (d_r * inv_e_square);

       debug_assert_eq!(g_bold.len(), a.len());
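The weighted inner product these hunks move to method form is the inner product of a against b weighted by y: multiply a, b, and the weight vector element-wise, then sum. A std-only sketch of the definition (u64 stands in for Scalar):

// wip(a, b, y) = sum_i a[i] * b[i] * y[i]
fn weighted_inner_product(a: &[u64], b: &[u64], y: &[u64]) -> u64 {
  assert!(a.len() == b.len() && b.len() == y.len());
  a.iter().zip(b).zip(y).map(|((a, b), y)| a * b * y).sum()
}

fn main() {
  assert_eq!(weighted_inner_product(&[1, 2], &[3, 4], &[5, 6]), 15 + 48);
}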
@@ -1,85 +1,17 @@
-use core::ops::{Add, Sub, Mul, Index};
+use core::{
+  borrow::Borrow,
+  ops::{Index, IndexMut, Add, Sub, Mul},
+};
 use std_shims::vec::Vec;

 use zeroize::{Zeroize, ZeroizeOnDrop};

 use group::ff::Field;
 use dalek_ff_group::{Scalar, EdwardsPoint};

 use multiexp::multiexp;

 #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
 pub(crate) struct ScalarVector(pub(crate) Vec<Scalar>);
-macro_rules! math_op {
-  ($Op: ident, $op: ident, $f: expr) => {
-    #[allow(clippy::redundant_closure_call)]
-    impl $Op<Scalar> for ScalarVector {
-      type Output = ScalarVector;
-      fn $op(self, b: Scalar) -> ScalarVector {
-        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
-      }
-    }
-
-    #[allow(clippy::redundant_closure_call)]
-    impl $Op<Scalar> for &ScalarVector {
-      type Output = ScalarVector;
-      fn $op(self, b: Scalar) -> ScalarVector {
-        ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect())
-      }
-    }
-
-    #[allow(clippy::redundant_closure_call)]
-    impl $Op<ScalarVector> for ScalarVector {
-      type Output = ScalarVector;
-      fn $op(self, b: ScalarVector) -> ScalarVector {
-        debug_assert_eq!(self.len(), b.len());
-        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
-      }
-    }
-
-    #[allow(clippy::redundant_closure_call)]
-    impl $Op<&ScalarVector> for &ScalarVector {
-      type Output = ScalarVector;
-      fn $op(self, b: &ScalarVector) -> ScalarVector {
-        debug_assert_eq!(self.len(), b.len());
-        ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect())
-      }
-    }
-  };
-}
-math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b);
-math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b);
-math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b);
-
-impl ScalarVector {
-  pub(crate) fn new(len: usize) -> ScalarVector {
-    ScalarVector(vec![Scalar::ZERO; len])
-  }
-
-  pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector {
-    debug_assert!(len != 0);
-
-    let mut res = Vec::with_capacity(len);
-    res.push(Scalar::ONE);
-    for i in 1 .. len {
-      res.push(res[i - 1] * x);
-    }
-    ScalarVector(res)
-  }
-
-  pub(crate) fn sum(mut self) -> Scalar {
-    self.0.drain(..).sum()
-  }
-
-  pub(crate) fn len(&self) -> usize {
-    self.0.len()
-  }
-
-  pub(crate) fn split(self) -> (ScalarVector, ScalarVector) {
-    let (l, r) = self.0.split_at(self.0.len() / 2);
-    (ScalarVector(l.to_vec()), ScalarVector(r.to_vec()))
-  }
-}
-
 impl Index<usize> for ScalarVector {
   type Output = Scalar;
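The replacement file (next hunk) swaps the macro-generated operators for impls generic over S: Borrow<Scalar>, so one impl serves both owned and borrowed scalars. A minimal, std-only sketch of that pattern (Vec64 and u64 are illustrative stand-ins):

use core::borrow::Borrow;
use core::ops::Add;

#[derive(Clone, Debug, PartialEq)]
struct Vec64(Vec<u64>);

// One impl accepts both `vec + scalar` and `vec + &scalar`.
impl<S: Borrow<u64>> Add<S> for Vec64 {
  type Output = Vec64;
  fn add(mut self, scalar: S) -> Vec64 {
    for v in &mut self.0 {
      *v += scalar.borrow();
    }
    self
  }
}

fn main() {
  let v = Vec64(vec![1, 2, 3]);
  let by_value = v.clone() + 10u64;
  let by_ref = v + &10u64;
  assert_eq!(by_value, by_ref);
}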
@@ -87,28 +19,120 @@ impl Index<usize> for ScalarVector {
     &self.0[index]
   }
 }
+impl IndexMut<usize> for ScalarVector {
+  fn index_mut(&mut self, index: usize) -> &mut Scalar {
+    &mut self.0[index]
+  }
+}

-pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar {
-  (a * b).sum()
+impl<S: Borrow<Scalar>> Add<S> for ScalarVector {
+  type Output = ScalarVector;
+  fn add(mut self, scalar: S) -> ScalarVector {
+    for s in &mut self.0 {
+      *s += scalar.borrow();
+    }
+    self
+  }
+}
+impl<S: Borrow<Scalar>> Sub<S> for ScalarVector {
+  type Output = ScalarVector;
+  fn sub(mut self, scalar: S) -> ScalarVector {
+    for s in &mut self.0 {
+      *s -= scalar.borrow();
+    }
+    self
+  }
+}
+impl<S: Borrow<Scalar>> Mul<S> for ScalarVector {
+  type Output = ScalarVector;
+  fn mul(mut self, scalar: S) -> ScalarVector {
+    for s in &mut self.0 {
+      *s *= scalar.borrow();
+    }
+    self
+  }
+}
+
+impl Add<&ScalarVector> for ScalarVector {
+  type Output = ScalarVector;
+  fn add(mut self, other: &ScalarVector) -> ScalarVector {
+    debug_assert_eq!(self.len(), other.len());
+    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
+      *s += o;
+    }
+    self
+  }
+}
+impl Sub<&ScalarVector> for ScalarVector {
+  type Output = ScalarVector;
+  fn sub(mut self, other: &ScalarVector) -> ScalarVector {
+    debug_assert_eq!(self.len(), other.len());
+    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
+      *s -= o;
+    }
+    self
+  }
+}
+impl Mul<&ScalarVector> for ScalarVector {
+  type Output = ScalarVector;
+  fn mul(mut self, other: &ScalarVector) -> ScalarVector {
+    debug_assert_eq!(self.len(), other.len());
+    for (s, o) in self.0.iter_mut().zip(other.0.iter()) {
+      *s *= o;
+    }
+    self
+  }
 }

 impl Mul<&[EdwardsPoint]> for &ScalarVector {
   type Output = EdwardsPoint;
   fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint {
     debug_assert_eq!(self.len(), b.len());
-    multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>())
+    let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::<Vec<_>>();
+    let res = multiexp(&multiexp_args);
+    multiexp_args.zeroize();
+    res
   }
 }

-pub(crate) fn hadamard_fold(
-  l: &[EdwardsPoint],
-  r: &[EdwardsPoint],
-  a: Scalar,
-  b: Scalar,
-) -> Vec<EdwardsPoint> {
-  let mut res = Vec::with_capacity(l.len() / 2);
-  for i in 0 .. l.len() {
-    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
-  }
-  res
-}
+impl ScalarVector {
+  pub(crate) fn new(len: usize) -> Self {
+    ScalarVector(vec![Scalar::ZERO; len])
+  }
+
+  pub(crate) fn powers(x: Scalar, len: usize) -> Self {
+    debug_assert!(len != 0);
+
+    let mut res = Vec::with_capacity(len);
+    res.push(Scalar::ONE);
+    res.push(x);
+    for i in 2 .. len {
+      res.push(res[i - 1] * x);
+    }
+    res.truncate(len);
+    ScalarVector(res)
+  }
+
+  pub(crate) fn len(&self) -> usize {
+    self.0.len()
+  }
+
+  pub(crate) fn sum(mut self) -> Scalar {
+    self.0.drain(..).sum()
+  }
+
+  pub(crate) fn inner_product(self, vector: &Self) -> Scalar {
+    (self * vector).sum()
+  }
+
+  pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar {
+    (self * vector * y).sum()
+  }
+
+  pub(crate) fn split(mut self) -> (Self, Self) {
+    debug_assert!(self.len() > 1);
+    let r = self.0.split_off(self.0.len() / 2);
+    debug_assert_eq!(self.len(), r.len());
+    (self, ScalarVector(r))
+  }
+}
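One subtlety in the new powers: it seeds the vector with both 1 and x before the multiplicative extension, then truncates, so a request for len == 1 still returns [1]. A std-only mirror of that logic (u64 stands in for Scalar):

// powers(x, len) = [1, x, x^2, ..., x^(len - 1)]
fn powers(x: u64, len: usize) -> Vec<u64> {
  assert!(len != 0);
  let mut res = Vec::with_capacity(len);
  res.push(1);
  res.push(x);
  for i in 2 .. len {
    res.push(res[i - 1] * x);
  }
  // The truncate handles the len == 1 case, where [1, x] was over-pushed.
  res.truncate(len);
  res
}

fn main() {
  assert_eq!(powers(3, 1), vec![1]);
  assert_eq!(powers(3, 4), vec![1, 3, 9, 27]);
}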
@@ -9,7 +9,7 @@ use std_shims::{
 use rand_core::{RngCore, CryptoRng};

 use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
-use subtle::{ConstantTimeEq, Choice, CtOption};
+use subtle::{ConstantTimeEq, ConditionallySelectable};

 use curve25519_dalek::{
   constants::ED25519_BASEPOINT_TABLE,
@@ -169,13 +169,8 @@ fn core(
   }

   // Perform the core loop
-  let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0));
+  let mut c1 = c;
   for i in (start .. end).map(|i| i % n) {
-    // This will only execute once and shouldn't need to be constant time. Making it constant time
-    // removes the risk of branch prediction creating timing differences depending on ring index
-    // however
-    c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0)));

     let c_p = mu_P * c;
     let c_c = mu_C * c;

@@ -188,10 +183,15 @@ fn core(
     to_hash.extend(L.compress().to_bytes());
     to_hash.extend(R.compress().to_bytes());
     c = hash_to_scalar(&to_hash);

+    // This will only execute once and shouldn't need to be constant time. Making it constant time
+    // removes the risk of branch prediction creating timing differences depending on ring index
+    // however
+    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
   }

   // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
-  ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
+  ((D, c * mu_P, c * mu_C), c1)
 }

 /// CLSAG signature, as used in Monero.
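The c1 handling above trades CtOption for ConditionallySelectable: c1 starts as a placeholder and is conditionally overwritten, without branching, on the iteration where i == n - 1. A small sketch of that idiom using the subtle crate on u64 (the i * i challenge formula is a stand-in, not CLSAG's actual challenge):

use subtle::{ConditionallySelectable, ConstantTimeEq};

fn main() {
  let n: u64 = 11;
  let mut c1 = 0u64;
  for i in 0 .. n {
    let c = i * i; // stand-in for the per-iteration challenge
    // Overwrite c1 only when i == n - 1, in constant time.
    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
  }
  assert_eq!(c1, (n - 1) * (n - 1));
}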
@@ -199,6 +199,7 @@ impl Algorithm<Ed25519> for ClsagMultisig {
     l: Participant,
     addendum: ClsagAddendum,
   ) -> Result<(), FrostError> {
+    // TODO: This check is faulty if two shares are additive inverses of each other
     if self.image.is_identity().into() {
       self.transcript.domain_separate(b"CLSAG");
       self.input().transcript(&mut self.transcript);
@@ -9,7 +9,6 @@ use dalek_ff_group::{Scalar, EdwardsPoint};
 use crate::ringct::bulletproofs::plus::{
   ScalarVector, PointVector, GeneratorsList, Generators,
   weighted_inner_product::{WipStatement, WipWitness},
-  weighted_inner_product,
 };

 #[test]
@@ -68,7 +67,7 @@ fn test_weighted_inner_product() {
   #[allow(non_snake_case)]
   let P = g_bold.multiexp(&a) +
     h_bold.multiexp(&b) +
-    (g * weighted_inner_product(&a, &b, &y_vec)) +
+    (g * a.clone().weighted_inner_product(&b, &y_vec)) +
     (h * alpha);

   let statement = WipStatement::new(generators, P, y);
@@ -57,7 +57,7 @@ fn clsag() {
   }

   let image = generate_key_image(&secrets.0);
-  let (clsag, pseudo_out) = Clsag::sign(
+  let (mut clsag, pseudo_out) = Clsag::sign(
     &mut OsRng,
     vec![(
       secrets.0,
@@ -76,7 +76,12 @@ fn clsag() {
     msg,
   )
   .swap_remove(0);

   clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();

+  // make sure verification fails if we throw a random `c1` at it.
+  clsag.c1 = random_scalar(&mut OsRng);
+  assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err());
   }
 }

@@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
   .unwrap();
   let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap();

-  // TODO: Needs https://github.com/monero-project/monero/pull/8882
+  // TODO: Needs https://github.com/monero-project/monero/pull/9260
   // let fee_rate = daemon_rpc
   //   .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
   //   .await
@@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
   let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap();
   let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);

-  // TODO: Needs https://github.com/monero-project/monero/pull/8882
+  // TODO: Needs https://github.com/monero-project/monero/pull/9260
   // runner::check_weight_and_fee(&tx, fee_rate);

   match spec {
@@ -18,7 +18,7 @@ workspace = true

 [dependencies]
 parity-db = { version = "0.4", default-features = false, optional = true }
-rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true }
+rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }

 [features]
 parity-db = ["dep:parity-db"]
@@ -1,42 +1,65 @@
 use std::sync::Arc;

-use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB};
+use rocksdb::{
+  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
+  Transaction as RocksTransaction, Options, OptimisticTransactionDB,
+};

 use crate::*;

-impl<T: ThreadMode> Get for Transaction<'_, TransactionDB<T>> {
+pub struct Transaction<'a, T: ThreadMode>(
+  RocksTransaction<'a, OptimisticTransactionDB<T>>,
+  &'a OptimisticTransactionDB<T>,
+);
+
+impl<T: ThreadMode> Get for Transaction<'_, T> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    self.get(key).expect("couldn't read from RocksDB via transaction")
+    self.0.get(key).expect("couldn't read from RocksDB via transaction")
   }
 }
-impl<T: ThreadMode> DbTxn for Transaction<'_, TransactionDB<T>> {
+impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
-    Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction")
+    self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
   }
   fn del(&mut self, key: impl AsRef<[u8]>) {
-    self.delete(key).expect("couldn't delete from RocksDB via transaction")
+    self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
   }
   fn commit(self) {
-    Transaction::commit(self).expect("couldn't commit to RocksDB via transaction")
+    self.0.commit().expect("couldn't commit to RocksDB via transaction");
+    self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
+    self.1.flush().expect("couldn't flush RocksDB");
   }
 }

-impl<T: ThreadMode> Get for Arc<TransactionDB<T>> {
+impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
-    TransactionDB::get(self, key).expect("couldn't read from RocksDB")
+    OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
   }
 }
-impl<T: ThreadMode + 'static> Db for Arc<TransactionDB<T>> {
-  type Transaction<'a> = Transaction<'a, TransactionDB<T>>;
+impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
+  type Transaction<'a> = Transaction<'a, T>;
   fn txn(&mut self) -> Self::Transaction<'_> {
-    self.transaction()
+    let mut opts = WriteOptions::default();
+    opts.set_sync(true);
+    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
   }
 }

-pub type RocksDB = Arc<TransactionDB<SingleThreaded>>;
+pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
 pub fn new_rocksdb(path: &str) -> RocksDB {
   let mut options = Options::default();
   options.create_if_missing(true);
-  options.set_compression_type(DBCompressionType::Lz4);
-  Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap())
+  options.set_compression_type(DBCompressionType::Zstd);
+  options.set_wal_compression_type(DBCompressionType::Zstd);
+  // 10 MB
+  options.set_max_total_wal_size(10 * 1024 * 1024);
+  options.set_wal_size_limit_mb(10);
+
+  options.set_log_level(LogLevel::Warn);
+  // 1 MB
+  options.set_max_log_file_size(1024 * 1024);
+  options.set_recycle_log_file_num(1);
+
+  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
 }
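A minimal sketch of the new backend's flow, assuming the same rust-rocksdb crate as above (the path is illustrative): open an OptimisticTransactionDB, write through a transaction, commit, then flush the WAL and memtables as the commit() impl above does.

use rocksdb::{Options, OptimisticTransactionDB, SingleThreaded};

fn main() {
  let mut options = Options::default();
  options.create_if_missing(true);
  let db: OptimisticTransactionDB<SingleThreaded> =
    OptimisticTransactionDB::open(&options, "/tmp/example-db").unwrap();

  // Write within a transaction, then commit it.
  let txn = db.transaction();
  txn.put(b"key", b"value").unwrap();
  txn.commit().unwrap();

  // Mirrors the commit() in the diff: flush the WAL, then the memtables.
  db.flush_wal(true).unwrap();
  db.flush().unwrap();
  assert_eq!(db.get(b"key").unwrap().unwrap(), b"value");
}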
@@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", "
 http-body-util = { version = "0.1", default-features = false }
 tokio = { version = "1", default-features = false }

-hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
+hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

 zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }
@@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim

 futures-util = { version = "0.3", default-features = false, features = ["std"] }
 tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
-libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }
+libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }

 [dev-dependencies]
 tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
@@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db};
 use processor_messages::coordinator::cosign_block_msg;

 use crate::{
-  p2p::{CosignedBlock, P2pMessageKind, P2p},
+  p2p::{CosignedBlock, GossipMessageKind, P2p},
   substrate::LatestCosignedBlock,
 };

@@ -323,7 +323,7 @@ impl<D: Db> CosignEvaluator<D> {
       for cosign in cosigns {
         let mut buf = vec![];
         cosign.serialize(&mut buf).unwrap();
-        P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await;
+        P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
       }
       sleep(Duration::from_secs(60)).await;
     }
@@ -260,7 +260,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
       cosign_channel.send(cosigned_block).unwrap();
       let mut buf = vec![];
       cosigned_block.serialize(&mut buf).unwrap();
-      P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
+      P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await;
       None
     }
     // This causes an action on Substrate yet not on any Tributary
@@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
 ) {
   let mut tributaries = HashMap::new();
   'outer: loop {
-    // TODO: Create a better async flow for this, as this does still hammer this task
-    tokio::task::yield_now().await;
+    // TODO: Create a better async flow for this
+    tokio::time::sleep(core::time::Duration::from_millis(100)).await;

     match tributary_event.try_recv() {
       Ok(event) => match event {
[File diff suppressed because it is too large]
@@ -41,8 +41,9 @@ enum HasEvents {

 create_db!(
   SubstrateCosignDb {
+    ScanCosignFrom: () -> u64,
     IntendedCosign: () -> (u64, Option<u64>),
-    BlockHasEvents: (block: u64) -> HasEvents,
+    BlockHasEventsCache: (block: u64) -> HasEvents,
     LatestCosignedBlock: () -> u64,
   }
 );
@@ -85,7 +86,7 @@ async fn block_has_events(
   serai: &Serai,
   block: u64,
 ) -> Result<HasEvents, SeraiError> {
-  let cached = BlockHasEvents::get(txn, block);
+  let cached = BlockHasEventsCache::get(txn, block);
   match cached {
     None => {
       let serai = serai.as_of(
@@ -107,8 +108,8 @@ async fn block_has_events(

       let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

-      BlockHasEvents::set(txn, block, &has_events);
-      Ok(HasEvents::Yes)
+      BlockHasEventsCache::set(txn, block, &has_events);
+      Ok(has_events)
     }
     Some(code) => Ok(code),
   }
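The second change in this hunk is a genuine bug fix: the cache-miss path stored has_events but returned the constant HasEvents::Yes, so the first query for an event-less block disagreed with every later, cached query. A toy, std-only reduction of that bug shape (the block % 2 predicate is a stand-in):

use std::collections::HashMap;

#[derive(Clone, Copy, Debug, PartialEq)]
enum HasEvents {
  Yes,
  No,
}

fn block_has_events(cache: &mut HashMap<u64, HasEvents>, block: u64) -> HasEvents {
  if let Some(cached) = cache.get(&block) {
    return *cached;
  }
  let has_events = if block % 2 == 0 { HasEvents::No } else { HasEvents::Yes };
  cache.insert(block, has_events);
  has_events // the fix: the equivalent path previously always returned Yes
}

fn main() {
  let mut cache = HashMap::new();
  // First (computed) and second (cached) answers must agree.
  let first = block_has_events(&mut cache, 4);
  let second = block_has_events(&mut cache, 4);
  assert_eq!(first, second);
}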
@@ -135,6 +136,7 @@ async fn potentially_cosign_block(
   if (block_has_events == HasEvents::No) &&
     (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
   {
+    log::debug!("automatically co-signing next block ({block}) since it has no events");
     LatestCosignedBlock::set(txn, &block);
   }

@@ -178,7 +180,7 @@ async fn potentially_cosign_block(
   which should be cosigned). Accordingly, it is necessary to call multiple times even if
   `latest_number` doesn't change.
 */
-pub async fn advance_cosign_protocol(
+async fn advance_cosign_protocol_inner(
   db: &mut impl Db,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   serai: &Serai,
@@ -203,16 +205,23 @@ pub async fn advance_cosign_protocol(
   let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
   // If we've never triggered a cosign, don't skip any cosigns based on proximity
   if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
-    window_end_exclusive = 0;
+    window_end_exclusive = 1;
   }

+  // The consensus rules for this are `last_intended_to_cosign_block + 1`
+  let scan_start_block = last_intended_to_cosign_block + 1;
+  // As a practical optimization, we don't re-scan old blocks since old blocks are independent to
+  // new state
+  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));
+
   // Check all blocks within the window to see if they should be cosigned
   // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
   // do cosign them
   // We only perform this check if we haven't already marked a block as skipped since the cosign
   // the skipped block will cause will cosign all other blocks within this window
   if skipped_block.is_none() {
-    for b in (last_intended_to_cosign_block + 1) .. window_end_exclusive.min(latest_number) {
+    let window_end_inclusive = window_end_exclusive - 1;
+    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
       if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
         skipped_block = Some(b);
         log::debug!("skipping cosigning {b} due to proximity to prior cosign");
@@ -227,7 +236,7 @@ pub async fn advance_cosign_protocol(
   // A list of sets which are cosigning, along with a boolean of if we're in the set
   let mut cosigning = vec![];

-  for block in (last_intended_to_cosign_block + 1) ..= latest_number {
+  for block in scan_start_block ..= latest_number {
     let actual_block = serai
       .finalized_block_by_number(block)
       .await?
@@ -276,6 +285,11 @@ pub async fn advance_cosign_protocol(

       break;
     }

+    // If this TX is committed, always start future scanning from the next block
+    ScanCosignFrom::set(&mut txn, &(block + 1));
+    // Since we're scanning *from* the next block, tidy the cache
+    BlockHasEventsCache::del(&mut txn, block);
   }

   if let Some((number, hash)) = to_cosign {
@@ -297,3 +311,22 @@ pub async fn advance_cosign_protocol(

   Ok(())
 }
+
+pub async fn advance_cosign_protocol(
+  db: &mut impl Db,
+  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  serai: &Serai,
+  latest_number: u64,
+) -> Result<(), SeraiError> {
+  loop {
+    let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
+    // Only scan 1000 blocks at a time to limit a massive txn from forming
+    let scan_to = latest_number.min(scan_from + 1000);
+    advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
+    // If we didn't limit the scan_to, break
+    if scan_to == latest_number {
+      break;
+    }
+  }
+  Ok(())
+}
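The new wrapper drives the inner function in chunks of at most 1000 blocks so no single database transaction grows unboundedly, persisting the resume point between chunks. A synchronous, std-only sketch of that driver pattern (scan_from models ScanCosignFrom; the per-chunk scan body is elided and the resume update is simplified to one assignment per chunk):

fn advance(scan_from: &mut u64, latest_number: u64) {
  loop {
    // Bound each chunk to 1000 blocks, as the wrapper above does.
    let scan_to = latest_number.min(*scan_from + 1000);
    // ... scan blocks in (*scan_from ..= scan_to) and commit one txn ...
    *scan_from = scan_to + 1;
    if scan_to == latest_number {
      break;
    }
  }
}

fn main() {
  let mut scan_from = 1;
  advance(&mut scan_from, 2500);
  assert_eq!(scan_from, 2501);
}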
@@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 use serai_client::{
   SeraiError, Block, Serai, TemporalSerai,
   primitives::{BlockHash, NetworkId},
-  validator_sets::{
-    primitives::{ValidatorSet, amortize_excess_key_shares},
-    ValidatorSetsEvent,
-  },
+  validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
   in_instructions::InInstructionsEvent,
   coins::CoinsEvent,
 };
@@ -69,12 +66,7 @@ async fn handle_new_set<D: Db>(
   let set_participants =
     serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

-  let mut set_data = set_participants
-    .into_iter()
-    .map(|(k, w)| (k, u16::try_from(w).unwrap()))
-    .collect::<Vec<_>>();
-  amortize_excess_key_shares(&mut set_data);
-  set_data
+  set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
 };

 let time = if let Ok(time) = block.time() {
@@ -14,7 +14,7 @@ use tokio::sync::RwLock;

 use crate::{
   processors::{Message, Processors},
-  TributaryP2p, P2pMessageKind, P2p,
+  TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
 };

 pub mod tributary;
@@ -45,7 +45,10 @@ impl Processors for MemProcessors {

 #[allow(clippy::type_complexity)]
 #[derive(Clone, Debug)]
-pub struct LocalP2p(usize, pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, Vec<u8>)>>)>>);
+pub struct LocalP2p(
+  usize,
+  pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
+);

 impl LocalP2p {
   pub fn new(validators: usize) -> Vec<LocalP2p> {
@@ -65,11 +68,13 @@ impl P2p for LocalP2p {
   async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
   async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

-  async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
-    self.1.write().await.1[to].push_back((self.0, msg));
+  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
+    let mut msg_ref = msg.as_slice();
+    let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
+    self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
   }

-  async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
+  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
     // Content-based deduplication
     let mut lock = self.1.write().await;
     {
@@ -81,19 +86,26 @@ impl P2p for LocalP2p {
     }
     let queues = &mut lock.1;

+    let kind_len = (match kind {
+      P2pMessageKind::ReqRes(kind) => kind.serialize(),
+      P2pMessageKind::Gossip(kind) => kind.serialize(),
+    })
+    .len();
+    let msg = msg[kind_len ..].to_vec();
+
     for (i, msg_queue) in queues.iter_mut().enumerate() {
       if i == self.0 {
         continue;
       }
-      msg_queue.push_back((self.0, msg.clone()));
+      msg_queue.push_back((self.0, kind, msg.clone()));
     }
   }

-  async fn receive_raw(&self) -> (Self::Id, Vec<u8>) {
+  async fn receive(&self) -> P2pMessage<Self> {
     // This is a cursed way to implement an async read from a Vec
     loop {
-      if let Some(res) = self.1.write().await.1[self.0].pop_front() {
-        return res;
+      if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
+        return P2pMessage { sender, kind, msg };
       }
       tokio::time::sleep(std::time::Duration::from_millis(100)).await;
     }
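broadcast_raw now receives the full wire message, whose first bytes are the serialized message kind; LocalP2p strips kind.serialize().len() bytes and queues (sender, kind, payload) tuples. A std-only sketch of that prefix handling with a toy Kind encoding (the two-byte serialization is illustrative, not the coordinator's real format):

#[derive(Clone, Copy, Debug, PartialEq)]
enum Kind {
  Gossip(u8),
  ReqRes(u8),
}

impl Kind {
  fn serialize(self) -> Vec<u8> {
    match self {
      Kind::Gossip(b) => vec![0, b],
      Kind::ReqRes(b) => vec![1, b],
    }
  }
}

fn main() {
  let kind = Kind::Gossip(7);
  let payload = b"hello".to_vec();
  // Sender side: the wire message is the kind prefix plus the payload.
  let mut wire = kind.serialize();
  wire.extend(&payload);

  // Receiver side: strip the kind prefix, keep (kind, payload).
  let kind_len = kind.serialize().len();
  assert_eq!(&wire[kind_len ..], payload.as_slice());
}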
@@ -103,6 +115,11 @@ impl P2p for LocalP2p {
 #[async_trait]
 impl TributaryP2p for LocalP2p {
   async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
-    <Self as P2p>::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await
+    <Self as P2p>::broadcast(
+      self,
+      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
+      msg,
+    )
+    .await
   }
 }
@@ -26,7 +26,7 @@ use serai_db::MemDb;
 use tributary::Tributary;

 use crate::{
-  P2pMessageKind, P2p,
+  GossipMessageKind, P2pMessageKind, P2p,
   tributary::{Transaction, TributarySpec},
   tests::LocalP2p,
 };
@@ -98,7 +98,7 @@ pub async fn run_tributaries(
   for (p2p, tributary) in &mut tributaries {
     while let Poll::Ready(msg) = poll!(p2p.receive()) {
       match msg.kind {
-        P2pMessageKind::Tributary(genesis) => {
+        P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
           assert_eq!(genesis, tributary.genesis());
           if tributary.handle_message(&msg.msg).await {
             p2p.broadcast(msg.kind, msg.msg).await;
@@ -173,7 +173,7 @@ async fn tributary_test() {
   for (p2p, tributary) in &mut tributaries {
     while let Poll::Ready(msg) = poll!(p2p.receive()) {
       match msg.kind {
-        P2pMessageKind::Tributary(genesis) => {
+        P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
           assert_eq!(genesis, tributary.genesis());
           tributary.handle_message(&msg.msg).await;
         }
@@ -199,7 +199,7 @@ async fn tributary_test() {
   for (p2p, tributary) in &mut tributaries {
     while let Poll::Ready(msg) = poll!(p2p.receive()) {
       match msg.kind {
-        P2pMessageKind::Tributary(genesis) => {
+        P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
           assert_eq!(genesis, tributary.genesis());
           tributary.handle_message(&msg.msg).await;
         }
@@ -116,8 +116,8 @@ async fn sync_test() {
   .map_err(|_| "failed to send ActiveTributary to heartbeat")
   .unwrap();

-  // The heartbeat is once every 10 blocks
-  sleep(Duration::from_secs(10 * block_time)).await;
+  // The heartbeat is once every 10 blocks, with some limitations
+  sleep(Duration::from_secs(20 * block_time)).await;
   assert!(syncer_tributary.tip().await != spec.genesis());

   // Verify it synced to the tip
@@ -74,7 +74,7 @@ impl TributarySpec {

   pub fn genesis(&self) -> [u8; 32] {
     // Calculate the genesis for this Tributary
-    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
+    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis Testnet 2.1");
     // This locks it to a specific Serai chain
     genesis.append_message(b"serai_block", self.serai_block);
     genesis.append_message(b"session", self.set.session.0.to_le_bytes());
@@ -1,5 +1,5 @@
 use core::{marker::PhantomData, fmt::Debug};
-use std::{sync::Arc, io};
+use std::{sync::Arc, io, collections::VecDeque};

 use async_trait::async_trait;

@@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
 pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

 pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
-pub(crate) const BLOCK_MESSAGE: u8 = 1;
-pub(crate) const TRANSACTION_MESSAGE: u8 = 2;
+pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1

 #[allow(clippy::large_enum_variant)]
 #[derive(Clone, PartialEq, Eq, Debug)]
@@ -194,7 +193,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     );
     let blockchain = Arc::new(RwLock::new(blockchain));

-    let to_rebroadcast = Arc::new(RwLock::new(vec![]));
+    let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
     // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
     // P2P layer
     let p2p_meta_task_handle = Arc::new(
@@ -207,7 +206,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
         for msg in to_rebroadcast {
           p2p.broadcast(genesis, msg).await;
         }
-        tokio::time::sleep(core::time::Duration::from_secs(1)).await;
+        tokio::time::sleep(core::time::Duration::from_secs(60)).await;
       }
     }
   })
@@ -218,7 +217,15 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
       TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };

     let TendermintHandle { synced_block, synced_block_result, messages, machine } =
-      TendermintMachine::new(network.clone(), block_number, start_time, proposal).await;
+      TendermintMachine::new(
+        db.clone(),
+        network.clone(),
+        genesis,
+        block_number,
+        start_time,
+        proposal,
+      )
+      .await;
     tokio::spawn(machine.run());

     Some(Self {
@@ -328,9 +335,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {

   // Return true if the message should be rebroadcasted.
   pub async fn handle_message(&self, msg: &[u8]) -> bool {
-    // Acquire the lock now to prevent sync_block from being run at the same time
-    let mut sync_block = self.synced_block_result.write().await;
-
     match msg.first() {
       Some(&TRANSACTION_MESSAGE) => {
         let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else {
@@ -362,19 +366,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
         false
       }

-      Some(&BLOCK_MESSAGE) => {
-        let mut msg_ref = &msg[1 ..];
-        let Ok(block) = Block::<T>::read(&mut msg_ref) else {
-          log::error!("received invalid block message");
-          return false;
-        };
-        let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec();
-        if self.sync_block_internal(block, commit, &mut sync_block).await {
-          log::debug!("synced block over p2p net instead of building the commit ourselves");
-        }
-        false
-      }
-
       _ => false,
     }
   }
@@ -1,5 +1,8 @@
 use core::ops::Deref;
-use std::{sync::Arc, collections::HashMap};
+use std::{
+  sync::Arc,
+  collections::{VecDeque, HashMap},
+};

 use async_trait::async_trait;

@@ -38,9 +41,8 @@ use tendermint::{
 use tokio::sync::RwLock;

 use crate::{
-  TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite,
-  transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError,
-  Blockchain, P2p,
+  TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait,
+  Transaction, BlockHeader, Block, BlockError, Blockchain, P2p,
 };

 pub mod tx;
@@ -268,7 +270,7 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
   pub(crate) validators: Arc<Validators>,
   pub(crate) blockchain: Arc<RwLock<Blockchain<D, T>>>,

-  pub(crate) to_rebroadcast: Arc<RwLock<Vec<Vec<u8>>>>,
+  pub(crate) to_rebroadcast: Arc<RwLock<VecDeque<Vec<u8>>>>,

   pub(crate) p2p: P,
 }
@@ -277,31 +279,10 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999;
|
|||||||
pub const LATENCY_TIME: u32 = 1667;
|
pub const LATENCY_TIME: u32 = 1667;
|
||||||
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);
|
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn assert_target_block_time() {
|
|
||||||
use serai_db::MemDb;
|
|
||||||
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct DummyP2p;
|
|
||||||
|
|
||||||
#[async_trait::async_trait]
|
|
||||||
impl P2p for DummyP2p {
|
|
||||||
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
|
|
||||||
unimplemented!()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Type paremeters don't matter here since we only need to call the block_time()
|
|
||||||
// and it only relies on the constants of the trait implementation. block_time() is in seconds,
|
|
||||||
// TARGET_BLOCK_TIME is in milliseconds.
|
|
||||||
assert_eq!(
|
|
||||||
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
|
|
||||||
TARGET_BLOCK_TIME / 1000
|
|
||||||
)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[async_trait]
|
#[async_trait]
|
||||||
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
|
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
|
||||||
|
type Db = D;
|
||||||
|
|
||||||
type ValidatorId = [u8; 32];
|
type ValidatorId = [u8; 32];
|
||||||
type SignatureScheme = Arc<Validators>;
|
type SignatureScheme = Arc<Validators>;
|
||||||
type Weights = Arc<Validators>;
|
type Weights = Arc<Validators>;
|
||||||
@@ -325,19 +306,28 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
|
|||||||
}
|
}
|
||||||
|
|
||||||
async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
|
async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
|
||||||
|
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
|
||||||
|
to_broadcast.extend(msg.encode());
|
||||||
|
|
||||||
// Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
|
// Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second
|
||||||
// until the block it's trying to build is complete
|
// until the block it's trying to build is complete
|
||||||
// If the P2P layer drops a message before all nodes obtained access, or a node had an
|
// If the P2P layer drops a message before all nodes obtained access, or a node had an
|
||||||
// intermittent failure, this will ensure reconcilliation
|
// intermittent failure, this will ensure reconcilliation
|
||||||
// Resolves halts caused by timing discrepancies, which technically are violations of
|
|
||||||
// Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing
|
|
||||||
// environments
|
|
||||||
// This is atrocious if there's no content-based deduplication protocol for messages actively
|
// This is atrocious if there's no content-based deduplication protocol for messages actively
|
||||||
// being gossiped
|
// being gossiped
|
||||||
// LibP2p, as used by Serai, is configured to content-based deduplicate
|
// LibP2p, as used by Serai, is configured to content-based deduplicate
|
||||||
let mut to_broadcast = vec![TENDERMINT_MESSAGE];
|
{
|
||||||
to_broadcast.extend(msg.encode());
|
let mut to_rebroadcast_lock = self.to_rebroadcast.write().await;
|
||||||
self.to_rebroadcast.write().await.push(to_broadcast.clone());
|
to_rebroadcast_lock.push_back(to_broadcast.clone());
|
||||||
|
// We should have, ideally, 3 * validators messages within a round
|
||||||
|
// Therefore, this should keep the most recent 2-rounds
|
||||||
|
// TODO: This isn't perfect. Each participant should just rebroadcast their latest round of
|
||||||
|
// messages
|
||||||
|
while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) {
|
||||||
|
to_rebroadcast_lock.pop_front();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
self.p2p.broadcast(self.genesis, to_broadcast).await
|
self.p2p.broadcast(self.genesis, to_broadcast).await
|
||||||
}
|
}
|
||||||
|
|
||||||
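The bounding logic above is easy to miss in diff form. As a standalone sketch (our illustration, not the crate's API), assuming three messages per validator per round so that 6 * validators retains roughly the two most recent rounds:

use std::collections::VecDeque;

// Bounded rebroadcast buffer: newest messages at the back, oldest evicted
// from the front once more than two rounds' worth accumulate.
struct Rebroadcaster {
  queue: VecDeque<Vec<u8>>,
  validators: usize,
}

impl Rebroadcaster {
  fn record(&mut self, msg: Vec<u8>) {
    self.queue.push_back(msg);
    while self.queue.len() > (6 * self.validators) {
      self.queue.pop_front();
    }
  }
}

fn main() {
  let mut r = Rebroadcaster { queue: VecDeque::new(), validators: 4 };
  for i in 0u8 .. 100 {
    r.record(vec![i]);
  }
  // Only the most recent 6 * 4 = 24 messages are retained
  assert_eq!(r.queue.len(), 24);
}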
@@ -423,12 +413,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
     );
     match block_res {
       Ok(()) => {
-        // If we successfully added this block, broadcast it
-        // TODO: Move this under the coordinator once we set up on new block notifications?
-        let mut msg = serialized_block.0;
-        msg.insert(0, BLOCK_MESSAGE);
-        msg.extend(encoded_commit);
-        self.p2p.broadcast(self.genesis, msg).await;
+        // If we successfully added this block, break
         break;
       }
       Err(BlockError::NonLocalProvided(hash)) => {
@@ -437,13 +422,14 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
           hex::encode(hash),
           hex::encode(self.genesis)
         );
+        tokio::time::sleep(core::time::Duration::from_secs(5)).await;
       }
       _ => return invalid_block(),
     }
   }

   // Since we've added a valid block, clear to_rebroadcast
-  *self.to_rebroadcast.write().await = vec![];
+  *self.to_rebroadcast.write().await = VecDeque::new();

   Some(TendermintBlock(
     self.blockchain.write().await.build_block::<Self>(&self.signature_scheme()).serialize(),
@@ -1,3 +1,6 @@
+#[cfg(test)]
+mod tendermint;
+
 mod transaction;
 pub use transaction::*;

28
coordinator/tributary/src/tests/tendermint.rs
Normal file
@@ -0,0 +1,28 @@
+use tendermint::ext::Network;
+use crate::{
+  P2p, TendermintTx,
+  tendermint::{TARGET_BLOCK_TIME, TendermintNetwork},
+};
+
+#[test]
+fn assert_target_block_time() {
+  use serai_db::MemDb;
+
+  #[derive(Clone, Debug)]
+  pub struct DummyP2p;
+
+  #[async_trait::async_trait]
+  impl P2p for DummyP2p {
+    async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
+      unimplemented!()
+    }
+  }
+
+  // Type paremeters don't matter here since we only need to call the block_time()
+  // and it only relies on the constants of the trait implementation. block_time() is in seconds,
+  // TARGET_BLOCK_TIME is in milliseconds.
+  assert_eq!(
+    <TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
+    TARGET_BLOCK_TIME / 1000
+  )
+}
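For reference, with the constants this test exercises, TARGET_BLOCK_TIME = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) = 999 + (3 * 1667) = 6000 milliseconds, so the block_time() asserted above works out to 6000 / 1000 = 6 seconds.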
@@ -27,5 +27,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std", "
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
 tokio = { version = "1", default-features = false, features = ["time"] }

+serai-db = { path = "../../../common/db", version = "0.1", default-features = false }
+
 [dev-dependencies]
 tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }
@@ -3,6 +3,9 @@ use std::{
   collections::{HashSet, HashMap},
 };

+use parity_scale_codec::Encode;
+use serai_db::{Get, DbTxn, Db};
+
 use crate::{
   time::CanonicalInstant,
   ext::{RoundNumber, BlockNumber, Block, Network},
@@ -12,6 +15,9 @@ use crate::{
 };

 pub(crate) struct BlockData<N: Network> {
+  db: N::Db,
+  genesis: [u8; 32],
+
   pub(crate) number: BlockNumber,
   pub(crate) validator_id: Option<N::ValidatorId>,
   pub(crate) proposal: Option<N::Block>,
@@ -32,12 +38,17 @@ pub(crate) struct BlockData<N: Network> {

 impl<N: Network> BlockData<N> {
   pub(crate) fn new(
+    db: N::Db,
+    genesis: [u8; 32],
     weights: Arc<N::Weights>,
     number: BlockNumber,
     validator_id: Option<N::ValidatorId>,
     proposal: Option<N::Block>,
   ) -> BlockData<N> {
     BlockData {
+      db,
+      genesis,
+
       number,
       validator_id,
       proposal,
@@ -128,12 +139,35 @@ impl<N: Network> BlockData<N> {
     // 27, 33, 41, 46, 60, 64
     self.round_mut().step = data.step();

-    // Only return a message to if we're actually a current validator
-    self.validator_id.map(|validator_id| Message {
+    // Only return a message to if we're actually a current validator and haven't prior posted a
+    // message
+    let round_number = self.round().number;
+    let step = data.step();
+    let res = self.validator_id.map(|validator_id| Message {
       sender: validator_id,
       block: self.number,
-      round: self.round().number,
+      round: round_number,
       data,
-    })
+    });
+
+    if res.is_some() {
+      let mut txn = self.db.txn();
+      let key = [
+        b"tendermint-machine_already_sent_message".as_ref(),
+        &self.genesis,
+        &self.number.0.to_le_bytes(),
+        &round_number.0.to_le_bytes(),
+        &step.encode(),
+      ]
+      .concat();
+      // If we've already sent a message, return
+      if txn.get(&key).is_some() {
+        None?;
+      }
+      txn.put(&key, []);
+      txn.commit();
+    }
+
+    res
   }
 }
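Shown standalone, the idempotency key written above is just the concatenation of a static prefix with the genesis, block number, round number, and encoded step, letting a restarted machine notice it already sent a message for a given (block, round, step) and stay silent instead of equivocating. A sketch (the integer widths here are assumptions made for illustration):

// Key construction mirroring the diff above; BlockNumber/RoundNumber widths
// are assumed as u64/u32 for this example.
fn already_sent_key(genesis: &[u8; 32], block: u64, round: u32, encoded_step: &[u8]) -> Vec<u8> {
  [
    b"tendermint-machine_already_sent_message".as_ref(),
    genesis,
    &block.to_le_bytes(),
    &round.to_le_bytes(),
    encoded_step,
  ]
  .concat()
}

fn main() {
  let key = already_sent_key(&[0; 32], 5, 1, &[2]);
  // The static prefix namespaces the keys; the rest identifies the message slot
  assert!(key.starts_with(b"tendermint-machine_already_sent_message"));
}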
@@ -212,6 +212,9 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode
 /// Trait representing the distributed system Tendermint is providing consensus over.
 #[async_trait]
 pub trait Network: Sized + Send + Sync {
+  /// The database used to back this.
+  type Db: serai_db::Db;
+
   // Type used to identify validators.
   type ValidatorId: ValidatorId;
   /// Signature scheme used by validators.
@@ -231,6 +231,9 @@ pub enum SlashEvent {

 /// A machine executing the Tendermint protocol.
 pub struct TendermintMachine<N: Network> {
+  db: N::Db,
+  genesis: [u8; 32],
+
   network: N,
   signer: <N::SignatureScheme as SignatureScheme>::Signer,
   validators: N::SignatureScheme,
@@ -310,11 +313,16 @@ impl<N: Network + 'static> TendermintMachine<N> {
     let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now());
     if time_until_round_end == Duration::ZERO {
       log::trace!(
+        target: "tendermint",
         "resetting when prior round ended {}ms ago",
         Instant::now().saturating_duration_since(round_end.instant()).as_millis(),
       );
     }
-    log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis());
+    log::trace!(
+      target: "tendermint",
+      "sleeping until round ends in {}ms",
+      time_until_round_end.as_millis(),
+    );
     sleep(time_until_round_end).await;

     // Clear our outbound message queue
@@ -322,6 +330,8 @@ impl<N: Network + 'static> TendermintMachine<N> {

     // Create the new block
     self.block = BlockData::new(
+      self.db.clone(),
+      self.genesis,
       self.weights.clone(),
       BlockNumber(self.block.number.0 + 1),
       self.signer.validator_id().await,
@@ -370,7 +380,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
   /// the machine itself. The machine should have `run` called from an asynchronous task.
   #[allow(clippy::new_ret_no_self)]
   pub async fn new(
+    db: N::Db,
     network: N,
+    genesis: [u8; 32],
     last_block: BlockNumber,
     last_time: u64,
     proposal: N::Block,
@@ -409,6 +421,9 @@ impl<N: Network + 'static> TendermintMachine<N> {
     let validator_id = signer.validator_id().await;
     // 01-10
     let mut machine = TendermintMachine {
+      db: db.clone(),
+      genesis,
+
       network,
       signer,
       validators,
@@ -420,6 +435,8 @@ impl<N: Network + 'static> TendermintMachine<N> {
       synced_block_result_send,

       block: BlockData::new(
+        db,
+        genesis,
         weights,
         BlockNumber(last_block.0 + 1),
         validator_id,
@@ -586,7 +603,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
         );
         let id = block.id();
         let proposal = self.network.add_block(block, commit).await;
-        log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref()));
+        log::trace!(
+          target: "tendermint",
+          "added block {} (produced by machine)",
+          hex::encode(id.as_ref()),
+        );
         self.reset(msg.round, proposal).await;
       }
       Err(TendermintError::Malicious(sender, evidence)) => {
@@ -680,7 +701,12 @@ impl<N: Network + 'static> TendermintMachine<N> {
       (msg.round == self.block.round().number) &&
       (msg.data.step() == Step::Propose)
     {
-      log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0);
+      log::trace!(
+        target: "tendermint",
+        "received Propose for block {}, round {}",
+        msg.block.0,
+        msg.round.0,
+      );
     }

     // If this is a precommit, verify its signature
@@ -698,7 +724,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
     if !self.block.log.log(signed.clone())? {
       return Err(TendermintError::AlreadyHandled);
     }
-    log::debug!(target: "tendermint", "received new tendermint message");
+    log::debug!(
+      target: "tendermint",
+      "received new tendermint message (block: {}, round: {}, step: {:?})",
+      msg.block.0,
+      msg.round.0,
+      msg.data.step(),
+    );

     // All functions, except for the finalizer and the jump, are locked to the current round

@@ -745,6 +777,13 @@ impl<N: Network + 'static> TendermintMachine<N> {
     // 55-56
     // Jump, enabling processing by the below code
     if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() {
+      log::debug!(
+        target: "tendermint",
+        "jumping from round {} to round {}",
+        self.block.round().number.0,
+        msg.round.0,
+      );
+
       // Jump to the new round.
       let proposer = self.round(msg.round, None);

@@ -802,13 +841,26 @@ impl<N: Network + 'static> TendermintMachine<N> {
     if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
       let (participation, weight) =
         self.block.log.message_instances(self.block.round().number, &Data::Prevote(None));
+      let threshold_weight = self.weights.threshold();
+      if participation < threshold_weight {
+        log::trace!(
+          target: "tendermint",
+          "progess towards setting prevote timeout, participation: {}, needed: {}",
+          participation,
+          threshold_weight,
+        );
+      }
       // 34-35
-      if participation >= self.weights.threshold() {
+      if participation >= threshold_weight {
+        log::trace!(
+          target: "tendermint",
+          "setting timeout for prevote due to sufficient participation",
+        );
         self.block.round_mut().set_timeout(Step::Prevote);
       }

       // 44-46
-      if weight >= self.weights.threshold() {
+      if weight >= threshold_weight {
         self.broadcast(Data::Precommit(None));
         return Ok(None);
       }
@@ -818,6 +870,10 @@ impl<N: Network + 'static> TendermintMachine<N> {
     if matches!(msg.data, Data::Precommit(_)) &&
       self.block.log.has_participation(self.block.round().number, Step::Precommit)
     {
+      log::trace!(
+        target: "tendermint",
+        "setting timeout for precommit due to sufficient participation",
+      );
       self.block.round_mut().set_timeout(Step::Precommit);
     }

@@ -1,6 +1,5 @@
 use std::{sync::Arc, collections::HashMap};

-use log::debug;
 use parity_scale_codec::Encode;

 use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};
@@ -27,7 +26,7 @@ impl<N: Network> MessageLog<N> {
     let step = msg.data.step();
     if let Some(existing) = msgs.get(&step) {
       if existing.msg.data != msg.data {
-        debug!(
+        log::debug!(
           target: "tendermint",
           "Validator sent multiple messages for the same block + round + step"
         );
@@ -57,6 +57,7 @@ impl<N: Network> RoundData<N> {

   // Poll all set timeouts, returning the Step whose timeout has just expired
   pub(crate) async fn timeout_future(&self) -> Step {
+    /*
     let now = Instant::now();
     log::trace!(
       target: "tendermint",
@@ -64,6 +65,7 @@ impl<N: Network> RoundData<N> {
       self.step,
       self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()
     );
+    */

     let timeout_future = |step| {
       let timeout = self.timeouts.get(&step).copied();
@@ -10,6 +10,8 @@ use parity_scale_codec::{Encode, Decode};
 use futures_util::sink::SinkExt;
 use tokio::{sync::RwLock, time::sleep};

+use serai_db::MemDb;
+
 use tendermint_machine::{
   ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
   SlashEvent, TendermintMachine, TendermintHandle,
@@ -111,6 +113,8 @@ struct TestNetwork(

 #[async_trait]
 impl Network for TestNetwork {
+  type Db = MemDb;
+
   type ValidatorId = TestValidatorId;
   type SignatureScheme = TestSignatureScheme;
   type Weights = TestWeights;
@@ -170,7 +174,9 @@ impl TestNetwork {
       let i = u16::try_from(i).unwrap();
       let TendermintHandle { messages, synced_block, synced_block_result, machine } =
         TendermintMachine::new(
+          MemDb::new(),
           TestNetwork(i, arc.clone()),
+          [0; 32],
           BlockNumber(1),
           start_time,
           TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },
7
docs/.gitignore
vendored
Normal file
@@ -0,0 +1,7 @@
+_site/
+.sass-cache/
+.jekyll-cache/
+.jekyll-metadata
+
+.bundle/
+vendor/
1
docs/.ruby-version
Normal file
@@ -0,0 +1 @@
+3.1
4
docs/Gemfile
Normal file
@@ -0,0 +1,4 @@
+source 'https://rubygems.org'
+
+gem "jekyll", "~> 4.3.3"
+gem "just-the-docs", "0.8.1"
82
docs/Gemfile.lock
Normal file
@@ -0,0 +1,82 @@
+GEM
+  remote: https://rubygems.org/
+  specs:
+    addressable (2.8.6)
+      public_suffix (>= 2.0.2, < 6.0)
+    colorator (1.1.0)
+    concurrent-ruby (1.2.3)
+    em-websocket (0.5.3)
+      eventmachine (>= 0.12.9)
+      http_parser.rb (~> 0)
+    eventmachine (1.2.7)
+    ffi (1.16.3)
+    forwardable-extended (2.6.0)
+    google-protobuf (3.25.3-x86_64-linux)
+    http_parser.rb (0.8.0)
+    i18n (1.14.4)
+      concurrent-ruby (~> 1.0)
+    jekyll (4.3.3)
+      addressable (~> 2.4)
+      colorator (~> 1.0)
+      em-websocket (~> 0.5)
+      i18n (~> 1.0)
+      jekyll-sass-converter (>= 2.0, < 4.0)
+      jekyll-watch (~> 2.0)
+      kramdown (~> 2.3, >= 2.3.1)
+      kramdown-parser-gfm (~> 1.0)
+      liquid (~> 4.0)
+      mercenary (>= 0.3.6, < 0.5)
+      pathutil (~> 0.9)
+      rouge (>= 3.0, < 5.0)
+      safe_yaml (~> 1.0)
+      terminal-table (>= 1.8, < 4.0)
+      webrick (~> 1.7)
+    jekyll-include-cache (0.2.1)
+      jekyll (>= 3.7, < 5.0)
+    jekyll-sass-converter (3.0.0)
+      sass-embedded (~> 1.54)
+    jekyll-seo-tag (2.8.0)
+      jekyll (>= 3.8, < 5.0)
+    jekyll-watch (2.2.1)
+      listen (~> 3.0)
+    just-the-docs (0.8.1)
+      jekyll (>= 3.8.5)
+      jekyll-include-cache
+      jekyll-seo-tag (>= 2.0)
+      rake (>= 12.3.1)
+    kramdown (2.4.0)
+      rexml
+    kramdown-parser-gfm (1.1.0)
+      kramdown (~> 2.0)
+    liquid (4.0.4)
+    listen (3.9.0)
+      rb-fsevent (~> 0.10, >= 0.10.3)
+      rb-inotify (~> 0.9, >= 0.9.10)
+    mercenary (0.4.0)
+    pathutil (0.16.2)
+      forwardable-extended (~> 2.6)
+    public_suffix (5.0.4)
+    rake (13.1.0)
+    rb-fsevent (0.11.2)
+    rb-inotify (0.10.1)
+      ffi (~> 1.0)
+    rexml (3.2.6)
+    rouge (4.2.0)
+    safe_yaml (1.0.5)
+    sass-embedded (1.63.6)
+      google-protobuf (~> 3.23)
+      rake (>= 13.0.0)
+    terminal-table (3.0.2)
+      unicode-display_width (>= 1.1.1, < 3)
+    unicode-display_width (2.5.0)
+    webrick (1.8.1)
+
+PLATFORMS
+  x86_64-linux
+
+DEPENDENCIES
+  jekyll (~> 4.3.3)
+  just-the-docs (= 0.8.1)
+
+BUNDLED WITH
+   2.2.5
14
docs/_config.yml
Normal file
@@ -0,0 +1,14 @@
+title: Serai Documentation
+description: Documentation for the Serai protocol.
+theme: just-the-docs
+
+url: https://docs.serai.exchange
+
+callouts:
+  warning:
+    title: Warning
+    color: red
+
+  definition:
+    title: Definition
+    color: blue
19
docs/amm/index.md
Normal file
@@ -0,0 +1,19 @@
+---
+title: Automatic Market Makers
+layout: default
+nav_order: 2
+---
+
+# Automatic Market Makers
+
+*text on how AMMs work*
+
+Serai uses a symmetric liquidity pool with the `xy=k` formula.
+
+Concentrated liquidity would presumably offer less slippage on swaps, and there are
+[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420).
+Unfortunately, it effectively requires active management of provided liquidity.
+This disenfranchises small liquidity providers who may not have the knowledge
+and resources necessary to perform such management. Since Serai is expected to
+have a community-bootstrapped start, starting with concentrated liquidity would
+accordingly be contradictory.
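As a concrete illustration of the `xy=k` rule referenced in that page (the numbers are invented for the example; fees and the protocol's actual integer rounding are ignored):

// Constant-product swap: the pool keeps x * y = k invariant across trades.
fn swap_output(x_reserve: f64, y_reserve: f64, x_in: f64) -> f64 {
  let k = x_reserve * y_reserve;
  // After depositing x_in, y must shrink so the product stays at k
  y_reserve - (k / (x_reserve + x_in))
}

fn main() {
  // A pool of 100 coin-X and 10,000 coin-Y: swapping in 1 X yields ~99 Y,
  // slightly under the 100 Y spot price, the difference being slippage
  let out = swap_output(100.0, 10_000.0, 1.0);
  assert!((out - 99.0099).abs() < 0.001);
}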
7
docs/cross_chain/index.md
Normal file
@@ -0,0 +1,7 @@
+---
+title: Cross-Chain Architecture
+layout: default
+nav_order: 3
+---
+
+# Cross-Chain Architecture
6
docs/economics/genesis.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Genesis
+layout: default
+nav_order: 1
+parent: Economics
+---
45
docs/economics/index.md
Normal file
@@ -0,0 +1,45 @@
+---
+title: Economics
+layout: default
+nav_order: 4
+has_children: true
+---
+
+# Economics
+
+Serai's economics change depending on which of three eras is currently
+occurring.
+
+## Genesis Era
+
+The network starts with the "Genesis" era, where the goal of the network is to
+attract the liquidity necessary to facilitate swaps. This period will last for
+30 days and will let anyone add liquidity to the protocol. Only with its
+conclusion will SRI start being distributed.
+
+After the Genesis era, the network enters the "Pre-Economic Security" era.
+
+## Pre-Economic Security
+
+{: .definition-title }
+> Definition: Economic Security
+>
+> Economic security is derived from it being unprofitable to misbehave.
+> This is by the economic penalty which is presumed to occur upon misbehavior
+> exceeding the value which would presumably be gained.
+> Accordingly, rational actors would behave properly, causing the protocol to
+> maintain its integrity.
+>
+> For Serai specifically, the stake required to produce unintended signatures
+> must exceed the value accessible via producing unintended signatures.
+
+With liquidity provided, and swaps enabled, the goal is to have validators stake
+sufficiently for economic security to be achieved. This is primarily via
+offering freshly minted, staked SRI to would-be validators who decide to swap
+external coins for their stake.
+
+## Post-Economic Security
+
+Having achieved economic security, the protocol changes its economics one last
+time (barring future upgrades to the protocol) to a 'normal' state of
+operations.
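The definition callout in that page can be restated as an inequality: letting S be the (slashable) value of the stake backing a validator set's keys and V the value accessible by producing unintended signatures with those keys, economic security holds while S > V, since misbehaving then nets a guaranteed loss of at least S - V.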
6
docs/economics/post.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Post-Economic Security
+layout: default
+nav_order: 3
+parent: Economics
+---
6
docs/economics/pre.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Pre-Economic Security
+layout: default
+nav_order: 2
+parent: Economics
+---
32
docs/index.md
Normal file
@@ -0,0 +1,32 @@
+---
+title: Home
+layout: home
+nav_order: 1
+---
+
+{: .warning }
+This documentation site is still under active development and may have missing
+sections, errors, and typos. Even once this documentation site is 'complete', it
+may become out-of-date (as Serai is an evolving protocol yet to release) or have
+minor errors.
+
+# Serai
+
+Serai is a fairly launched cross-chain decentralized exchange, integrating
+Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR).
+
+The Serai mainnet has yet to launch, and until then, all details are subject to
+change.
+
+Prior to the Serai mainnet launching, SRI, Serai's native coin, will not
+exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale,
+no developers' tax/fund, and no airdrop for out-of-mainnet activity.
+
+Out-of-mainnet activity includes:
+
+- Being a community member (such as on Discord or on Twitter)
+- Participating in testnets
+- Contributing to the GitHub
+
+None of these will be awarded any airdrop. All distributions of SRI will happen
+on-chain per the protocols' defined rules, based on on-chain activity.
21
docs/infrastructure/coordinator.md
Normal file
@@ -0,0 +1,21 @@
+---
+title: Coordinator
+layout: default
+nav_order: 3
+parent: Infrastructure
+---
+
+# Coordinator
+
+The coordinator is a local service which communicates with other validators'
+coordinators. It provides a verifiable broadcast layer for various consensus
+messages, such as agreement on external blockchains, key generation and signing
+protocols, and the latest Serai block.
+
+The verifiable broadcast layer is implemented via a blockchain, referred to as a
+Tributary, which is agreed upon using Tendermint consensus. This consensus is
+not as offered by Tendermint Core/CometBFT, as used in the Cosmos SDK
+(historically/presently), yet by our own implementation designed to be used as a
+library and not as another daemon. Tributaries are ephemeral, only used by the
+current validators, and deleted upon the next epoch. All of the results from it
+are verifiable via the external network and the Serai blockchain alone.
6
docs/infrastructure/index.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Infrastructure
+layout: default
+nav_order: 6
+has_children: true
+---
29
docs/infrastructure/message_queue.md
Normal file
@@ -0,0 +1,29 @@
+---
+title: Message Queue
+layout: default
+nav_order: 1
+parent: Infrastructure
+---
+
+# Message Queue
+
+The Message Queue is a microservice to authenticate and relay messages between
+services. It offers just three functions:
+
+1) Queue a message.
+
+2) Receive the next message.
+
+3) Acknowledge a message, removing it from the queue.
+
+This ensures messages are delivered between services, with their order
+preserved. This also ensures that if a service reboots while handling a message,
+it'll still handle the message once rebooted (and the message will not be lost).
+
+The Message Queue also aims to offer increased liveliness and performance.
+If services directly communicated, the rate at which one service could operate
+would always be bottlenecked by the service it communicates with. If the
+receiving service ever went offline, the sending service wouldn't be able to
+deliver messages until the receiver came back online, halting its own work. By
+defining a dedicated microservice, with a lack of complex logic, it's much less
+likely to go offline or suffer from degraded performance.
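A hypothetical client-side shape for those three calls, to make the ordering and delivery guarantees concrete (the names and signatures below are our invention, not the service's actual RPC):

// Sketch of a Message Queue client; all identifiers here are hypothetical.
trait MessageQueue {
  // 1) Queue a message for a recipient service
  fn queue(&self, to: &str, msg: Vec<u8>);
  // 2) Receive the next not-yet-acknowledged message addressed to us
  fn next(&self) -> (u64, Vec<u8>);
  // 3) Acknowledge a message by ID, removing it from the queue
  fn ack(&self, id: u64);
}

// The intended usage pattern: acknowledge only after fully handling a
// message, so a crash mid-handling causes redelivery rather than loss.
fn run(mq: &dyn MessageQueue, handle: impl Fn(&[u8])) {
  loop {
    let (id, msg) = mq.next();
    handle(&msg);
    mq.ack(id);
  }
}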
21
docs/infrastructure/processor.md
Normal file
@@ -0,0 +1,21 @@
+---
+title: Processor
+layout: default
+nav_order: 2
+parent: Infrastructure
+---
+
+# Processor
+
+The processor performs several important tasks with regards to the external
+network. Each of them are documented in the following sections.
+
+## Key Generation
+
+## Scanning
+
+## Signing Batches
+
+## Planning Transactions
+
+## Cosigning
6
docs/infrastructure/serai.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Serai
+layout: default
+nav_order: 4
+parent: Infrastructure
+---
6
docs/integrating/index.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Integrating with Serai
+layout: default
+nav_order: 7
+has_children: true
+---
44
docs/protocol_changes/index.md
Normal file
@@ -0,0 +1,44 @@
+---
+title: Protocol Changes
+layout: default
+nav_order: 5
+---
+
+# Protocol Changes
+
+The protocol has no central authority nor organization nor actors (such as
+liquidity providers/validators) who can compel new protocol rules. The Serai
+protocol is as-written with all granted functionality and declared rules
+present.
+
+Validators are explicitly granted the ability to signal for two things to occur:
+
+### 1) Halt another validator set.
+
+This will presumably occur if another validator set turns malicious and is the
+expected incident response in order to apply an economic penalty of ideally
+greater value than damage wrecked. Halting a validator set prevents further
+publication of `Batch`s, preventing improper actions on the Serai blockchain,
+and preventing validators from unstaking (as unstaking only occurs once future
+validator sets have accepted responsibility, and accepting responsibility
+requires `Batch` publication). This effectively burns the malicious validators'
+stake.
+
+### 2) Retire the protocol.
+
+A supermajority of validators may favor a signal (an opaque 32-byte ID). A
+common signal gaining sufficient favor will cause the protocol to stop producing
+blocks in two weeks.
+
+Nodes will presumably, as individual entities, hard fork to new consensus rules.
+These rules presumably will remove the rule to stop producing blocks in two
+weeks, they may declare new validators, and they may declare new functionality
+entirely.
+
+While nodes individually hard fork, across every hard fork the state of the
+various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`)
+remains intact (unless the new rules modify such state). These coins can still
+be burned with instructions (unless the new rules prevent that) and if a
+validator set doesn't send `XYZ` as expected, they can be halted (effectively
+burning their `SRI` stake). Accordingly, every node decides if and how to future
+participate, with the abilities and powers they declare themselves to have.
0
docs/protocol_changes/signals.md
Normal file
6
docs/validator/index.md
Normal file
@@ -0,0 +1,6 @@
+---
+title: Running a Validator
+layout: default
+nav_order: 8
+has_children: true
+---
@@ -1,17 +1,20 @@
-FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder
+# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT)
+FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic

 # Move to a Debian package snapshot
 RUN rm -rf /etc/apt/sources.list.d/debian.sources && \
   rm -rf /var/lib/apt/lists/* && \
-  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \
+  echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \
   apt update

 # Install dependencies
-RUN apt install clang -y
+RUN apt update && apt upgrade && apt install clang -y

 # Add the wasm toolchain
 RUN rustup target add wasm32-unknown-unknown

+FROM deterministic
+
 # Add files for build
 ADD patches /serai/patches
 ADD common /serai/common
@@ -30,3 +33,8 @@ ADD Cargo.lock /serai
 ADD AGPL-3.0 /serai

 WORKDIR /serai
+
+# Build the runtime, copying it to the volume if it exists
+CMD cargo build --release -p serai-runtime && \
+  mkdir -p /volume && \
+  cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm
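Worth noting: the new base image is pinned by sha256 digest rather than the mutable version tag, and packages come from a dated snapshot.debian.org archive, so every build of this "deterministic" stage resolves the exact same compiler and system packages. Presumably this is what lets the runtime wasm written to /volume/serai.wasm be reproduced and compared across machines.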
@@ -43,8 +43,7 @@ CMD ["/run.sh"]
|
|||||||
network.label()
|
network.label()
|
||||||
);
|
);
|
||||||
|
|
||||||
let run =
|
let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin;
|
||||||
os(Os::Debian, "RUN mkdir /volume && chown bitcoin:bitcoin /volume", "bitcoin") + &run_bitcoin;
|
|
||||||
let res = setup + &run;
|
let res = setup + &run;
|
||||||
|
|
||||||
let mut bitcoin_path = orchestration_path.to_path_buf();
|
let mut bitcoin_path = orchestration_path.to_path_buf();
|
||||||
|
|||||||
@@ -55,12 +55,9 @@ CMD ["/run.sh"]
|
|||||||
network.label(),
|
network.label(),
|
||||||
);
|
);
|
||||||
|
|
||||||
let run = crate::os(
|
let run =
|
||||||
os,
|
crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") +
|
||||||
&("RUN mkdir /volume && chown monero /volume\r\n".to_string() +
|
&run_monero;
|
||||||
if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }),
|
|
||||||
"monero",
|
|
||||||
) + &run_monero;
|
|
||||||
let res = setup + &run;
|
let res = setup + &run;
|
||||||
|
|
||||||
let mut monero_path = orchestration_path.to_path_buf();
|
let mut monero_path = orchestration_path.to_path_buf();
|
||||||
|
|||||||
@@ -11,7 +11,7 @@ pub fn coordinator(
|
|||||||
orchestration_path: &Path,
|
orchestration_path: &Path,
|
||||||
network: Network,
|
network: Network,
|
||||||
coordinator_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
coordinator_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
|
serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||||
) {
|
) {
|
||||||
let db = network.db();
|
let db = network.db();
|
||||||
let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" };
|
let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" };
|
||||||
@@ -27,13 +27,16 @@ pub fn coordinator(
|
|||||||
RUN apt install -y ca-certificates
|
RUN apt install -y ca-certificates
|
||||||
"#;
|
"#;
|
||||||
|
|
||||||
|
#[rustfmt::skip]
|
||||||
|
const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error";
|
||||||
|
|
||||||
let env_vars = [
|
let env_vars = [
|
||||||
("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
|
("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
|
||||||
("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())),
|
("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())),
|
||||||
("DB_PATH", "./coordinator-db".to_string()),
|
("DB_PATH", "/volume/coordinator-db".to_string()),
|
||||||
("SERAI_KEY", hex::encode(serai_key.to_repr())),
|
("SERAI_KEY", hex::encode(serai_key.to_repr())),
|
||||||
("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())),
|
("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())),
|
||||||
("RUST_LOG", "serai_coordinator=debug,tributary_chain=debug,tendermint=debug".to_string()),
|
("RUST_LOG", DEFAULT_RUST_LOG.to_string()),
|
||||||
];
|
];
|
||||||
let mut env_vars_str = String::new();
|
let mut env_vars_str = String::new();
|
||||||
for (env_var, value) in env_vars {
|
for (env_var, value) in env_vars {
|
||||||
|
|||||||
@@ -2,7 +2,14 @@
 // TODO: Generate keys for a validator and the infra

 use core::ops::Deref;
-use std::{collections::HashSet, env, path::PathBuf, io::Write, fs, process::Command};
+use std::{
+  collections::{HashSet, HashMap},
+  env,
+  path::PathBuf,
+  io::Write,
+  fs,
+  process::{Stdio, Command},
+};

 use zeroize::Zeroizing;

@@ -89,8 +96,12 @@ ENV LD_PRELOAD=libmimalloc.so

 RUN apk update && apk upgrade

-# System user (not a human), shell of nologin, no password assigned
-RUN adduser -S -s /sbin/nologin -D {user}
+RUN adduser --system --shell /sbin/nologin --disabled-password {user}
+RUN addgroup {user}
+RUN addgroup {user} {user}
+
+# Make the /volume directory and transfer it to the user
+RUN mkdir /volume && chown {user}:{user} /volume

 {additional_root}

@@ -110,7 +121,10 @@ RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload

 RUN apt update && apt upgrade -y && apt autoremove -y && apt clean

-RUN useradd --system --create-home --shell /sbin/nologin {user}
+RUN useradd --system --user-group --create-home --shell /sbin/nologin {user}
+
+# Make the /volume directory and transfer it to the user
+RUN mkdir /volume && chown {user}:{user} /volume

 {additional_root}

@@ -129,7 +143,7 @@ fn build_serai_service(release: bool, features: &str, package: &str) -> String {

   format!(
     r#"
-FROM rust:1.76-slim-bookworm as builder
+FROM rust:1.77-slim-bookworm as builder

 COPY --from=mimalloc-debian libmimalloc.so /usr/lib
 RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload
@@ -199,6 +213,55 @@ fn orchestration_path(network: Network) -> PathBuf {
   orchestration_path
 }

+type InfrastructureKeys =
+  HashMap<&'static str, (Zeroizing<<Ristretto as Ciphersuite>::F>, <Ristretto as Ciphersuite>::G)>;
+fn infrastructure_keys(network: Network) -> InfrastructureKeys {
+  // Generate entropy for the infrastructure keys
+
+  let entropy = if network == Network::Dev {
+    // Don't use actual entropy if this is a dev environment
+    Zeroizing::new([0; 32])
+  } else {
+    let path = home::home_dir()
+      .unwrap()
+      .join(".serai")
+      .join(network.label())
+      .join("infrastructure_keys_entropy");
+    // Check if there's existing entropy
+    if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) {
+      assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes");
+      let mut res = Zeroizing::new([0; 32]);
+      res.copy_from_slice(entropy.as_ref());
+      res
+    } else {
+      // If there isn't, generate fresh entropy
+      let mut res = Zeroizing::new([0; 32]);
+      OsRng.fill_bytes(res.as_mut());
+      fs::write(&path, &res).unwrap();
+      res
+    }
+  };
+
+  let mut transcript =
+    RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript");
+  transcript.append_message(b"network", network.label().as_bytes());
+  transcript.append_message(b"entropy", entropy);
+  let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys"));
+
+  let mut key_pair = || {
+    let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut rng));
+    let public = Ristretto::generator() * key.deref();
+    (key, public)
+  };
+
+  HashMap::from([
+    ("coordinator", key_pair()),
+    ("bitcoin", key_pair()),
+    ("ethereum", key_pair()),
+    ("monero", key_pair()),
+  ])
+}
+
 fn dockerfiles(network: Network) {
   let orchestration_path = orchestration_path(network);

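Note the design here: every infrastructure key is derived from a single 32-byte entropy blob via a transcript, and outside of Dev that entropy is persisted to ~/.serai/<network>/infrastructure_keys_entropy on first use. Re-running the orchestrator therefore reproduces the same keys instead of rotating them, while Dev's all-zero entropy makes its keys fully predictable.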
@@ -209,28 +272,11 @@ fn dockerfiles(network: Network) {
|
|||||||
monero_wallet_rpc(&orchestration_path);
|
monero_wallet_rpc(&orchestration_path);
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: Generate infra keys in key_gen, yet service entropy here?
|
let mut infrastructure_keys = infrastructure_keys(network);
|
||||||
|
let coordinator_key = infrastructure_keys.remove("coordinator").unwrap();
|
||||||
// Generate entropy for the infrastructure keys
|
let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap();
|
||||||
let mut entropy = Zeroizing::new([0; 32]);
|
let ethereum_key = infrastructure_keys.remove("ethereum").unwrap();
|
||||||
// Only use actual entropy if this isn't a development environment
|
let monero_key = infrastructure_keys.remove("monero").unwrap();
|
||||||
if network != Network::Dev {
|
|
||||||
OsRng.fill_bytes(entropy.as_mut());
|
|
||||||
}
|
|
||||||
let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Transcript");
|
|
||||||
transcript.append_message(b"entropy", entropy);
|
|
||||||
let mut new_rng = |label| ChaCha20Rng::from_seed(transcript.rng_seed(label));
|
|
||||||
|
|
||||||
let mut message_queue_keys_rng = new_rng(b"message_queue_keys");
|
|
||||||
let mut key_pair = || {
|
|
||||||
let key = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut message_queue_keys_rng));
|
|
||||||
let public = Ristretto::generator() * key.deref();
|
|
||||||
(key, public)
|
|
||||||
};
|
|
||||||
let coordinator_key = key_pair();
|
|
||||||
let bitcoin_key = key_pair();
|
|
||||||
let ethereum_key = key_pair();
|
|
||||||
let monero_key = key_pair();
|
|
||||||
|
|
||||||
message_queue(
|
message_queue(
|
||||||
&orchestration_path,
|
&orchestration_path,
|
||||||
@@ -241,10 +287,9 @@ fn dockerfiles(network: Network) {
|
|||||||
monero_key.1,
|
monero_key.1,
|
||||||
);
|
);
|
||||||
|
|
||||||
let mut processor_entropy_rng = new_rng(b"processor_entropy");
|
let new_entropy = || {
|
||||||
let mut new_entropy = || {
|
|
||||||
let mut res = Zeroizing::new([0; 32]);
|
let mut res = Zeroizing::new([0; 32]);
|
||||||
processor_entropy_rng.fill_bytes(res.as_mut());
|
OsRng.fill_bytes(res.as_mut());
|
||||||
res
|
res
|
||||||
};
|
};
|
||||||
processor(
|
processor(
|
||||||
@@ -276,9 +321,9 @@ fn dockerfiles(network: Network) {
|
|||||||
Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(*serai_key_repr).unwrap())
|
Zeroizing::new(<Ristretto as Ciphersuite>::F::from_repr(*serai_key_repr).unwrap())
|
||||||
};
|
};
|
||||||
|
|
||||||
coordinator(&orchestration_path, network, coordinator_key.0, serai_key);
|
coordinator(&orchestration_path, network, coordinator_key.0, &serai_key);
|
||||||
|
|
||||||
serai(&orchestration_path, network);
|
serai(&orchestration_path, network, &serai_key);
|
||||||
}
|
}
|
||||||
|
|
||||||
fn key_gen(network: Network) {
|
fn key_gen(network: Network) {
|
||||||
@@ -325,6 +370,87 @@ fn start(network: Network, services: HashSet<String>) {
_ => panic!("starting unrecognized service"),
};

// If we're building the Serai service, first build the runtime
let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label());
if name == "serai" {
// Check if it's built by checking if the volume has the expected runtime file
let built = || {
if let Ok(path) = Command::new("docker")
.arg("volume")
.arg("inspect")
.arg("-f")
.arg("{{ .Mountpoint }}")
.arg(&serai_runtime_volume)
.output()
{
if let Ok(path) = String::from_utf8(path.stdout) {
if let Ok(iter) = std::fs::read_dir(PathBuf::from(path.trim())) {
for item in iter.flatten() {
if item.file_name() == "serai.wasm" {
return true;
}
}
}
}
}
false
};

if !built() {
let mut repo_path = env::current_exe().unwrap();
repo_path.pop();
if repo_path.as_path().ends_with("deps") {
repo_path.pop();
}
assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release"));
repo_path.pop();
assert!(repo_path.as_path().ends_with("target"));
repo_path.pop();

// Build the image to build the runtime
if !Command::new("docker")
.current_dir(&repo_path)
.arg("build")
.arg("-f")
.arg("orchestration/runtime/Dockerfile")
.arg(".")
.arg("-t")
.arg(format!("serai-{}-runtime-img", network.label()))
.spawn()
.unwrap()
.wait()
.unwrap()
.success()
{
panic!("failed to build runtime image");
}

// Run the image, building the runtime
println!("Building the Serai runtime");
let container_name = format!("serai-{}-runtime", network.label());
let _ =
Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait();
let _ = Command::new("docker")
.arg("run")
.arg("--name")
.arg(container_name)
.arg("--volume")
.arg(format!("{serai_runtime_volume}:/volume"))
.arg(format!("serai-{}-runtime-img", network.label()))
.spawn();

// Wait until its built
let mut ticks = 0;
while !built() {
std::thread::sleep(core::time::Duration::from_secs(60));
ticks += 1;
if ticks > 6 * 60 {
panic!("couldn't build the runtime after 6 hours")
}
}
}
}

// Build it
println!("Building {service}");
docker::build(&orchestration_path(network), network, name);
@@ -335,6 +461,10 @@ fn start(network: Network, services: HashSet<String>) {
.arg("container")
.arg("inspect")
.arg(&docker_name)
// Use null for all IO to silence 'container does not exist'
.stdin(Stdio::null())
.stdout(Stdio::null())
.stderr(Stdio::null())
.status()
.unwrap()
.success()
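
The `Stdio::null()` lines added here keep `docker container inspect` from printing "no such container" noise when the probe is expected to fail; only the exit status matters. The pattern in isolation (the function name is illustrative, not from the repo):

```
use std::process::{Command, Stdio};

// Probe for a container purely by exit status, with all IO silenced.
fn container_exists(name: &str) -> bool {
  Command::new("docker")
    .arg("container")
    .arg("inspect")
    .arg(name)
    .stdin(Stdio::null())
    .stdout(Stdio::null())
    .stderr(Stdio::null())
    .status()
    .map(|status| status.success())
    .unwrap_or(false)
}
```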
@@ -346,25 +476,53 @@ fn start(network: Network, services: HashSet<String>) {
let command = command.arg("create").arg("--name").arg(&docker_name);
let command = command.arg("--network").arg("serai");
let command = command.arg("--restart").arg("always");
let command = command.arg("--log-opt").arg("max-size=100m");
let command = command.arg("--log-opt").arg("max-file=3");
let command = if network == Network::Dev {
command
} else {
// Assign a persistent volume if this isn't for Dev
command.arg("--volume").arg(volume)
};
let command = match name {
"bitcoin" => {
// Expose the RPC for tests
if network == Network::Dev {
command.arg("-p").arg("8332:8332")
} else {
command.arg("--volume").arg(volume)
command
}
}
"monero" => {
// Expose the RPC for tests
if network == Network::Dev {
command.arg("-p").arg("18081:18081")
} else {
command.arg("--volume").arg(volume)
command
}
}
"monero-wallet-rpc" => {
assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev");
// Expose the RPC for tests
command.arg("-p").arg("18082:18082")
}
"coordinator" => {
if network == Network::Dev {
command
} else {
// Publish the port
command.arg("-p").arg("30564:30564")
}
}
"serai" => {
let command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime"));
if network == Network::Dev {
command
} else {
// Publish the port
command.arg("-p").arg("30333:30333")
}
}
_ => command,
};
assert!(
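
Since `Command::arg` returns `&mut Command`, the hunk above can thread `command` through `if`/`match` arms, rebinding it only when a flag applies (persistent volumes outside Dev, published ports otherwise). In miniature (hypothetical flags, same shape):

```
use std::process::Command;

fn build_command(dev: bool) {
  let mut base = Command::new("docker");
  let command = base.arg("create");
  // Each arm must yield the same `&mut Command`, so bare `command` is a valid branch.
  let command = if dev { command } else { command.arg("--restart").arg("always") };
  let _ = command;
}
```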
@@ -388,10 +546,10 @@ Serai Orchestrator v0.0.1

Commands:
key_gen *network*
Generates a key for the validator.
Generate a key for the validator.

setup *network*
Generate infrastructure keys and the Dockerfiles for every Serai service.
Generate the Dockerfiles for every Serai service.

start *network* [service1, service2...]
Start the specified services for the specified network ("dev" or "testnet").
@@ -20,8 +20,8 @@ pub fn message_queue(
("BITCOIN_KEY", hex::encode(bitcoin_key.to_bytes())),
("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())),
("MONERO_KEY", hex::encode(monero_key.to_bytes())),
("DB_PATH", "./message-queue-db".to_string()),
("DB_PATH", "/volume/message-queue-db".to_string()),
("RUST_LOG", "serai_message_queue=trace".to_string()),
("RUST_LOG", "info,serai_message_queue=trace".to_string()),
];
let mut env_vars_str = String::new();
for (env_var, value) in env_vars {
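
The new `RUST_LOG` value prepends a default: `info,serai_message_queue=trace` logs every target at `info` while raising the `serai_message_queue` target to `trace`. A sketch of how such a directive behaves, assuming an `env_logger`-style filter (this diff doesn't show which logger the service initializes):

```
// Hypothetical demonstration; depends on the `env_logger` and `log` crates.
fn main() {
  std::env::set_var("RUST_LOG", "info,serai_message_queue=trace");
  env_logger::init();
  log::info!("emitted: the bare `info` directive sets the default level");
  log::trace!("suppressed here, but emitted within the serai_message_queue target");
}
```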
@@ -40,15 +40,15 @@ RUN apt install -y ca-certificates
};

let env_vars = [
("MESSAGE_QUEUE_RPC", format!("serai-{}-message_queue", network.label())),
("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())),
("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())),
("ENTROPY", hex::encode(entropy.as_ref())),
("NETWORK", coin.to_string()),
("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")),
("NETWORK_RPC_HOSTNAME", hostname),
("NETWORK_RPC_PORT", format!("{port}")),
("DB_PATH", "./processor-db".to_string()),
("DB_PATH", "/volume/processor-db".to_string()),
("RUST_LOG", "serai_processor=debug".to_string()),
("RUST_LOG", "info,serai_processor=debug".to_string()),
];
let mut env_vars_str = String::new();
for (env_var, value) in env_vars {
@@ -1,14 +1,26 @@
use std::{path::Path};

use zeroize::Zeroizing;
use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};

use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile};

pub fn serai(orchestration_path: &Path, network: Network) {
pub fn serai(
orchestration_path: &Path,
network: Network,
serai_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
// Always builds in release for performance reasons
let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node");
let setup_fast_epoch =
mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node");

// TODO: Review the ports exposed here
let env_vars = [("KEY", hex::encode(serai_key.to_repr()))];
let mut env_vars_str = String::new();
for (env_var, value) in env_vars {
env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
}

let run_serai = format!(
r#"
# Copy the Serai binary and relevant license
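
The loop added to `serai()` renders each pair as a POSIX shell default-assignment, so a value baked into the Dockerfile's CMD can still be overridden at `docker run` time. A quick check of the fragment it produces (key value illustrative):

```
fn main() {
  let env_vars = [("KEY", "0123abcd".to_string())];
  let mut env_vars_str = String::new();
  for (env_var, value) in env_vars {
    env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#);
  }
  // `${KEY:="..."}` assigns the default only if KEY is unset, then expands it.
  assert_eq!(env_vars_str, r#"KEY=${KEY:="0123abcd"} "#);
}
```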
@@ -16,12 +28,12 @@ COPY --from=builder --chown=serai /serai/bin/serai-node /bin/
COPY --from=builder --chown=serai /serai/AGPL-3.0 .

# Run the Serai node
EXPOSE 30333 9615 9933 9944
EXPOSE 30333 9944

ADD /orchestration/{}/serai/run.sh /
CMD ["/run.sh"]
CMD {env_vars_str} "/run.sh"
"#,
network.label()
network.label(),
);

let run = os(Os::Debian, "", "serai") + &run_serai;
@@ -1,3 +1,3 @@
#!/bin/sh

exit 1
serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator
26
patches/rocksdb/Cargo.toml
Normal file
@@ -0,0 +1,26 @@
[package]
name = "rocksdb"
version = "0.21.0"
description = "rocksdb which patches to the latest update"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/patches/rocksdb"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.70"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
rocksdb = { version = "0.22", default-features = false }

[features]
jemalloc = []
snappy = ["rocksdb/snappy"]
lz4 = ["rocksdb/lz4"]
zstd = ["rocksdb/zstd"]
zlib = ["rocksdb/zlib"]
bzip2 = ["rocksdb/bzip2"]
default = ["snappy", "lz4", "zstd", "zlib", "bzip2"]
1
patches/rocksdb/src/lib.rs
Normal file
@@ -0,0 +1 @@
pub use rocksdb::*;
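
The new `patches/rocksdb` crate is a shim: it declares itself as `rocksdb` 0.21.0 but re-exports rocksdb 0.22 wholesale via the `pub use` above, letting dependencies pinned to 0.21 transparently build against the newer release. Presumably it's wired in from the workspace root with a `[patch.crates-io]` entry along these lines (the root Cargo.toml is not shown in this diff):

```
[patch.crates-io]
rocksdb = { path = "patches/rocksdb" }
```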
@@ -14,4 +14,4 @@ all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
zstd = "0.12"
zstd = "0.13"
@@ -1,5 +1,5 @@
[toolchain]
channel = "1.76"
channel = "1.77"
targets = ["wasm32-unknown-unknown"]
profile = "minimal"
components = ["rust-src", "rustfmt", "clippy"]
@@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly

```
cargo install svm-rs
svm install 0.8.16
svm install 0.8.25
svm use 0.8.16
svm use 0.8.25
```

### Install Solidity Compiler Version Manager

```
cargo install svm-rs
svm install 0.8.16
svm install 0.8.25
svm use 0.8.16
svm use 0.8.25
```

### Install foundry (for tests)
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user