Mirror of https://github.com/serai-dex/serai.git
Re-arrange coordinator/
coordinator/tributary was the tributary-chain crate. It has been renamed tributary-sdk and moved to coordinator/tributary-sdk. coordinator/src/tributary was our instantiation of a Tributary (the Transaction type and the scan task); it has been moved to coordinator/tributary.

The main motivation was that coordinator/main.rs had become untidy. The codebase now contains a collection of clean, independent APIs, and coordinator/main.rs exists to compose them. Sometimes these compositions are a bit silly (reading from a channel just to forward the message to a distinct channel). That's more than fine: the code stays readable, and the value gained from the cleanliness of the composed APIs far exceeds the nits of having these odd compositions. This breaks down a bit now that we define a global database and have some APIs interact with multiple other APIs.

coordinator/src/tributary was a self-contained, clean API. The recently added task in coordinator/tributary/mod.rs, which bound it to the rest of the Coordinator, wasn't. Now, coordinator/src is solely the API compositions, and all self-contained APIs are their own crates.
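As a rough sketch of the kind of channel-forwarding composition described above (the names, the message type, and the use of tokio mpsc channels here are assumptions for illustration, not code from this commit):

```rust
use tokio::sync::mpsc;

// Hypothetical glue task for coordinator/main.rs: read from one API's output channel and
// forward into another API's input channel, so neither API has to know about the other.
async fn forward<T: Send + 'static>(
  mut from_tributary_scanner: mpsc::UnboundedReceiver<T>,
  to_p2p: mpsc::UnboundedSender<T>,
) {
  while let Some(msg) = from_tributary_scanner.recv().await {
    // If the downstream API has shut down, there's nothing left to forward to
    if to_p2p.send(msg).is_err() {
      break;
    }
  }
}
```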
coordinator/tributary-sdk/tendermint/Cargo.toml (new file, 33 lines)
@@ -0,0 +1,33 @@
[package]
name = "tendermint-machine"
version = "0.2.0"
description = "An implementation of the Tendermint state machine in Rust"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/tendermint"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
log = { version = "0.4", default-features = false, features = ["std"] }

parity-scale-codec = { version = "3", default-features = false, features = ["std", "derive"] }

futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
patchable-async-sleep = { version = "0.1", path = "../../../common/patchable-async-sleep", default-features = false }

serai-db = { path = "../../../common/db", version = "0.1", default-features = false }

[dev-dependencies]
tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] }
coordinator/tributary-sdk/tendermint/LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2022-2023 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
coordinator/tributary-sdk/tendermint/README.md (new file, 61 lines)
@@ -0,0 +1,61 @@
# Tendermint

An implementation of the Tendermint state machine in Rust.

This is solely the state machine, intended to be mapped to any arbitrary system.
It supports an arbitrary signature scheme, weighting, and block definition
accordingly. It is not intended to work with the Cosmos SDK, solely to be an
implementation of the [academic protocol](https://arxiv.org/pdf/1807.04938.pdf).

### Caveats

- Only SCALE serialization is supported currently. Ideally, everything from
  SCALE to borsh to bincode would be supported. SCALE was chosen due to this
  being under Serai, which uses Substrate, which uses SCALE. Accordingly, when
  deciding which of the three (mutually incompatible) options to support...

- The only supported runtime is tokio due to requiring a `sleep` implementation.
  Ideally, the runtime choice will be moved to a feature in the future.

- It is possible for `add_block` to be called on a block which failed
  validation (or never went through validation in the first place). This is a
  break from the paper which is accepted here, for two reasons:

  1) Serai needs this functionality.
  2) If a block is committed which is invalid, either there's a malicious
     majority now defining consensus OR the local node is malicious by virtue
     of being faulty. Considering how either represents a fatal circumstance,
     except with regards to systems like Serai which have their own logic for
     pseudo-valid blocks, it is accepted as a possible behavior with the caveat
     that any consumers must be aware of it. No machine will vote nor precommit
     to a block it considers invalid, so for a network with an honest majority,
     this is a non-issue.

### Paper

The [paper](https://arxiv.org/abs/1807.04938) describes the algorithm with
pseudocode on page 6. This pseudocode isn't directly implementable, nor does it
specify faulty behavior. Instead, it's solely a series of conditions which
trigger events in order to successfully achieve consensus.

The included pseudocode segments can be minimally described as follows:

```
01-09 Init
10-10 StartRound(0)
11-21 StartRound
22-27 Fresh proposal
28-33 Proposal building off a valid round with prevotes
34-35 2f+1 prevote -> schedule timeout prevote
36-43 First proposal with prevotes -> precommit Some
44-46 2f+1 nil prevote -> precommit nil
47-48 2f+1 precommit -> schedule timeout precommit
49-54 First proposal with precommits -> finalize
55-56 f+1 round > local round, jump
57-60 on timeout propose
61-64 on timeout prevote
65-67 on timeout precommit
```

The corresponding Rust code implementing these tasks is marked with the
related line numbers.
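The pseudocode above refers to 2f+1 and f+1 quorums. As a minimal worked example (not part of this commit), here is how those quorums fall out of the default `threshold` and `fault_threshold` formulas on the `Weights` trait in `ext.rs` below, assuming four equally-weighted validators as in the test network at the end of this diff:

```rust
// Default threshold math from the Weights trait in ext.rs.
fn threshold(total_weight: u64) -> u64 {
  ((total_weight * 2) / 3) + 1
}

fn fault_threshold(total_weight: u64) -> u64 {
  (total_weight - threshold(total_weight)) + 1
}

fn main() {
  // With total weight 4 (so f = 1): 2f+1 = 3 finalizes, while f+1 = 2 can block consensus
  assert_eq!(threshold(4), 3);
  assert_eq!(fault_threshold(4), 2);
}
```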
coordinator/tributary-sdk/tendermint/src/block.rs (new file, 208 lines)
@@ -0,0 +1,208 @@
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use serai_db::{Get, DbTxn, Db};

use crate::{
  time::CanonicalInstant,
  ext::{RoundNumber, BlockNumber, Block, Network},
  round::RoundData,
  message_log::MessageLog,
  Step, Data, DataFor, Message, MessageFor,
};

pub(crate) struct BlockData<N: Network> {
  db: N::Db,
  genesis: [u8; 32],

  pub(crate) number: BlockNumber,
  pub(crate) validator_id: Option<N::ValidatorId>,
  pub(crate) our_proposal: Option<N::Block>,

  pub(crate) log: MessageLog<N>,
  pub(crate) slashes: HashSet<N::ValidatorId>,
  // We track the end times of each round for two reasons:
  // 1) Knowing the start time of the next round
  // 2) Validating precommits, which include the end time of the round which produced it
  // This HashMap contains the end time of the round we're currently in and every round prior
  pub(crate) end_time: HashMap<RoundNumber, CanonicalInstant>,

  pub(crate) round: Option<RoundData<N>>,

  pub(crate) locked: Option<(RoundNumber, <N::Block as Block>::Id)>,
  pub(crate) valid: Option<(RoundNumber, N::Block)>,
}

impl<N: Network> BlockData<N> {
  pub(crate) fn new(
    db: N::Db,
    genesis: [u8; 32],
    weights: Arc<N::Weights>,
    number: BlockNumber,
    validator_id: Option<N::ValidatorId>,
    our_proposal: Option<N::Block>,
  ) -> BlockData<N> {
    BlockData {
      db,
      genesis,

      number,
      validator_id,
      our_proposal,

      log: MessageLog::new(weights),
      slashes: HashSet::new(),
      end_time: HashMap::new(),

      // `round` is expected to be populated by the caller after construction
      round: None,

      locked: None,
      valid: None,
    }
  }

  pub(crate) fn round(&self) -> &RoundData<N> {
    self.round.as_ref().unwrap()
  }

  pub(crate) fn round_mut(&mut self) -> &mut RoundData<N> {
    self.round.as_mut().unwrap()
  }

  // Populate the end time up to the specified round
  // This is generally used when moving to the next round, where this will only populate one time,
  // yet is also used when jumping rounds (when 33% of the validators are on a round ahead of us)
  pub(crate) fn populate_end_time(&mut self, round: RoundNumber) {
    // Starts from the current round since we only start the current round once we have all
    // the prior time data
    for r in (self.round().number.0 + 1) ..= round.0 {
      self.end_time.insert(
        RoundNumber(r),
        RoundData::<N>::new(RoundNumber(r), self.end_time[&RoundNumber(r - 1)]).end_time(),
      );
    }
  }

  // Start a new round. Optionally takes in the time for when this is the first round, and the time
  // isn't simply the time of the prior round (yet rather the prior block). Returns the proposal
  // data, if we are the proposer.
  pub(crate) fn new_round(
    &mut self,
    round: RoundNumber,
    proposer: N::ValidatorId,
    time: Option<CanonicalInstant>,
  ) -> Option<DataFor<N>> {
    debug_assert_eq!(round.0 == 0, time.is_some());

    // If this is the first round, we don't have a prior round's end time to use as the start
    // We use the passed in time instead
    // If this isn't the first round, ensure we have the prior round's end time by populating the
    // map with all rounds till this round
    // This can happen when we jump from round x to round x+n, where n != 1
    // The paper says to do so whenever you observe a sufficient amount of peers on a higher round
    if round.0 != 0 {
      self.populate_end_time(round);
    }

    // L11-13
    self.round = Some(RoundData::<N>::new(
      round,
      time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 - 1)]),
    ));
    self.end_time.insert(round, self.round().end_time());

    // L14-21
    if Some(proposer) == self.validator_id {
      let (round, block) = self.valid.clone().unzip();
      block.or_else(|| self.our_proposal.clone()).map(|block| Data::Proposal(round, block))
    } else {
      self.round_mut().set_timeout(Step::Propose);
      None
    }
  }

  // Transform Data into an actual Message, using the contextual data from this block
  pub(crate) fn message(&mut self, data: DataFor<N>) -> Option<MessageFor<N>> {
    debug_assert_eq!(
      self.round().step,
      match data.step() {
        Step::Propose | Step::Prevote => Step::Propose,
        Step::Precommit => Step::Prevote,
      },
    );
    // Tendermint always sets the round's step to whatever it just broadcasted
    // Consolidate all of those here to ensure they aren't missed by an oversight
    // 27, 33, 41, 46, 60, 64
    self.round_mut().step = data.step();

    // Only return a message if we're actually a current validator
    let round_number = self.round().number;
    let res = self.validator_id.map(|validator_id| Message {
      sender: validator_id,
      block: self.number,
      round: round_number,
      data,
    });

    if let Some(res) = res.as_ref() {
      const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block";
      const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round";
      const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose";
      const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote";
      const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit";

      let genesis = self.genesis;
      let key = |prefix: &[u8]| [prefix, &genesis].concat();

      let mut txn = self.db.txn();

      // Ensure we haven't prior sent a message for a future block/round
      let last_block_or_round = |txn: &mut <N::Db as Db>::Transaction<'_>, prefix, current| {
        let key = key(prefix);
        let latest =
          u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap());
        if latest > current {
          None?;
        }
        if current > latest {
          txn.put(&key, current.to_le_bytes());
          return Some(true);
        }
        Some(false)
      };
      let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?;
      if new_block {
        // Delete the latest round key
        txn.del(key(LATEST_ROUND_KEY));
      }
      let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?;
      if new_block || new_round {
        // Delete the messages for the old round
        txn.del(key(PROPOSE_KEY));
        txn.del(key(PEVOTE_KEY));
        txn.del(key(PRECOMMIT_KEY));
      }

      // Check we haven't sent this message within this round
      let msg_key = key(match res.data.step() {
        Step::Propose => PROPOSE_KEY,
        Step::Prevote => PEVOTE_KEY,
        Step::Precommit => PRECOMMIT_KEY,
      });
      if txn.get(&msg_key).is_some() {
        assert!(!new_block);
        assert!(!new_round);
        None?;
      }
      // Put that we're sending this message to the DB
      txn.put(&msg_key, []);

      txn.commit();
    }

    res
  }
}
coordinator/tributary-sdk/tendermint/src/ext.rs (new file, 307 lines)
@@ -0,0 +1,307 @@
use core::{hash::Hash, fmt::Debug, future::Future};
use std::{sync::Arc, collections::HashSet};

use thiserror::Error;

use parity_scale_codec::{Encode, Decode};

use crate::{SignedMessageFor, SlashEvent, commit_msg};

/// An alias for a series of traits required for a type to be usable as a validator ID,
/// automatically implemented for all types satisfying those traits.
pub trait ValidatorId:
  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
{
}
impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
  for V
{
}

/// An alias for a series of traits required for a type to be usable as a signature,
/// automatically implemented for all types satisfying those traits.
pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {}
impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode> Signature for S {}

// Type aliases which are distinct according to the type system

/// A struct containing a Block Number, wrapped to have a distinct type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
pub struct BlockNumber(pub u64);
/// A struct containing a round number, wrapped to have a distinct type.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
pub struct RoundNumber(pub u32);

/// A signer for a validator.
pub trait Signer: Send + Sync {
  // Type used to identify validators.
  type ValidatorId: ValidatorId;
  /// Signature type.
  type Signature: Signature;

  /// Returns the validator's current ID. Returns None if they aren't a current validator.
  fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>>;
  /// Sign a signature with the current validator's private key.
  fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature>;
}

impl<S: Signer> Signer for Arc<S> {
  type ValidatorId = S::ValidatorId;
  type Signature = S::Signature;

  fn validator_id(&self) -> impl Send + Future<Output = Option<Self::ValidatorId>> {
    self.as_ref().validator_id()
  }

  fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = Self::Signature> {
    self.as_ref().sign(msg)
  }
}

/// A signature scheme used by validators.
pub trait SignatureScheme: Send + Sync + Clone {
  // Type used to identify validators.
  type ValidatorId: ValidatorId;
  /// Signature type.
  type Signature: Signature;
  /// Type representing an aggregate signature. This would presumably be a BLS signature,
  /// yet even with Schnorr signatures
  /// [half-aggregation is possible](https://eprint.iacr.org/2021/350).
  /// It could even be a threshold signature scheme, though that's currently unexpected.
  type AggregateSignature: Signature;

  /// Type representing a signer of this scheme.
  type Signer: Signer<ValidatorId = Self::ValidatorId, Signature = Self::Signature>;

  /// Verify a signature from the validator in question.
  #[must_use]
  fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool;

  /// Aggregate signatures.
  /// It may panic if corrupted data is passed in.
  fn aggregate(
    &self,
    validators: &[Self::ValidatorId],
    msg: &[u8],
    sigs: &[Self::Signature],
  ) -> Self::AggregateSignature;
  /// Verify an aggregate signature for the list of signers.
  #[must_use]
  fn verify_aggregate(
    &self,
    signers: &[Self::ValidatorId],
    msg: &[u8],
    sig: &Self::AggregateSignature,
  ) -> bool;
}

impl<S: SignatureScheme> SignatureScheme for Arc<S> {
  type ValidatorId = S::ValidatorId;
  type Signature = S::Signature;
  type AggregateSignature = S::AggregateSignature;
  type Signer = S::Signer;

  fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {
    self.as_ref().verify(validator, msg, sig)
  }

  fn aggregate(
    &self,
    validators: &[Self::ValidatorId],
    msg: &[u8],
    sigs: &[Self::Signature],
  ) -> Self::AggregateSignature {
    self.as_ref().aggregate(validators, msg, sigs)
  }

  #[must_use]
  fn verify_aggregate(
    &self,
    signers: &[Self::ValidatorId],
    msg: &[u8],
    sig: &Self::AggregateSignature,
  ) -> bool {
    self.as_ref().verify_aggregate(signers, msg, sig)
  }
}

/// A commit for a specific block.
///
/// The listed validators have weight exceeding the threshold for a valid commit.
#[derive(PartialEq, Debug, Encode, Decode)]
pub struct Commit<S: SignatureScheme> {
  /// End time of the round which created this commit, used as the start time of the next block.
  pub end_time: u64,
  /// Validators participating in the signature.
  pub validators: Vec<S::ValidatorId>,
  /// Aggregate signature.
  pub signature: S::AggregateSignature,
}

impl<S: SignatureScheme> Clone for Commit<S> {
  fn clone(&self) -> Self {
    Self {
      end_time: self.end_time,
      validators: self.validators.clone(),
      signature: self.signature.clone(),
    }
  }
}

/// Weights for the validators present.
pub trait Weights: Send + Sync {
  type ValidatorId: ValidatorId;

  /// Total weight of all validators.
  fn total_weight(&self) -> u64;
  /// Weight for a specific validator.
  fn weight(&self, validator: Self::ValidatorId) -> u64;
  /// Threshold needed for BFT consensus.
  fn threshold(&self) -> u64 {
    ((self.total_weight() * 2) / 3) + 1
  }
  /// Threshold preventing BFT consensus.
  fn fault_threshold(&self) -> u64 {
    (self.total_weight() - self.threshold()) + 1
  }

  /// Weighted round robin function.
  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId;
}

impl<W: Weights> Weights for Arc<W> {
  type ValidatorId = W::ValidatorId;

  fn total_weight(&self) -> u64 {
    self.as_ref().total_weight()
  }

  fn weight(&self, validator: Self::ValidatorId) -> u64 {
    self.as_ref().weight(validator)
  }

  fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {
    self.as_ref().proposer(block, round)
  }
}

/// Simplified error enum representing a block's validity.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
pub enum BlockError {
  /// Malformed block which is wholly invalid.
  #[error("invalid block")]
  Fatal,
  /// Valid block by syntax, with semantics which may or may not be valid yet are locally
  /// considered invalid. If a block fails to validate with this, a slash will not be triggered.
  #[error("invalid block under local view")]
  Temporal,
}

/// Trait representing a Block.
pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {
  // Type used to identify blocks. Presumably a cryptographic hash of the block.
  type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;

  /// Return the deterministic, unique ID for this block.
  fn id(&self) -> Self::Id;
}

/// Trait representing the distributed system Tendermint is providing consensus over.
pub trait Network: Sized + Send + Sync {
  /// The database used to back this.
  type Db: serai_db::Db;

  // Type used to identify validators.
  type ValidatorId: ValidatorId;
  /// Signature scheme used by validators.
  type SignatureScheme: SignatureScheme<ValidatorId = Self::ValidatorId>;
  /// Object representing the weights of validators.
  type Weights: Weights<ValidatorId = Self::ValidatorId>;
  /// Type used for ordered blocks of information.
  type Block: Block;

  /// Maximum block processing time in milliseconds.
  ///
  /// This should include both the time to download the block and the actual processing time.
  ///
  /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000.
  // TODO: Redefine as Duration
  const BLOCK_PROCESSING_TIME: u32;
  /// Network latency time in milliseconds.
  ///
  /// BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME) must be divisible by 1000.
  const LATENCY_TIME: u32;

  /// The block time, in seconds. Defined as the processing time plus three times the latency.
  fn block_time() -> u32 {
    let raw = Self::BLOCK_PROCESSING_TIME + (3 * Self::LATENCY_TIME);
    let res = raw / 1000;
    assert_eq!(res * 1000, raw);
    res
  }

  /// Return a handle on the signer in use, usable for the entire lifetime of the machine.
  fn signer(&self) -> <Self::SignatureScheme as SignatureScheme>::Signer;
  /// Return a handle on the signing scheme in use, usable for the entire lifetime of the machine.
  fn signature_scheme(&self) -> Self::SignatureScheme;
  /// Return a handle on the validators' weights, usable for the entire lifetime of the machine.
  fn weights(&self) -> Self::Weights;

  /// Verify a commit for a given block. Intended for use when syncing or when not an active
  /// validator.
  #[must_use]
  fn verify_commit(
    &self,
    id: <Self::Block as Block>::Id,
    commit: &Commit<Self::SignatureScheme>,
  ) -> bool {
    if commit.validators.iter().collect::<HashSet<_>>().len() != commit.validators.len() {
      return false;
    }

    if !self.signature_scheme().verify_aggregate(
      &commit.validators,
      &commit_msg(commit.end_time, id.as_ref()),
      &commit.signature,
    ) {
      return false;
    }

    let weights = self.weights();
    commit.validators.iter().map(|v| weights.weight(*v)).sum::<u64>() >= weights.threshold()
  }

  /// Broadcast a message to the other validators.
  ///
  /// If authenticated channels have already been established, this will double-authenticate.
  /// Switching to unauthenticated channels in a system already providing authenticated channels is
  /// not recommended as this is a minor, temporal inefficiency, while downgrading channels may
  /// have wider implications.
  fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()>;

  /// Trigger a slash for the validator in question who was definitively malicious.
  ///
  /// The exact process of triggering a slash is undefined and left to the network as a whole.
  fn slash(
    &mut self,
    validator: Self::ValidatorId,
    slash_event: SlashEvent,
  ) -> impl Send + Future<Output = ()>;

  /// Validate a block.
  fn validate(&self, block: &Self::Block) -> impl Send + Future<Output = Result<(), BlockError>>;

  /// Add a block, returning the proposal for the next one.
  ///
  /// It's possible a block, which was never validated or even failed validation, may be passed
  /// here if a supermajority of validators did consider it valid and created a commit for it.
  ///
  /// This deviates from the paper which will have a local node refuse to decide on a block it
  /// considers invalid. This library acknowledges the network did decide on it, leaving handling
  /// of it to the network, and outside of this scope.
  fn add_block(
    &mut self,
    block: Self::Block,
    commit: Commit<Self::SignatureScheme>,
  ) -> impl Send + Future<Output = Option<Self::Block>>;
}
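A small worked example (not part of this commit) of the block-time invariant documented on the `Network` trait above. The constants are the ones used by the test network at the end of this diff and are purely illustrative:

```rust
// Constants matching the test network: 2000 ms processing time, 1000 ms latency.
const BLOCK_PROCESSING_TIME: u32 = 2000;
const LATENCY_TIME: u32 = 1000;

fn block_time() -> u32 {
  let raw = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); // 5000 ms
  assert_eq!(raw % 1000, 0); // the documented divisibility requirement
  raw / 1000 // 5 seconds per block
}

fn main() {
  assert_eq!(block_time(), 5);
}
```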
coordinator/tributary-sdk/tendermint/src/lib.rs (new file, 1092 lines)
File diff suppressed because it is too large.
coordinator/tributary-sdk/tendermint/src/message_log.rs (new file, 74 lines)
@@ -0,0 +1,74 @@
use std::{sync::Arc, collections::HashMap};

use parity_scale_codec::Encode;

use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence};

type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;
pub(crate) struct MessageLog<N: Network> {
  weights: Arc<N::Weights>,
  round_participation: HashMap<RoundNumber, u64>,
  participation: HashMap<(RoundNumber, Step), u64>,
  message_instances: HashMap<(RoundNumber, DataFor<N>), u64>,
  pub(crate) log: HashMap<RoundNumber, RoundLog<N>>,
}

impl<N: Network> MessageLog<N> {
  pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {
    MessageLog {
      weights,
      round_participation: HashMap::new(),
      participation: HashMap::new(),
      message_instances: HashMap::new(),
      log: HashMap::new(),
    }
  }

  // Returns true if it's a new message
  pub(crate) fn log(&mut self, signed: SignedMessageFor<N>) -> Result<bool, Evidence> {
    let msg = &signed.msg;
    // Clarity, and safety around default != new edge cases
    let round = self.log.entry(msg.round).or_insert_with(HashMap::new);
    let msgs = round.entry(msg.sender).or_insert_with(HashMap::new);

    // Handle message replays without issue. It's only multiple messages which is malicious
    let step = msg.data.step();
    if let Some(existing) = msgs.get(&step) {
      if existing.msg.data != msg.data {
        log::debug!(
          target: "tendermint",
          "Validator sent multiple messages for the same block + round + step"
        );
        Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?;
      }
      return Ok(false);
    }

    // Since we have a new message, update the participation
    let sender_weight = self.weights.weight(msg.sender);
    if msgs.is_empty() {
      *self.round_participation.entry(msg.round).or_insert_with(|| 0) += sender_weight;
    }
    *self.participation.entry((msg.round, step)).or_insert_with(|| 0) += sender_weight;
    *self.message_instances.entry((msg.round, msg.data.clone())).or_insert_with(|| 0) +=
      sender_weight;

    msgs.insert(step, signed);
    Ok(true)
  }

  // Get the participation in a given round
  pub(crate) fn round_participation(&self, round: RoundNumber) -> u64 {
    *self.round_participation.get(&round).unwrap_or(&0)
  }

  // Check if a supermajority of nodes have participated on a specific step
  pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool {
    *self.participation.get(&(round, step)).unwrap_or(&0) >= self.weights.threshold()
  }

  // Check if consensus has been reached on a specific piece of data
  pub(crate) fn has_consensus(&self, round: RoundNumber, data: &DataFor<N>) -> bool {
    *self.message_instances.get(&(round, data.clone())).unwrap_or(&0) >= self.weights.threshold()
  }
}
coordinator/tributary-sdk/tendermint/src/round.rs (new file, 93 lines)
@@ -0,0 +1,93 @@
use std::{
  marker::PhantomData,
  time::{Duration, Instant},
  collections::HashMap,
};

use futures_util::{FutureExt, future};
use patchable_async_sleep::sleep;

use crate::{
  time::CanonicalInstant,
  Step,
  ext::{RoundNumber, Network},
};

pub struct RoundData<N: Network> {
  _network: PhantomData<N>,
  pub number: RoundNumber,
  pub start_time: CanonicalInstant,
  pub step: Step,
  pub timeouts: HashMap<Step, Instant>,
}

impl<N: Network> RoundData<N> {
  pub fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self {
    RoundData {
      _network: PhantomData,
      number,
      start_time,
      step: Step::Propose,
      timeouts: HashMap::new(),
    }
  }

  fn timeout(&self, step: Step) -> CanonicalInstant {
    let adjusted_block = N::BLOCK_PROCESSING_TIME * (self.number.0 + 1);
    let adjusted_latency = N::LATENCY_TIME * (self.number.0 + 1);
    let offset = Duration::from_millis(
      (match step {
        Step::Propose => adjusted_block + adjusted_latency,
        Step::Prevote => adjusted_block + (2 * adjusted_latency),
        Step::Precommit => adjusted_block + (3 * adjusted_latency),
      })
      .into(),
    );
    self.start_time + offset
  }

  pub fn end_time(&self) -> CanonicalInstant {
    self.timeout(Step::Precommit)
  }

  pub(crate) fn set_timeout(&mut self, step: Step) {
    let timeout = self.timeout(step).instant();
    self.timeouts.entry(step).or_insert(timeout);
  }

  // Poll all set timeouts, returning the Step whose timeout has just expired
  pub(crate) async fn timeout_future(&self) -> Step {
    /*
      let now = Instant::now();
      log::trace!(
        target: "tendermint",
        "getting timeout_future, from step {:?}, off timeouts: {:?}",
        self.step,
        self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::<HashMap<_, _>>()
      );
    */

    let timeout_future = |step| {
      let timeout = self.timeouts.get(&step).copied();
      (async move {
        if let Some(timeout) = timeout {
          sleep(timeout.saturating_duration_since(Instant::now())).await;
        } else {
          future::pending::<()>().await;
        }
        step
      })
      .fuse()
    };
    let propose_timeout = timeout_future(Step::Propose);
    let prevote_timeout = timeout_future(Step::Prevote);
    let precommit_timeout = timeout_future(Step::Precommit);
    futures_util::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout);

    futures_util::select_biased! {
      step = propose_timeout => step,
      step = prevote_timeout => step,
      step = precommit_timeout => step,
    }
  }
}
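As a worked example (not part of this commit) of the timeout schedule computed by `RoundData::timeout` above, assuming the test network's constants of 2000 ms processing time and 1000 ms latency:

```rust
// Both terms scale with (round + 1), so later rounds wait progressively longer before timing out.
fn timeout_offset_ms(round: u32, latency_multiplier: u32) -> u32 {
  let adjusted_block = 2000 * (round + 1);
  let adjusted_latency = 1000 * (round + 1);
  adjusted_block + (latency_multiplier * adjusted_latency)
}

fn main() {
  // Round 0: Propose at 3000 ms, Prevote at 4000 ms, Precommit (the round's end time) at 5000 ms
  assert_eq!(timeout_offset_ms(0, 1), 3000);
  assert_eq!(timeout_offset_ms(0, 2), 4000);
  assert_eq!(timeout_offset_ms(0, 3), 5000);
  // Round 1: the same steps land at 6000, 8000, and 10000 ms after the round's start
  assert_eq!(timeout_offset_ms(1, 1), 6000);
  assert_eq!(timeout_offset_ms(1, 2), 8000);
  assert_eq!(timeout_offset_ms(1, 3), 10_000);
}
```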
coordinator/tributary-sdk/tendermint/src/time.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
use core::ops::Add;
use std::time::{UNIX_EPOCH, SystemTime, Instant, Duration};

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct CanonicalInstant {
  /// Time since the epoch.
  time: u64,
  /// An Instant synchronized with the above time.
  instant: Instant,
}

pub(crate) fn sys_time(time: u64) -> SystemTime {
  UNIX_EPOCH + Duration::from_secs(time)
}

impl CanonicalInstant {
  pub fn new(time: u64) -> CanonicalInstant {
    // This is imprecise yet should be precise enough, as it'll resolve within a few ms
    let instant_now = Instant::now();
    let sys_now = SystemTime::now();

    // If the time is in the future, this will be off by that much time
    let elapsed = sys_now.duration_since(sys_time(time)).unwrap_or(Duration::ZERO);
    // Except for the fact this panics here
    let synced_instant = instant_now.checked_sub(elapsed).unwrap();

    CanonicalInstant { time, instant: synced_instant }
  }

  pub fn canonical(&self) -> u64 {
    self.time
  }

  pub fn instant(&self) -> Instant {
    self.instant
  }
}

impl Add<Duration> for CanonicalInstant {
  type Output = CanonicalInstant;
  fn add(self, duration: Duration) -> CanonicalInstant {
    CanonicalInstant { time: self.time + duration.as_secs(), instant: self.instant + duration }
  }
}
coordinator/tributary-sdk/tendermint/tests/ext.rs (new file, 202 lines)
@@ -0,0 +1,202 @@
use core::future::Future;
use std::{
  sync::Arc,
  time::{UNIX_EPOCH, SystemTime, Duration},
};

use parity_scale_codec::{Encode, Decode};

use futures_util::sink::SinkExt;
use tokio::{sync::RwLock, time::sleep};

use serai_db::MemDb;

use tendermint_machine::{
  ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender,
  SlashEvent, TendermintMachine, TendermintHandle,
};

type TestValidatorId = u16;
type TestBlockId = [u8; 4];

struct TestSigner(u16);
impl Signer for TestSigner {
  type ValidatorId = TestValidatorId;
  type Signature = [u8; 32];

  fn validator_id(&self) -> impl Send + Future<Output = Option<TestValidatorId>> {
    async move { Some(self.0) }
  }

  fn sign(&self, msg: &[u8]) -> impl Send + Future<Output = [u8; 32]> {
    async move {
      let mut sig = [0; 32];
      sig[.. 2].copy_from_slice(&self.0.to_le_bytes());
      sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]);
      sig
    }
  }
}

#[derive(Clone)]
struct TestSignatureScheme;
impl SignatureScheme for TestSignatureScheme {
  type ValidatorId = TestValidatorId;
  type Signature = [u8; 32];
  type AggregateSignature = Vec<[u8; 32]>;
  type Signer = TestSigner;

  #[must_use]
  fn verify(&self, validator: u16, msg: &[u8], sig: &[u8; 32]) -> bool {
    (sig[.. 2] == validator.to_le_bytes()) && (sig[2 ..] == [msg, &[0; 30]].concat()[.. 30])
  }

  fn aggregate(
    &self,
    _: &[Self::ValidatorId],
    _: &[u8],
    sigs: &[Self::Signature],
  ) -> Self::AggregateSignature {
    sigs.to_vec()
  }

  #[must_use]
  fn verify_aggregate(
    &self,
    signers: &[TestValidatorId],
    msg: &[u8],
    sigs: &Vec<[u8; 32]>,
  ) -> bool {
    assert_eq!(signers.len(), sigs.len());
    for sig in signers.iter().zip(sigs.iter()) {
      assert!(self.verify(*sig.0, msg, sig.1));
    }
    true
  }
}

struct TestWeights;
impl Weights for TestWeights {
  type ValidatorId = TestValidatorId;

  fn total_weight(&self) -> u64 {
    4
  }
  fn weight(&self, id: TestValidatorId) -> u64 {
    [1; 4][usize::from(id)]
  }

  fn proposer(&self, number: BlockNumber, round: RoundNumber) -> TestValidatorId {
    TestValidatorId::try_from((number.0 + u64::from(round.0)) % 4).unwrap()
  }
}

#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
struct TestBlock {
  id: TestBlockId,
  valid: Result<(), BlockError>,
}

impl Block for TestBlock {
  type Id = TestBlockId;

  fn id(&self) -> TestBlockId {
    self.id
  }
}

#[allow(clippy::type_complexity)]
struct TestNetwork(
  u16,
  Arc<RwLock<Vec<(MessageSender<Self>, SyncedBlockSender<Self>, SyncedBlockResultReceiver)>>>,
);

impl Network for TestNetwork {
  type Db = MemDb;

  type ValidatorId = TestValidatorId;
  type SignatureScheme = TestSignatureScheme;
  type Weights = TestWeights;
  type Block = TestBlock;

  const BLOCK_PROCESSING_TIME: u32 = 2000;
  const LATENCY_TIME: u32 = 1000;

  fn signer(&self) -> TestSigner {
    TestSigner(self.0)
  }

  fn signature_scheme(&self) -> TestSignatureScheme {
    TestSignatureScheme
  }

  fn weights(&self) -> TestWeights {
    TestWeights
  }

  async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
    for (messages, _, _) in self.1.write().await.iter_mut() {
      messages.send(msg.clone()).await.unwrap();
    }
  }

  async fn slash(&mut self, id: TestValidatorId, event: SlashEvent) {
    println!("Slash for {id} due to {event:?}");
  }

  async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> {
    block.valid
  }

  async fn add_block(
    &mut self,
    block: TestBlock,
    commit: Commit<TestSignatureScheme>,
  ) -> Option<TestBlock> {
    println!("Adding {:?}", &block);
    assert!(block.valid.is_ok());
    assert!(self.verify_commit(block.id(), &commit));
    Some(TestBlock { id: (u32::from_le_bytes(block.id) + 1).to_le_bytes(), valid: Ok(()) })
  }
}

impl TestNetwork {
  async fn new(
    validators: usize,
    start_time: u64,
  ) -> Arc<RwLock<Vec<(MessageSender<Self>, SyncedBlockSender<Self>, SyncedBlockResultReceiver)>>>
  {
    let arc = Arc::new(RwLock::new(vec![]));
    {
      let mut write = arc.write().await;
      for i in 0 .. validators {
        let i = u16::try_from(i).unwrap();
        let TendermintHandle { messages, synced_block, synced_block_result, machine } =
          TendermintMachine::new(
            MemDb::new(),
            TestNetwork(i, arc.clone()),
            [0; 32],
            BlockNumber(1),
            start_time,
            TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },
          )
          .await;
        tokio::spawn(machine.run());
        write.push((messages, synced_block, synced_block_result));
      }
    }
    arc
  }
}

#[tokio::test]
async fn test_machine() {
  TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()).await;
  sleep(Duration::from_secs(30)).await;
}

#[tokio::test]
async fn test_machine_with_historic_start_time() {
  TestNetwork::new(4, SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() - 60).await;
  sleep(Duration::from_secs(30)).await;
}