2025-11-04 10:20:17 -05:00
|
|
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
2025-01-11 04:14:21 -05:00
|
|
|
#![doc = include_str!("../README.md")]
|
|
|
|
|
#![deny(missing_docs)]
|
2023-04-13 18:43:03 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
use core::{marker::PhantomData, future::Future};
|
|
|
|
|
use std::collections::HashMap;
|
2023-04-13 18:43:03 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
use ciphersuite::group::GroupEncoding;
|
2025-01-15 15:15:38 -05:00
|
|
|
use dkg::Participant;
|
2023-04-13 18:43:03 -04:00
|
|
|
|
2025-09-01 21:04:57 -04:00
|
|
|
use serai_primitives::{
|
|
|
|
|
address::SeraiAddress,
|
|
|
|
|
validator_sets::{ExternalValidatorSet, Slash},
|
2023-04-13 18:43:03 -04:00
|
|
|
};
|
2023-04-11 10:18:31 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
use serai_db::*;
|
|
|
|
|
use serai_task::ContinuallyRan;
|
2023-11-05 07:04:41 +03:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
use tributary_sdk::{
|
|
|
|
|
tendermint::{
|
|
|
|
|
tx::{TendermintTx, Evidence, decode_signed_message},
|
|
|
|
|
TendermintNetwork,
|
|
|
|
|
},
|
|
|
|
|
Signed as TributarySigned, TransactionKind, TransactionTrait,
|
|
|
|
|
Transaction as TributaryTransaction, Block, TributaryReader, P2p,
|
|
|
|
|
};
|
Slash malevolent validators (#294)
* add slash tx
* ignore unsigned tx replays
* verify that provided evidence is valid
* fix clippy + fmt
* move application tx handling to another module
* partially handle the tendermint txs
* fix pr comments
* support unsigned app txs
* add slash target to the votes
* enforce provided, unsigned, signed tx ordering within a block
* bug fixes
* add unit test for tendermint txs
* bug fixes
* update tests for tendermint txs
* add tx ordering test
* tidy up tx ordering test
* cargo +nightly fmt
* Misc fixes from rebasing
* Finish resolving clippy
* Remove sha3 from tendermint-machine
* Resolve a DoS in SlashEvidence's read
Also moves Evidence from Vec<Message> to (Message, Option<Message>). That
should meet all requirements while being a bit safer.
* Make lazy_static a dev-depend for tributary
* Various small tweaks
One use of sort was inefficient, sorting unsigned || signed when unsigned was
already properly sorted. Given how the unsigned TXs were given a nonce of 0, an
unstable sort may swap places with an unsigned TX and a signed TX with a nonce
of 0 (leading to a faulty block).
The extra protection added here sorts signed, then concats.
* Fix Tributary tests I broke, start review on tendermint/tx.rs
* Finish reviewing everything outside tests and empty_signature
* Remove empty_signature
empty_signature led to corrupted local state histories. Unfortunately, the API
is only sane with a signature.
We now use the actual signature, which risks creating a signature over a
malicious message if we have ever have an invariant producing malicious
messages. Prior, we only signed the message after the local machine confirmed
it was okay per the local view of consensus.
This is tolerated/preferred over a corrupt state history since production of
such messages is already an invariant. TODOs are added to make handling of this
theoretical invariant further robust.
* Remove async_sequential for tokio::test
There was no competition for resources forcing them to be run sequentially.
* Modify block order test to be statistically significant without multiple runs
* Clean tests
---------
Co-authored-by: Luke Parker <lukeparker5132@gmail.com>
2023-08-21 07:28:23 +03:00
|
|
|
|
2025-01-12 05:52:33 -05:00
|
|
|
use serai_cosign::CosignIntent;
|
2025-01-11 04:14:21 -05:00
|
|
|
use serai_coordinator_substrate::NewSetInformation;
|
Slash malevolent validators (#294)
* add slash tx
* ignore unsigned tx replays
* verify that provided evidence is valid
* fix clippy + fmt
* move application tx handling to another module
* partially handle the tendermint txs
* fix pr comments
* support unsigned app txs
* add slash target to the votes
* enforce provided, unsigned, signed tx ordering within a block
* bug fixes
* add unit test for tendermint txs
* bug fixes
* update tests for tendermint txs
* add tx ordering test
* tidy up tx ordering test
* cargo +nightly fmt
* Misc fixes from rebasing
* Finish resolving clippy
* Remove sha3 from tendermint-machine
* Resolve a DoS in SlashEvidence's read
Also moves Evidence from Vec<Message> to (Message, Option<Message>). That
should meet all requirements while being a bit safer.
* Make lazy_static a dev-depend for tributary
* Various small tweaks
One use of sort was inefficient, sorting unsigned || signed when unsigned was
already properly sorted. Given how the unsigned TXs were given a nonce of 0, an
unstable sort may swap places with an unsigned TX and a signed TX with a nonce
of 0 (leading to a faulty block).
The extra protection added here sorts signed, then concats.
* Fix Tributary tests I broke, start review on tendermint/tx.rs
* Finish reviewing everything outside tests and empty_signature
* Remove empty_signature
empty_signature led to corrupted local state histories. Unfortunately, the API
is only sane with a signature.
We now use the actual signature, which risks creating a signature over a
malicious message if we have ever have an invariant producing malicious
messages. Prior, we only signed the message after the local machine confirmed
it was okay per the local view of consensus.
This is tolerated/preferred over a corrupt state history since production of
such messages is already an invariant. TODOs are added to make handling of this
theoretical invariant further robust.
* Remove async_sequential for tokio::test
There was no competition for resources forcing them to be run sequentially.
* Modify block order test to be statistically significant without multiple runs
* Clean tests
---------
Co-authored-by: Luke Parker <lukeparker5132@gmail.com>
2023-08-21 07:28:23 +03:00
|
|
|
|
2025-01-15 15:15:38 -05:00
|
|
|
use messages::sign::{VariantSignId, SignId};
|
2023-04-11 13:42:18 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
mod transaction;
|
2025-01-12 07:32:45 -05:00
|
|
|
pub use transaction::{SigningProtocolRound, Signed, Transaction};
|
2023-04-13 18:43:03 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
mod db;
|
|
|
|
|
use db::*;
|
2025-01-15 07:01:24 -05:00
|
|
|
pub use db::Topic;
|
2023-04-13 18:43:03 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
/// Messages to send to the Processors.
|
|
|
|
|
pub struct ProcessorMessages;
|
|
|
|
|
impl ProcessorMessages {
|
|
|
|
|
/// Try to receive a message to send to a Processor.
|
2025-01-30 03:14:24 -05:00
|
|
|
pub fn try_recv(
|
|
|
|
|
txn: &mut impl DbTxn,
|
|
|
|
|
set: ExternalValidatorSet,
|
|
|
|
|
) -> Option<messages::CoordinatorMessage> {
|
2025-01-11 04:14:21 -05:00
|
|
|
db::ProcessorMessages::try_recv(txn, set)
|
2023-04-13 18:43:03 -04:00
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-15 15:15:38 -05:00
|
|
|
/// Messages for the DKG confirmation.
|
|
|
|
|
pub struct DkgConfirmationMessages;
|
|
|
|
|
impl DkgConfirmationMessages {
|
|
|
|
|
/// Receive a message for the DKG confirmation.
|
|
|
|
|
///
|
|
|
|
|
/// These messages use the ProcessorMessage API as that's what existing flows are designed
|
|
|
|
|
/// around, enabling their reuse. The ProcessorMessage includes a VariantSignId which isn't
|
|
|
|
|
/// applicable to the DKG confirmation (as there's no such variant of the VariantSignId). The
|
|
|
|
|
/// actual ID is undefined other than it will be consistent to the signing protocol and unique
|
|
|
|
|
/// across validator sets, with no guarantees of uniqueness across contexts.
|
|
|
|
|
pub fn try_recv(
|
|
|
|
|
txn: &mut impl DbTxn,
|
2025-01-30 03:14:24 -05:00
|
|
|
set: ExternalValidatorSet,
|
2025-01-15 15:15:38 -05:00
|
|
|
) -> Option<messages::sign::CoordinatorMessage> {
|
|
|
|
|
db::DkgConfirmationMessages::try_recv(txn, set)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-12 05:52:33 -05:00
|
|
|
/// The cosign intents.
|
|
|
|
|
pub struct CosignIntents;
|
|
|
|
|
impl CosignIntents {
|
|
|
|
|
/// Provide a CosignIntent for this Tributary.
|
|
|
|
|
///
|
|
|
|
|
/// This must be done before the associated `Transaction::Cosign` is provided.
|
2025-01-30 03:14:24 -05:00
|
|
|
pub fn provide(txn: &mut impl DbTxn, set: ExternalValidatorSet, intent: &CosignIntent) {
|
2025-01-12 05:52:33 -05:00
|
|
|
db::CosignIntents::set(txn, set, intent.block_hash, intent);
|
|
|
|
|
}
|
|
|
|
|
fn take(
|
|
|
|
|
txn: &mut impl DbTxn,
|
2025-01-30 03:14:24 -05:00
|
|
|
set: ExternalValidatorSet,
|
2025-01-12 05:52:33 -05:00
|
|
|
substrate_block_hash: [u8; 32],
|
|
|
|
|
) -> Option<CosignIntent> {
|
|
|
|
|
db::CosignIntents::take(txn, set, substrate_block_hash)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-15 07:01:24 -05:00
|
|
|
/// An interface to the topics recognized on this Tributary.
|
|
|
|
|
pub struct RecognizedTopics;
|
|
|
|
|
impl RecognizedTopics {
|
|
|
|
|
/// If this topic has been recognized by this Tributary.
|
|
|
|
|
///
|
|
|
|
|
/// This will either be by explicit recognition or participation.
|
2025-01-30 03:14:24 -05:00
|
|
|
pub fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool {
|
2025-01-15 07:01:24 -05:00
|
|
|
TributaryDb::recognized(getter, set, topic)
|
|
|
|
|
}
|
|
|
|
|
/// The next topic requiring recognition which has been recognized by this Tributary.
|
|
|
|
|
pub fn try_recv_topic_requiring_recognition(
|
|
|
|
|
txn: &mut impl DbTxn,
|
2025-01-30 03:14:24 -05:00
|
|
|
set: ExternalValidatorSet,
|
2025-01-15 07:01:24 -05:00
|
|
|
) -> Option<Topic> {
|
|
|
|
|
db::RecognizedTopics::try_recv(txn, set)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
|
2025-01-12 07:32:45 -05:00
|
|
|
pub struct SubstrateBlockPlans;
|
|
|
|
|
impl SubstrateBlockPlans {
|
2025-01-15 07:01:24 -05:00
|
|
|
/// Set the plans to recognize upon the associated `Transaction::SubstrateBlock` being included
|
2025-01-12 07:32:45 -05:00
|
|
|
/// on-chain.
|
|
|
|
|
///
|
|
|
|
|
/// This must be done before the associated `Transaction::Cosign` is provided.
|
|
|
|
|
pub fn set(
|
|
|
|
|
txn: &mut impl DbTxn,
|
2025-01-30 03:14:24 -05:00
|
|
|
set: ExternalValidatorSet,
|
2025-01-12 07:32:45 -05:00
|
|
|
substrate_block_hash: [u8; 32],
|
|
|
|
|
plans: &Vec<[u8; 32]>,
|
|
|
|
|
) {
|
2025-01-15 07:01:24 -05:00
|
|
|
db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
|
2025-01-12 07:32:45 -05:00
|
|
|
}
|
|
|
|
|
fn take(
|
|
|
|
|
txn: &mut impl DbTxn,
|
2025-01-30 03:14:24 -05:00
|
|
|
set: ExternalValidatorSet,
|
2025-01-12 07:32:45 -05:00
|
|
|
substrate_block_hash: [u8; 32],
|
|
|
|
|
) -> Option<Vec<[u8; 32]>> {
|
|
|
|
|
db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2025-01-12 05:52:33 -05:00
|
|
|
// State threaded through the handling of a single Tributary block.
struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
  // Marker for the database type parameter (not stored directly).
  _td: PhantomData<TD>,
  // Marker for the P2P type parameter (not stored directly).
  _p2p: PhantomData<P>,
  // Transaction the entire block's handling is written into.
  tributary_txn: &'a mut TDT,
  // Information on the validator set this Tributary is for.
  set: &'a NewSetInformation,
  // The validators within this set.
  validators: &'a [SeraiAddress],
  // Sum of all validators' weights.
  total_weight: u16,
  // Each validator's weight, keyed by address.
  validator_weights: &'a HashMap<SeraiAddress, u16>,
}
|
2025-01-18 12:31:11 -05:00
|
|
|
impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
|
2025-01-11 04:14:21 -05:00
|
|
|
  // Start cosigning the latest intended-to-be-cosigned block, if we aren't already cosigning
  // one and it hasn't already been cosigned.
  fn potentially_start_cosign(&mut self) {
    // Don't start a new cosigning instance if we're actively running one
    if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set).is_some() {
      return;
    }

    // Fetch the latest intended-to-be-cosigned block
    let Some(latest_substrate_block_to_cosign) =
      TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set.set)
    else {
      return;
    };

    // If it was already cosigned, return
    if TributaryDb::cosigned(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) {
      return;
    }

    // The intent was saved when the Transaction::Cosign was provided, so it must exist here
    let intent =
      CosignIntents::take(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign)
        .expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
    assert_eq!(
      intent.block_hash, latest_substrate_block_to_cosign,
      "provided CosignIntent wasn't saved by its block hash"
    );

    // Mark us as actively cosigning
    TributaryDb::start_cosigning(
      self.tributary_txn,
      self.set.set,
      latest_substrate_block_to_cosign,
      intent.block_number,
    );
    // Send the message for the processor to start signing
    TributaryDb::send_message(
      self.tributary_txn,
      self.set.set,
      messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
        session: self.set.set.session,
        cosign: intent.into_cosign(self.set.set.network),
      },
    );
  }
|
2025-01-15 15:15:38 -05:00
|
|
|
|
|
|
|
|
  // Accumulate a DKG-confirmation message (preprocess or share) from `signer`.
  //
  // Returns `None` until the data set reaches the threshold (or if we aren't participating).
  // Once participating, returns the protocol's SignId and the accumulated data keyed by MuSig
  // participant index.
  fn accumulate_dkg_confirmation<D: AsRef<[u8]> + Borshy>(
    &mut self,
    block_number: u64,
    topic: Topic,
    data: &D,
    signer: SeraiAddress,
  ) -> Option<(SignId, HashMap<Participant, Vec<u8>>)> {
    match TributaryDb::accumulate::<D>(
      self.tributary_txn,
      self.set.set,
      self.validators,
      self.total_weight,
      block_number,
      topic,
      signer,
      self.validator_weights[&signer],
      data,
    ) {
      DataSet::None => None,
      DataSet::Participating(data_set) => {
        // Only DKG-confirmation topics are accumulated here, so this ID must exist
        let id = topic.dkg_confirmation_sign_id(self.set.set).unwrap();

        // This will be used in a MuSig protocol, so the Participant indexes are the validator's
        // position in the list regardless of their weight
        let flatten_data_set = |data_set: HashMap<_, D>| {
          let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
          for (validator, participation) in data_set {
            // Locate this validator's position within the set's validator list
            let (index, (_validator, _weight)) = &self
              .set
              .validators
              .iter()
              .enumerate()
              .find(|(_i, (validator_i, _weight))| validator == *validator_i)
              .unwrap();
            // The index is zero-indexed yet participants are one-indexed
            let index = index + 1;

            entries.insert(
              Participant::new(u16::try_from(index).unwrap()).unwrap(),
              participation.as_ref().to_vec(),
            );
          }
          entries
        };
        let data_set = flatten_data_set(data_set);
        Some((id, data_set))
      }
    }
  }
|
|
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
  // Handle a single application-layer transaction included within a Tributary block.
  //
  // All effects are written into `self.tributary_txn`; ordering of the DB operations within
  // each arm is significant.
  fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
    // Helper to map a transaction's signer to its SeraiAddress
    let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());

    if let TransactionKind::Signed(_, TributarySigned { signer, .. }) = tx.kind() {
      // Don't handle transactions from those fatally slashed
      // TODO: The fact they can publish these TXs makes this a notable spam vector
      if TributaryDb::is_fatally_slashed(
        self.tributary_txn,
        self.set.set,
        SeraiAddress(signer.to_bytes()),
      ) {
        return;
      }
    }

    // Fetch the topic before `tx` is consumed by the match below
    let topic = tx.topic();
    match tx {
      // Accumulate this vote and fatally slash the participant if past the threshold
      Transaction::RemoveParticipant { participant, signed } => {
        let signer = signer(signed);

        // Check the participant voted to be removed actually exists
        if !self.validators.contains(&participant) {
          TributaryDb::fatal_slash(
            self.tributary_txn,
            self.set.set,
            signer,
            "voted to remove non-existent participant",
          );
          return;
        }

        // Votes carry no payload, hence the unit data
        match TributaryDb::accumulate(
          self.tributary_txn,
          self.set.set,
          self.validators,
          self.total_weight,
          block_number,
          topic.unwrap(),
          signer,
          self.validator_weights[&signer],
          &(),
        ) {
          DataSet::None => {}
          DataSet::Participating(_) => {
            // Threshold reached: fatally slash the voted-on participant
            TributaryDb::fatal_slash(
              self.tributary_txn,
              self.set.set,
              participant,
              "voted to remove",
            );
          }
        };
      }

      // Send the participation to the processor
      Transaction::DkgParticipation { participation, signed } => {
        TributaryDb::send_message(
          self.tributary_txn,
          self.set.set,
          messages::key_gen::CoordinatorMessage::Participation {
            session: self.set.set.session,
            // The DKG uses the signer's first participant index
            participant: self.set.participant_indexes[&signer(signed)][0],
            participation,
          },
        );
      }
      Transaction::DkgConfirmationPreprocess { attempt: _, preprocess, signed } => {
        let topic = topic.unwrap();
        let signer = signer(signed);

        // Accumulate; bail until the threshold of preprocesses is reached
        let Some((id, data_set)) =
          self.accumulate_dkg_confirmation(block_number, topic, &preprocess, signer)
        else {
          return;
        };

        db::DkgConfirmationMessages::send(
          self.tributary_txn,
          self.set.set,
          &messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set },
        );
      }
      Transaction::DkgConfirmationShare { attempt: _, share, signed } => {
        let topic = topic.unwrap();
        let signer = signer(signed);

        // Accumulate; bail until the threshold of shares is reached
        let Some((id, data_set)) =
          self.accumulate_dkg_confirmation(block_number, topic, &share, signer)
        else {
          return;
        };

        db::DkgConfirmationMessages::send(
          self.tributary_txn,
          self.set.set,
          &messages::sign::CoordinatorMessage::Shares { id, shares: data_set },
        );
      }

      Transaction::Cosign { substrate_block_hash } => {
        // Update the latest intended-to-be-cosigned Substrate block
        TributaryDb::set_latest_substrate_block_to_cosign(
          self.tributary_txn,
          self.set.set,
          substrate_block_hash,
        );
        // Start a new cosign if we aren't already working on one
        self.potentially_start_cosign();
      }
      Transaction::Cosigned { substrate_block_hash } => {
        /*
          We provide one Cosigned per Cosign transaction, but they have independent orders. This
          means we may receive Cosigned before Cosign. In order to ensure we only start work on
          not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
          the next block to work on, we won't if it's already been cosigned.
        */
        TributaryDb::mark_cosigned(self.tributary_txn, self.set.set, substrate_block_hash);

        // If we aren't actively cosigning this block, return
        // This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
        // and then receive Cosigned for B
        if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set) !=
          Some(substrate_block_hash)
        {
          return;
        }

        // Since this is the block we were cosigning, mark us as having finished cosigning
        TributaryDb::finish_cosigning(self.tributary_txn, self.set.set);

        // Start working on the next cosign
        self.potentially_start_cosign();
      }
      Transaction::SubstrateBlock { hash } => {
        // Recognize all of the IDs this Substrate block causes to be signed
        let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set.set, hash).expect(
          "Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
        );
        for plan in plans {
          TributaryDb::recognize_topic(
            self.tributary_txn,
            self.set.set,
            Topic::Sign {
              id: VariantSignId::Transaction(plan),
              attempt: 0,
              round: SigningProtocolRound::Preprocess,
            },
          );
        }
      }
      Transaction::Batch { hash } => {
        // Recognize the signing of this batch
        TributaryDb::recognize_topic(
          self.tributary_txn,
          self.set.set,
          Topic::Sign {
            id: VariantSignId::Batch(hash),
            attempt: 0,
            round: SigningProtocolRound::Preprocess,
          },
        );
      }

      Transaction::SlashReport { slash_points, signed } => {
        let signer = signer(signed);

        // A well-formed report has exactly one entry per validator
        if slash_points.len() != self.validators.len() {
          TributaryDb::fatal_slash(
            self.tributary_txn,
            self.set.set,
            signer,
            "slash report was for a distinct amount of signers",
          );
          return;
        }

        // Accumulate, and if past the threshold, calculate *the* slash report and start signing it
        match TributaryDb::accumulate(
          self.tributary_txn,
          self.set.set,
          self.validators,
          self.total_weight,
          block_number,
          topic.unwrap(),
          signer,
          self.validator_weights[&signer],
          &slash_points,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            // Find the median reported slashes for this validator
            /*
              TODO: This lets 34% perform a fatal slash. That shouldn't be allowed. We need
              to accept slash reports for a period past the threshold, and only fatally slash if we
              have a supermajority agree the slash should be fatal. If there isn't a supermajority,
              but the median believe the slash should be fatal, we need to fallback to a large
              constant.
            */
            let mut median_slash_report = Vec::with_capacity(self.validators.len());
            for i in 0 .. self.validators.len() {
              // Collect every reporter's opinion on validator `i`
              let mut this_validator =
                data_set.values().map(|report| report[i]).collect::<Vec<_>>();
              this_validator.sort_unstable();
              // Choose the median, where if there are two median values, the lower one is chosen
              let median_index = if (this_validator.len() % 2) == 1 {
                this_validator.len() / 2
              } else {
                (this_validator.len() / 2) - 1
              };
              median_slash_report.push(this_validator[median_index]);
            }

            // We only publish slashes for the `f` worst performers to:
            // 1) Effect amnesty if there were network disruptions which affected everyone
            // 2) Ensure the signing threshold doesn't have a disincentive to do their job

            // Find the worst performer within the signing threshold's slash points
            let f = (self.validators.len() - 1) / 3;
            let worst_validator_in_supermajority_slash_points = {
              let mut sorted_slash_points = median_slash_report.clone();
              sorted_slash_points.sort_unstable();
              // This won't be a valid index if `f == 0`, which means we don't have any validators
              // to slash
              let index_of_first_validator_to_slash = self.validators.len() - f;
              let index_of_worst_validator_in_supermajority = index_of_first_validator_to_slash - 1;
              sorted_slash_points[index_of_worst_validator_in_supermajority]
            };

            // Perform the amortization
            for slash_points in &mut median_slash_report {
              *slash_points =
                slash_points.saturating_sub(worst_validator_in_supermajority_slash_points)
            }
            let amortized_slash_report = median_slash_report;

            // Create the resulting slash report
            let mut slash_report = vec![];
            for points in amortized_slash_report {
              // TODO: Natively store this as a `Slash`
              // u32::MAX is the sentinel for a fatal slash
              if points == u32::MAX {
                slash_report.push(Slash::Fatal);
              } else {
                slash_report.push(Slash::Points(points));
              }
            }
            // NOTE(review): the loop above pushes one entry per validator, yet this asserts at
            // most `f` entries — confirm whether zero-point entries were meant to be filtered
            // out before this assert
            assert!(slash_report.len() <= f);

            // Recognize the topic for signing the slash report
            TributaryDb::recognize_topic(
              self.tributary_txn,
              self.set.set,
              Topic::Sign {
                id: VariantSignId::SlashReport,
                attempt: 0,
                round: SigningProtocolRound::Preprocess,
              },
            );
            // Send the message for the processor to start signing
            TributaryDb::send_message(
              self.tributary_txn,
              self.set.set,
              messages::coordinator::CoordinatorMessage::SignSlashReport {
                session: self.set.set.session,
                slash_report: slash_report.try_into().unwrap(),
              },
            );
          }
        };
      }

      Transaction::Sign { id: _, attempt: _, round, data, signed } => {
        let topic = topic.unwrap();
        let signer = signer(signed);

        // A signer must submit exactly one blob per key share they hold
        if data.len() != usize::from(self.validator_weights[&signer]) {
          TributaryDb::fatal_slash(
            self.tributary_txn,
            self.set.set,
            signer,
            "signer signed with a distinct amount of key shares than they had key shares",
          );
          return;
        }

        match TributaryDb::accumulate(
          self.tributary_txn,
          self.set.set,
          self.validators,
          self.total_weight,
          block_number,
          topic,
          signer,
          self.validator_weights[&signer],
          &data,
        ) {
          DataSet::None => {}
          DataSet::Participating(data_set) => {
            let id = topic.sign_id(self.set.set).expect("Topic::Sign didn't have SignId");
            // Re-key the data from validator addresses to their per-share participant indexes
            let flatten_data_set = |data_set: HashMap<_, Vec<_>>| {
              let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
              for (validator, shares) in data_set {
                let indexes = &self.set.participant_indexes[&validator];
                assert_eq!(indexes.len(), shares.len());
                for (index, share) in indexes.iter().zip(shares) {
                  entries.insert(*index, share);
                }
              }
              entries
            };
            let data_set = flatten_data_set(data_set);
            TributaryDb::send_message(
              self.tributary_txn,
              self.set.set,
              // The message variant follows which round of the signing protocol this was
              match round {
                SigningProtocolRound::Preprocess => {
                  messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
                }
                SigningProtocolRound::Share => {
                  messages::sign::CoordinatorMessage::Shares { id, shares: data_set }
                }
              },
            )
          }
        }
      }
    };
  }
|
2023-09-25 23:11:36 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
|
2025-01-15 14:24:51 -05:00
|
|
|
TributaryDb::start_of_block(self.tributary_txn, self.set.set, block_number);
|
2025-01-11 04:14:21 -05:00
|
|
|
|
|
|
|
|
for tx in block.transactions {
|
|
|
|
|
match tx {
|
|
|
|
|
TributaryTransaction::Tendermint(TendermintTx::SlashEvidence(ev)) => {
|
|
|
|
|
// Since the evidence is on the chain, it will have already been validated
|
|
|
|
|
// We can just punish the signer
|
|
|
|
|
let data = match ev {
|
|
|
|
|
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
|
|
|
|
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
|
|
|
|
};
|
|
|
|
|
let msgs = (
|
|
|
|
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.0).unwrap(),
|
|
|
|
|
if data.1.is_some() {
|
|
|
|
|
Some(
|
|
|
|
|
decode_signed_message::<TendermintNetwork<TD, Transaction, P>>(&data.1.unwrap())
|
|
|
|
|
.unwrap(),
|
|
|
|
|
)
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
},
|
|
|
|
|
);
|
|
|
|
|
|
|
|
|
|
// Since anything with evidence is fundamentally faulty behavior, not just temporal
|
|
|
|
|
// errors, mark the node as fatally slashed
|
|
|
|
|
TributaryDb::fatal_slash(
|
|
|
|
|
self.tributary_txn,
|
2025-01-15 14:24:51 -05:00
|
|
|
self.set.set,
|
2025-01-11 04:14:21 -05:00
|
|
|
SeraiAddress(msgs.0.msg.sender),
|
|
|
|
|
&format!("invalid tendermint messages: {msgs:?}"),
|
|
|
|
|
);
|
|
|
|
|
}
|
|
|
|
|
TributaryTransaction::Application(tx) => {
|
|
|
|
|
self.handle_application_tx(block_number, tx);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2023-09-25 23:11:36 -04:00
|
|
|
}
|
2023-04-13 18:43:03 -04:00
|
|
|
}
|
2023-04-24 06:50:40 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
/// The task to scan the Tributary, populating `ProcessorMessages`.
pub struct ScanTributaryTask<TD: Db, P: P2p> {
  // Handle to the database storing this Tributary's local state
  tributary_db: TD,
  // Information on the validator set this Tributary is for
  set: NewSetInformation,
  // The validators, as collected from `set.validators` in iteration order
  validators: Vec<SeraiAddress>,
  // The sum of all validators' weights
  total_weight: u16,
  // Each validator's individual weight
  validator_weights: HashMap<SeraiAddress, u16>,
  // Reader for the Tributary blockchain itself
  tributary: TributaryReader<TD, Transaction>,
  // Marker tying the task to its P2p implementation without storing one
  _p2p: PhantomData<P>,
}
|
2023-09-25 17:15:36 -04:00
|
|
|
|
2025-01-12 05:52:33 -05:00
|
|
|
impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
|
2025-01-11 04:14:21 -05:00
|
|
|
/// Create a new instance of this task.
|
|
|
|
|
pub fn new(
|
|
|
|
|
tributary_db: TD,
|
2025-01-15 14:24:51 -05:00
|
|
|
set: NewSetInformation,
|
2025-01-11 04:14:21 -05:00
|
|
|
tributary: TributaryReader<TD, Transaction>,
|
|
|
|
|
) -> Self {
|
2025-01-15 14:24:51 -05:00
|
|
|
let mut validators = Vec::with_capacity(set.validators.len());
|
2025-01-11 04:14:21 -05:00
|
|
|
let mut total_weight = 0;
|
2025-01-15 14:24:51 -05:00
|
|
|
let mut validator_weights = HashMap::with_capacity(set.validators.len());
|
|
|
|
|
for (validator, weight) in set.validators.iter().copied() {
|
2025-01-11 04:14:21 -05:00
|
|
|
validators.push(validator);
|
|
|
|
|
total_weight += weight;
|
|
|
|
|
validator_weights.insert(validator, weight);
|
|
|
|
|
}
|
2023-09-25 17:15:36 -04:00
|
|
|
|
2025-01-11 04:14:21 -05:00
|
|
|
ScanTributaryTask {
|
|
|
|
|
tributary_db,
|
2025-01-15 14:24:51 -05:00
|
|
|
set,
|
2025-01-11 04:14:21 -05:00
|
|
|
validators,
|
|
|
|
|
total_weight,
|
|
|
|
|
validator_weights,
|
|
|
|
|
tributary,
|
|
|
|
|
_p2p: PhantomData,
|
|
|
|
|
}
|
2023-10-14 02:45:47 +03:00
|
|
|
}
|
2025-01-11 04:14:21 -05:00
|
|
|
}
|
2023-10-14 02:45:47 +03:00
|
|
|
|
2025-01-12 05:52:33 -05:00
|
|
|
impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
|
2025-01-12 18:29:08 -05:00
|
|
|
type Error = String;
|
|
|
|
|
|
|
|
|
|
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
|
2025-01-11 04:14:21 -05:00
|
|
|
async move {
|
|
|
|
|
let (mut last_block_number, mut last_block_hash) =
|
2025-01-15 14:24:51 -05:00
|
|
|
TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set.set)
|
2025-01-11 04:14:21 -05:00
|
|
|
.unwrap_or((0, self.tributary.genesis()));
|
|
|
|
|
|
|
|
|
|
let mut made_progress = false;
|
|
|
|
|
while let Some(next) = self.tributary.block_after(&last_block_hash) {
|
|
|
|
|
let block = self.tributary.block(&next).unwrap();
|
|
|
|
|
let block_number = last_block_number + 1;
|
|
|
|
|
let block_hash = block.hash();
|
|
|
|
|
|
|
|
|
|
// Make sure we have all of the provided transactions for this block
|
|
|
|
|
for tx in &block.transactions {
|
|
|
|
|
let TransactionKind::Provided(order) = tx.kind() else {
|
|
|
|
|
continue;
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
// make sure we have all the provided txs in this block locally
|
|
|
|
|
if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
|
|
|
|
|
return Err(format!(
|
|
|
|
|
"didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
|
2025-01-15 14:24:51 -05:00
|
|
|
self.set.set
|
2025-01-11 04:14:21 -05:00
|
|
|
));
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let mut tributary_txn = self.tributary_db.txn();
|
|
|
|
|
(ScanBlock {
|
|
|
|
|
_td: PhantomData::<TD>,
|
|
|
|
|
_p2p: PhantomData::<P>,
|
|
|
|
|
tributary_txn: &mut tributary_txn,
|
2025-01-15 14:24:51 -05:00
|
|
|
set: &self.set,
|
2025-01-11 04:14:21 -05:00
|
|
|
validators: &self.validators,
|
|
|
|
|
total_weight: self.total_weight,
|
|
|
|
|
validator_weights: &self.validator_weights,
|
|
|
|
|
})
|
|
|
|
|
.handle_block(block_number, block);
|
|
|
|
|
TributaryDb::set_last_handled_tributary_block(
|
|
|
|
|
&mut tributary_txn,
|
2025-01-15 14:24:51 -05:00
|
|
|
self.set.set,
|
2025-01-11 04:14:21 -05:00
|
|
|
block_number,
|
|
|
|
|
block_hash,
|
|
|
|
|
);
|
|
|
|
|
last_block_number = block_number;
|
|
|
|
|
last_block_hash = block_hash;
|
|
|
|
|
tributary_txn.commit();
|
|
|
|
|
|
|
|
|
|
made_progress = true;
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
Ok(made_progress)
|
|
|
|
|
}
|
2023-09-25 17:15:36 -04:00
|
|
|
}
|
2023-04-24 06:50:40 -04:00
|
|
|
}
|
2025-01-11 06:51:55 -05:00
|
|
|
|
|
|
|
|
/// Create the Transaction::SlashReport to publish per the local view.
|
|
|
|
|
pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
|
|
|
|
|
let mut slash_points = Vec::with_capacity(set.validators.len());
|
|
|
|
|
for (validator, _weight) in set.validators.iter().copied() {
|
|
|
|
|
slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
|
|
|
|
|
}
|
|
|
|
|
Transaction::SlashReport { slash_points, signed: Signed::default() }
|
|
|
|
|
}
|