DKG Removals (#467)
* Update ValidatorSets with a remove_participant call
* Add DkgRemoval, a sign machine for producing the relevant MuSig signatures
* Don't use position-dependent u8s, but rather Public keys, when removing validators from the DKG
* Add DkgRemovalPreprocess, DkgRemovalShares. The implementation is via a new publish_tributary_tx lambda; this code is a copy-pasted mess which will need to be cleaned up.
* Only allow non-removed validators to vote for removals. Otherwise, there's a risk that the remaining validators fall below 67% of the original set.
* Correct publish_serai_tx, which was previously publish_set_keys in practice
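The 67% point above can be made concrete with a small sketch. The following standalone Rust is illustrative only: the names removal_threshold and removal_passes, and the n - (n - 1) / 3 formula, are assumptions for this example rather than code from this commit. It shows why counting removal votes only from non-removed validators keeps the surviving set at or above the BFT threshold of the original set.

```rust
// Hypothetical illustration of the voting constraint; not coordinator code.

/// A common BFT threshold: with n = 3f + 1 participants tolerating f faults,
/// n - f (roughly 67% of n) participants must agree.
fn removal_threshold(original_n: u16) -> u16 {
  original_n - ((original_n - 1) / 3)
}

/// A removal only proceeds when enough *non-removed* validators vote for it,
/// measured against the original set size.
fn removal_passes(original_n: u16, votes_from_non_removed: u16) -> bool {
  votes_from_non_removed >= removal_threshold(original_n)
}

fn main() {
  // With 4 original validators (f = 1), 3 still-present validators must sign off.
  assert_eq!(removal_threshold(4), 3);
  assert!(removal_passes(4, 3));
  // Votes from already-removed validators don't count, so 2 remaining votes are insufficient.
  assert!(!removal_passes(4, 2));
}
```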
@@ -1,6 +1,8 @@
use core::{ops::Deref, future::Future};
use std::collections::HashMap;

use rand_core::OsRng;

use zeroize::{Zeroize, Zeroizing};

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
@@ -26,10 +28,13 @@ use serai_db::{Get, Db};
use crate::{
processors::Processors,
tributary::{
Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet, Accumulation,
SignData, Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet,
Accumulation,
dkg_confirmer::DkgConfirmer,
scanner::{RecognizedIdType, RIDTrait},
FatallySlashed, DkgShare, PlanIds, ConfirmationNonces, AttemptDb, DataDb,
dkg_removal::DkgRemoval,
scanner::{RecognizedIdType, RIDTrait, PstTxType},
FatallySlashed, DkgShare, DkgCompleted, PlanIds, ConfirmationNonces, RemovalNonces, AttemptDb,
DataDb,
},
};

@@ -40,8 +45,11 @@ const DKG_SHARES: &str = "shares";
const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces";
const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares";

// These s/b prefixes between Batch and Sign should be unnecessary, as Batch/Share entries
// themselves should already be domain separated
// These d/s/b prefixes between DKG Removal, Batch, and Sign should be unnecessary, as Batch/Share
// entries themselves should already be domain separated
const DKG_REMOVAL_PREPROCESS: &str = "d_preprocess";
const DKG_REMOVAL_SHARE: &str = "d_share";

const BATCH_PREPROCESS: &str = "b_preprocess";
const BATCH_SHARE: &str = "b_share";

@@ -94,26 +102,54 @@ pub fn generated_key_pair<D: Db>(
DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair)
}

pub(super) fn fatal_slash<D: Db>(
pub(super) async fn fatal_slash<
D: Db,
FPtt: Future<Output = ()>,
PTT: Clone + Fn(Transaction) -> FPtt,
>(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
account: [u8; 32],
spec: &TributarySpec,
publish_tributary_tx: &PTT,
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
slashing: [u8; 32],
reason: &str,
) {
log::warn!("fatally slashing {}. reason: {}", hex::encode(account), reason);
FatallySlashed::set_fatally_slashed(txn, genesis, account);
let genesis = spec.genesis();

log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
FatallySlashed::set_fatally_slashed(txn, genesis, slashing);
// TODO: disconnect the node from network/ban from further participation in all Tributaries

// TODO: If during DKG, trigger a re-attempt
// Despite triggering a re-attempt, this DKG may still complete and may become in-use

// If during a DKG, remove the participant
if DkgCompleted::get(txn, genesis).is_none() {
let preprocess = DkgRemoval::preprocess(spec, our_key, 0);
let mut tx = Transaction::DkgRemovalPreprocess(SignData {
plan: slashing,
attempt: 0,
data: vec![preprocess.to_vec()],
signed: Transaction::empty_signed(),
});
tx.sign(&mut OsRng, genesis, our_key);
publish_tributary_tx(tx).await;
}
}

// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
// Tributary post-DKG
// https://github.com/serai-dex/serai/issues/426

fn fatal_slash_with_participant_index<D: Db>(
spec: &TributarySpec,
async fn fatal_slash_with_participant_index<
D: Db,
FPtt: Future<Output = ()>,
PTT: Clone + Fn(Transaction) -> FPtt,
>(
txn: &mut <D as Db>::Transaction<'_>,
spec: &TributarySpec,
publish_tributary_tx: &PTT,
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
i: Participant,
reason: &str,
) {
@@ -129,14 +165,18 @@ fn fatal_slash_with_participant_index<D: Db>(
}
let validator = validator.unwrap();

fatal_slash::<D>(txn, spec.genesis(), validator.to_bytes(), reason);
fatal_slash::<D, _, _>(txn, spec, publish_tributary_tx, our_key, validator.to_bytes(), reason)
.await;
}

#[allow(clippy::too_many_arguments)]
pub(crate) async fn handle_application_tx<
D: Db,
Pro: Processors,
FPst: Future<Output = ()>,
PST: Clone + Fn(ValidatorSet, Vec<u8>) -> FPst,
PST: Clone + Fn(ValidatorSet, PstTxType, Vec<u8>) -> FPst,
FPtt: Future<Output = ()>,
PTT: Clone + Fn(Transaction) -> FPtt,
FRid: Future<Output = ()>,
RID: RIDTrait<FRid>,
>(
@@ -144,6 +184,7 @@ pub(crate) async fn handle_application_tx<
spec: &TributarySpec,
processors: &Pro,
publish_serai_tx: PST,
publish_tributary_tx: &PTT,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
recognized_id: RID,
txn: &mut <D as Db>::Transaction<'_>,
@@ -159,18 +200,28 @@ pub(crate) async fn handle_application_tx<
}
}

let handle = |txn: &mut <D as Db>::Transaction<'_>,
data_spec: &DataSpecification,
bytes: Vec<u8>,
signed: &Signed| {
async fn handle<D: Db, FPtt: Future<Output = ()>, PTT: Clone + Fn(Transaction) -> FPtt>(
txn: &mut <D as Db>::Transaction<'_>,
spec: &TributarySpec,
publish_tributary_tx: &PTT,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
data_spec: &DataSpecification,
bytes: Vec<u8>,
signed: &Signed,
) -> Accumulation {
let genesis = spec.genesis();

let Some(curr_attempt) = AttemptDb::attempt(txn, genesis, data_spec.topic) else {
// Premature publication of a valid ID/publication of an invalid ID
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"published data for ID without an attempt",
);
)
.await;
return Accumulation::NotReady;
};

@@ -178,7 +229,15 @@ pub(crate) async fn handle_application_tx<
// This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a
// cheap check to leave in for safety
if DataDb::get(txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "published data multiple times");
fatal_slash::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"published data multiple times",
)
.await;
return Accumulation::NotReady;
}

@@ -189,12 +248,15 @@ pub(crate) async fn handle_application_tx<
}
// If the attempt is greater, this is a premature publication, full slash
if data_spec.attempt > curr_attempt {
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"published data with an attempt which hasn't started",
);
)
.await;
return Accumulation::NotReady;
}

@@ -205,22 +267,31 @@ pub(crate) async fn handle_application_tx<

// Accumulate this data
DataDb::accumulate(txn, key, spec, data_spec, signed.signer, &bytes)
};
}

fn check_sign_data_len<D: Db>(
async fn check_sign_data_len<
D: Db,
FPtt: Future<Output = ()>,
PTT: Clone + Fn(Transaction) -> FPtt,
>(
txn: &mut D::Transaction<'_>,
spec: &TributarySpec,
publish_tributary_tx: &PTT,
our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
signer: <Ristretto as Ciphersuite>::G,
len: usize,
) -> Result<(), ()> {
let signer_i = spec.i(signer).unwrap();
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
spec.genesis(),
spec,
publish_tributary_tx,
our_key,
signer.to_bytes(),
"signer published a distinct amount of sign data than they had shares",
);
)
.await;
Err(())?;
}
Ok(())
@@ -242,18 +313,40 @@ pub(crate) async fn handle_application_tx<

match tx {
Transaction::RemoveParticipant(i) => {
fatal_slash_with_participant_index::<D>(spec, txn, i, "RemoveParticipant Provided TX")
fatal_slash_with_participant_index::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
i,
"RemoveParticipant Provided TX",
)
.await
}
Transaction::DkgCommitments(attempt, commitments, signed) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, signed.signer, commitments.len()) else {
let Ok(_) = check_sign_data_len::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signed.signer,
commitments.len(),
)
.await
else {
return;
};
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
commitments.encode(),
&signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(mut commitments)) => {
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
unflatten(spec, &mut commitments);
@@ -281,17 +374,28 @@ pub(crate) async fn handle_application_tx<
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);

if shares.len() != usize::from(sender_is_len) {
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"invalid amount of DKG shares by key shares",
);
)
.await;
return;
}
for shares in &shares {
if shares.len() != (usize::from(spec.n() - sender_is_len)) {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
fatal_slash::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"invalid amount of DKG shares",
)
.await;
return;
}
}
@@ -348,18 +452,27 @@ pub(crate) async fn handle_application_tx<
// Drop shares as it's been mutated into invalidity
drop(shares);

let confirmation_nonces = handle(
let confirmation_nonces = handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_NONCES, attempt },
confirmation_nonces.to_vec(),
&signed,
);
match handle(
)
.await;
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
our_shares.encode(),
&signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(shares)) => {
log::info!("got all DkgShares for {}", hex::encode(genesis));

@@ -417,24 +530,30 @@ pub(crate) async fn handle_application_tx<
if (u16::from(accuser) < u16::from(range.start)) ||
(u16::from(range.end) <= u16::from(accuser))
{
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"accused with a Participant index which wasn't theirs",
);
)
.await;
return;
}

if !((u16::from(range.start) <= u16::from(faulty)) &&
(u16::from(faulty) < u16::from(range.end)))
{
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
signed.signer.to_bytes(),
"accused self of having an InvalidDkgShare",
);
)
.await;
return;
}

@@ -454,12 +573,17 @@ pub(crate) async fn handle_application_tx<
}

Transaction::DkgConfirmed(attempt, shares, signed) => {
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_SHARES, attempt },
shares.to_vec(),
&signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(shares)) => {
log::info!("got all DkgConfirmed for {}", hex::encode(genesis));

@@ -474,13 +598,24 @@ pub(crate) async fn handle_application_tx<
match DkgConfirmer::complete(spec, key, attempt, preprocesses, &key_pair, shares) {
Ok(sig) => sig,
Err(p) => {
fatal_slash_with_participant_index::<D>(spec, txn, p, "invalid DkgConfirmer share");
fatal_slash_with_participant_index::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
p,
"invalid DkgConfirmer share",
)
.await;
return;
}
};

DkgCompleted::set(txn, genesis, &());

publish_serai_tx(
spec.set(),
PstTxType::SetKeys,
SeraiValidatorSets::set_keys(spec.set().network, key_pair, Signature(sig)),
)
.await;
@@ -492,6 +627,124 @@ pub(crate) async fn handle_application_tx<
}
}

Transaction::DkgRemovalPreprocess(data) => {
let signer = data.signed.signer;
// TODO: Only handle this if we're not actively removing this validator
if (data.data.len() != 1) || (data.data[0].len() != 64) {
fatal_slash::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signer.to_bytes(),
"non-64-byte DKG removal preprocess",
)
.await;
return;
}
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::DkgRemoval(data.plan),
label: DKG_REMOVAL_PREPROCESS,
attempt: data.attempt,
},
data.data.encode(),
&data.signed,
)
.await
{
Accumulation::Ready(DataSet::Participating(preprocesses)) => {
RemovalNonces::set(txn, genesis, data.plan, data.attempt, &preprocesses);

let Ok(share) = DkgRemoval::share(spec, key, data.attempt, preprocesses, data.plan)
else {
// TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
// slash) and censor transactions (yet don't explicitly ban)
return;
};

let mut tx = Transaction::DkgRemovalPreprocess(SignData {
plan: data.plan,
attempt: data.attempt,
data: vec![share.to_vec()],
signed: Transaction::empty_signed(),
});
tx.sign(&mut OsRng, genesis, key);
publish_tributary_tx(tx).await;
}
Accumulation::Ready(DataSet::NotParticipating) => {}
Accumulation::NotReady => {}
}
}
Transaction::DkgRemovalShare(data) => {
let signer = data.signed.signer;
if (data.data.len() != 1) || (data.data[0].len() != 32) {
fatal_slash::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signer.to_bytes(),
"non-32-byte DKG removal share",
)
.await;
return;
}
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::DkgRemoval(data.plan),
label: DKG_REMOVAL_SHARE,
attempt: data.attempt,
},
data.data.encode(),
&data.signed,
)
.await
{
Accumulation::Ready(DataSet::Participating(shares)) => {
let preprocesses = RemovalNonces::get(txn, genesis, data.plan, data.attempt).unwrap();

let Ok((signers, signature)) =
DkgRemoval::complete(spec, key, data.attempt, preprocesses, data.plan, shares)
else {
// TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
// slash) and censor transactions (yet don't explicitly ban)
return;
};

// TODO: Only handle this if we're not actively removing any of the signers
// The created Substrate call will fail if a removed validator was one of the signers
// Since:
// 1) publish_serai_tx will block this task until the TX is published
// 2) We won't scan any more TXs/blocks until we handle this TX
// The TX *must* be successfully published *before* we start removing any more signers
// Accordingly, if the signers aren't currently being removed, they won't be removed
// by the time this transaction is successfully published *unless* a malicious 34%
// participates with the non-participating 33% to continue operation and produce a
// distinct removal (since the non-participating won't block in this block)
// This breaks BFT and is accordingly within bounds

let tx = serai_client::SeraiValidatorSets::remove_participant(
spec.set().network,
Public(data.plan),
signers,
Signature(signature),
);
publish_serai_tx(spec.set(), PstTxType::RemoveParticipant(data.plan), tx).await;
}
Accumulation::Ready(DataSet::NotParticipating) => {}
Accumulation::NotReady => {}
}
}

Transaction::CosignSubstrateBlock(hash) => {
AttemptDb::recognize_topic(
txn,
@@ -540,17 +793,37 @@ pub(crate) async fn handle_application_tx<

Transaction::SubstratePreprocess(data) => {
let signer = data.signed.signer;
let Ok(_) = check_sign_data_len::<D>(txn, spec, signer, data.data.len()) else {
let Ok(_) = check_sign_data_len::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signer,
data.data.len(),
)
.await
else {
return;
};
for data in &data.data {
if data.len() != 64 {
fatal_slash::<D>(txn, genesis, signer.to_bytes(), "non-64-byte Substrate preprocess");
fatal_slash::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
signer.to_bytes(),
"non-64-byte Substrate preprocess",
)
.await;
return;
}
}
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::SubstrateSign(data.plan),
label: BATCH_PREPROCESS,
@@ -558,7 +831,9 @@ pub(crate) async fn handle_application_tx<
},
data.data.encode(),
&data.signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
processors
@@ -583,11 +858,23 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::SubstrateShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
let Ok(_) = check_sign_data_len::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
data.signed.signer,
data.data.len(),
)
.await
else {
return;
};
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::SubstrateSign(data.plan),
label: BATCH_SHARE,
@@ -595,7 +882,9 @@ pub(crate) async fn handle_application_tx<
},
data.data.encode(),
&data.signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
processors
@@ -621,11 +910,23 @@ pub(crate) async fn handle_application_tx<
}

Transaction::SignPreprocess(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
let Ok(_) = check_sign_data_len::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
data.signed.signer,
data.data.len(),
)
.await
else {
return;
};
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::Sign(data.plan),
label: SIGN_PREPROCESS,
@@ -633,7 +934,9 @@ pub(crate) async fn handle_application_tx<
},
data.data.encode(),
&data.signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
processors
@@ -651,11 +954,23 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::SignShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
let Ok(_) = check_sign_data_len::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
data.signed.signer,
data.data.len(),
)
.await
else {
return;
};
match handle(
match handle::<D, _, _>(
txn,
spec,
publish_tributary_tx,
key,
&DataSpecification {
topic: Topic::Sign(data.plan),
label: SIGN_SHARE,
@@ -663,7 +978,9 @@ pub(crate) async fn handle_application_tx<
},
data.data.encode(),
&data.signed,
) {
)
.await
{
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
processors
@@ -688,12 +1005,15 @@ pub(crate) async fn handle_application_tx<
);

if AttemptDb::attempt(txn, genesis, Topic::Sign(plan)).is_none() {
fatal_slash::<D>(
fatal_slash::<D, _, _>(
txn,
genesis,
spec,
publish_tributary_tx,
key,
first_signer.to_bytes(),
"claimed an unrecognized plan was completed",
);
)
.await;
return;
};
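For context on the new bounds threaded through this file (FPtt: Future<Output = ()>, PTT: Clone + Fn(Transaction) -> FPtt), the caller is expected to supply publish_tributary_tx as a cloneable async lambda. Below is a minimal, self-contained sketch of that shape only; Transaction, Tributary, and needs_publisher are stand-ins rather than the coordinator's actual types, and a tokio runtime is assumed.

```rust
use core::future::Future;

// Stand-in types, purely for illustration.
struct Transaction(Vec<u8>);

#[derive(Clone)]
struct Tributary;

impl Tributary {
  async fn publish(&self, _tx: Transaction) {
    // The real coordinator would add the TX to the local Tributary's mempool here.
  }
}

// Mirrors the bound added to fatal_slash/check_sign_data_len/handle_application_tx.
async fn needs_publisher<FPtt: Future<Output = ()>, PTT: Clone + Fn(Transaction) -> FPtt>(
  publish_tributary_tx: &PTT,
) {
  publish_tributary_tx(Transaction(vec![])).await;
}

#[tokio::main]
async fn main() {
  let tributary = Tributary;
  // A cloneable closure returning a future satisfies PTT.
  let publish_tributary_tx = |tx: Transaction| {
    let tributary = tributary.clone();
    async move { tributary.publish(tx).await }
  };
  needs_publisher(&publish_tributary_tx).await;
}
```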