Mirror of https://github.com/serai-dex/serai.git
Further expand clippy workspace lints
Notably reduces the amount of async code and cloning.
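The changes below follow a few mechanical patterns driven by the expanded workspace lints: identical match arms merged with `|` patterns (clippy::match_same_arms), `.map(..).unwrap_or(..)` collapsed into `map_or` (clippy::map_unwrap_or), large arguments passed by reference instead of by value, and `async` dropped from functions that never await (clippy::unused_async). A minimal sketch of these patterns, using hypothetical stand-in types rather than the coordinator's actual ones:

// Hypothetical stand-ins illustrating the lint-driven rewrites; these are not
// types from the Serai codebase.
enum Msg {
  KeepAlive,
  Tributary([u8; 32]),
  Heartbeat([u8; 32]),
}

fn genesis(msg: &Msg) -> Option<[u8; 32]> {
  match msg {
    // clippy::match_same_arms: identical arm bodies merged via `|` patterns
    Msg::Tributary(genesis) | Msg::Heartbeat(genesis) => Some(*genesis),
    Msg::KeepAlive => None,
  }
}

// clippy::needless_pass_by_value: borrow the batch instead of taking ownership
fn queue(batches: &mut Vec<Vec<u8>>, batch: &[u8]) {
  batches.push(batch.to_vec());
}

fn next_to_verify(last_verified: Option<u32>) -> u32 {
  // clippy::map_unwrap_or: `.map(..).unwrap_or(..)` collapsed into `map_or`
  last_verified.map_or(0, |last| last + 1)
}

fn main() {
  assert_eq!(genesis(&Msg::KeepAlive), None);
  assert_eq!(genesis(&Msg::Tributary([0; 32])), Some([0; 32]));
  assert_eq!(genesis(&Msg::Heartbeat([1; 32])), Some([1; 32]));

  let mut batches = vec![];
  queue(&mut batches, &[1, 2, 3]);
  assert_eq!(batches.len(), 1);

  assert_eq!(next_to_verify(None), 0);
  assert_eq!(next_to_verify(Some(4)), 5);
}

The `#[allow(...)]` attributes that do appear in the diff mark the few places where a lint fires but the existing layout or signature was kept deliberately.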
@@ -85,13 +85,13 @@ impl FirstPreprocessDb {
     network: NetworkId,
     id_type: RecognizedIdType,
     id: &[u8],
-    preprocess: Vec<Vec<u8>>,
+    preprocess: &Vec<Vec<u8>>,
   ) {
     if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
-      assert_eq!(existing, preprocess, "saved a distinct first preprocess");
+      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
       return;
     }
-    FirstPreprocessDb::set(txn, network, id_type, id, &preprocess);
+    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
   }
 }

@@ -114,7 +114,7 @@ impl HandoverBatchDb {
   }
 }
 impl QueuedBatchesDb {
-  pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: Transaction) {
+  pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
     let mut batches = Self::get(txn, set).unwrap_or_default();
     batch.write(&mut batches).unwrap();
     Self::set(txn, set, &batches);
@@ -159,17 +159,17 @@ async fn handle_processor_message<D: Db, P: P2p>(
       // We'll only receive these if we fired GenerateKey, which we'll only do if if we're
       // in-set, making the Tributary relevant
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-        key_gen::ProcessorMessage::Commitments { id, .. } => Some(id.session),
-        key_gen::ProcessorMessage::InvalidCommitments { id, .. } => Some(id.session),
-        key_gen::ProcessorMessage::Shares { id, .. } => Some(id.session),
-        key_gen::ProcessorMessage::InvalidShare { id, .. } => Some(id.session),
-        key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session),
+        key_gen::ProcessorMessage::Commitments { id, .. } |
+        key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
+        key_gen::ProcessorMessage::Shares { id, .. } |
+        key_gen::ProcessorMessage::InvalidShare { id, .. } |
+        key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
         key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
       },
       ProcessorMessage::Sign(inner_msg) => match inner_msg {
         // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
-        sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
-        sign::ProcessorMessage::Preprocess { id, .. } => Some(id.session),
+        sign::ProcessorMessage::InvalidParticipant { id, .. } |
+        sign::ProcessorMessage::Preprocess { id, .. } |
         sign::ProcessorMessage::Share { id, .. } => Some(id.session),
         // While the Processor's Scanner will always emit Completed, that's routed through the
         // Signer and only becomes a ProcessorMessage::Completed if the Signer is present and

@@ -233,9 +233,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
         None
       }
       // We'll only fire these if we are the Substrate signer, making the Tributary relevant
-      coordinator::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
-      coordinator::ProcessorMessage::CosignPreprocess { id, .. } => Some(id.session),
-      coordinator::ProcessorMessage::BatchPreprocess { id, .. } => Some(id.session),
+      coordinator::ProcessorMessage::InvalidParticipant { id, .. } |
+      coordinator::ProcessorMessage::CosignPreprocess { id, .. } |
+      coordinator::ProcessorMessage::BatchPreprocess { id, .. } |
       coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
       coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
         let cosigned_block = CosignedBlock {

@@ -486,7 +486,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
           network,
           RecognizedIdType::Plan,
           &id.id,
-          preprocesses,
+          &preprocesses,
         );

         vec![]

@@ -566,7 +566,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
             };
             id.to_le_bytes()
           },
-          preprocesses.into_iter().map(Into::into).collect(),
+          &preprocesses.into_iter().map(Into::into).collect::<Vec<_>>(),
         );

         let intended = Transaction::Batch {

@@ -611,8 +611,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         // the prior Batch hasn't been verified yet...
         if (last_received != 0) &&
           LastVerifiedBatchDb::get(&txn, msg.network)
-            .map(|last_verified| last_verified < (last_received - 1))
-            .unwrap_or(true)
+            .map_or(true, |last_verified| last_verified < (last_received - 1))
         {
           // Withhold this TX until we verify all prior `Batch`s
           queue = true;

@@ -620,7 +619,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
         }

         if queue {
-          QueuedBatchesDb::queue(&mut txn, spec.set(), intended);
+          QueuedBatchesDb::queue(&mut txn, spec.set(), &intended);
           vec![]
         } else {
           // Because this is post-verification of the handover batch, take all queued `Batch`s

@@ -650,10 +649,11 @@ async fn handle_processor_message<D: Db, P: P2p>(
           signed: Transaction::empty_signed(),
         })]
       }
+      #[allow(clippy::match_same_arms)] // Allowed to preserve layout
       coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(),
     },
     ProcessorMessage::Substrate(inner_msg) => match inner_msg {
-      processor_messages::substrate::ProcessorMessage::Batch { .. } => unreachable!(),
+      processor_messages::substrate::ProcessorMessage::Batch { .. } |
       processor_messages::substrate::ProcessorMessage::SignedBatch { .. } => unreachable!(),
     },
   };
@@ -823,9 +823,8 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
       let _hvq_lock = HANDOVER_VERIFY_QUEUE_LOCK.get_or_init(|| Mutex::new(())).lock().await;
       let mut txn = db.txn();
       let mut to_publish = vec![];
-      let start_id = LastVerifiedBatchDb::get(&txn, network)
-        .map(|already_verified| already_verified + 1)
-        .unwrap_or(0);
+      let start_id =
+        LastVerifiedBatchDb::get(&txn, network).map_or(0, |already_verified| already_verified + 1);
       if let Some(last_id) =
         substrate::verify_published_batches::<D>(&mut txn, network, u32::MAX).await
       {

@@ -847,7 +846,7 @@ async fn handle_cosigns_and_batch_publication<D: Db, P: P2p>(
         to_publish.push((set.session, queued.remove(0)));
         // Re-queue the remaining batches
         for remaining in queued {
-          QueuedBatchesDb::queue(&mut txn, set, remaining);
+          QueuedBatchesDb::queue(&mut txn, set, &remaining);
         }
       }
@@ -59,11 +59,10 @@ pub enum P2pMessageKind {
 impl P2pMessageKind {
   fn genesis(&self) -> Option<[u8; 32]> {
     match self {
-      P2pMessageKind::KeepAlive => None,
-      P2pMessageKind::Tributary(genesis) => Some(*genesis),
-      P2pMessageKind::Heartbeat(genesis) => Some(*genesis),
+      P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None,
+      P2pMessageKind::Tributary(genesis) |
+      P2pMessageKind::Heartbeat(genesis) |
       P2pMessageKind::Block(genesis) => Some(*genesis),
-      P2pMessageKind::CosignedBlock => None,
     }
   }

@@ -303,7 +302,7 @@ impl LibP2p {
     let mut time_of_last_p2p_message = Instant::now();

-    async fn broadcast_raw(
+    #[allow(clippy::needless_pass_by_ref_mut)] // False positive
+    fn broadcast_raw(
       p2p: &mut Swarm<Behavior>,
       time_of_last_p2p_message: &mut Instant,
       genesis: Option<[u8; 32]>,

@@ -364,7 +363,7 @@ impl LibP2p {
               &mut time_of_last_p2p_message,
               genesis,
               msg,
-            ).await;
+            );
           }

           // Handle new incoming messages

@@ -416,7 +415,7 @@ impl LibP2p {
               &mut time_of_last_p2p_message,
               None,
               P2pMessageKind::KeepAlive.serialize()
-            ).await;
+            );
           }
         }
       }

@@ -689,16 +688,8 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
       let msg = p2p.receive().await;
       match msg.kind {
         P2pMessageKind::KeepAlive => {}
-        P2pMessageKind::Tributary(genesis) => {
-          if let Some(channel) = channels.read().await.get(&genesis) {
-            channel.send(msg).unwrap();
-          }
-        }
-        P2pMessageKind::Heartbeat(genesis) => {
-          if let Some(channel) = channels.read().await.get(&genesis) {
-            channel.send(msg).unwrap();
-          }
-        }
+        P2pMessageKind::Tributary(genesis) |
+        P2pMessageKind::Heartbeat(genesis) |
         P2pMessageKind::Block(genesis) => {
           if let Some(channel) = channels.read().await.get(&genesis) {
             channel.send(msg).unwrap();
@@ -18,7 +18,7 @@ pub use inner_db::{NextBlock, BatchInstructionsHashDb};
 pub struct HandledEvent;
 impl HandledEvent {
   fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
-    inner_db::HandledEvent::get(getter, block).map(|last| last + 1).unwrap_or(0)
+    inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)
   }
   pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
     let next = Self::next_to_handle_event(getter, block);

@@ -396,9 +396,8 @@ pub async fn scan_task<D: Db, Pro: Processors>(
       Ok(latest) => {
         if latest.header.number >= next_substrate_block {
           return latest;
-        } else {
-          sleep(Duration::from_secs(3)).await;
         }
+        sleep(Duration::from_secs(3)).await;
       }
       Err(e) => {
         log::error!("couldn't communicate with serai node: {e}");

@@ -493,7 +492,7 @@ pub(crate) async fn verify_published_batches<D: Db>(
 ) -> Option<u32> {
   // TODO: Localize from MainDb to SubstrateDb
   let last = crate::LastVerifiedBatchDb::get(txn, network);
-  for id in last.map(|last| last + 1).unwrap_or(0) ..= optimistic_up_to {
+  for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
     let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
       break;
     };
@@ -60,7 +60,7 @@ fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + De

     data: {
       let mut res = vec![];
-      for _ in 0 .. ((rng.next_u64() % 255) + 1) {
+      for _ in 0 ..= (rng.next_u64() % 256) {
         res.push(random_vec(&mut OsRng, 512));
       }
       res

@@ -70,8 +70,8 @@ fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + De
   }
 }

-fn test_read_write<RW: Eq + Debug + ReadWrite>(value: RW) {
-  assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
+fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
+  assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
 }

 #[test]

@@ -102,36 +102,36 @@ fn tx_size_limit() {

 #[test]
 fn serialize_sign_data() {
-  fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: SignData<Id>) {
+  fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: &SignData<Id>) {
     let mut buf = vec![];
     value.write(&mut buf).unwrap();
-    assert_eq!(value, SignData::read(&mut buf.as_slice()).unwrap())
+    assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap())
   }

   let mut plan = [0; 3];
   OsRng.fill_bytes(&mut plan);
-  test_read_write(random_sign_data::<_, _>(
+  test_read_write(&random_sign_data::<_, _>(
     &mut OsRng,
     plan,
     if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 5];
   OsRng.fill_bytes(&mut plan);
-  test_read_write(random_sign_data::<_, _>(
+  test_read_write(&random_sign_data::<_, _>(
     &mut OsRng,
     plan,
     if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 8];
   OsRng.fill_bytes(&mut plan);
-  test_read_write(random_sign_data::<_, _>(
+  test_read_write(&random_sign_data::<_, _>(
     &mut OsRng,
     plan,
     if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
   ));
   let mut plan = [0; 24];
   OsRng.fill_bytes(&mut plan);
-  test_read_write(random_sign_data::<_, _>(
+  test_read_write(&random_sign_data::<_, _>(
     &mut OsRng,
     plan,
     if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
@@ -140,7 +140,7 @@ fn serialize_sign_data() {

 #[test]
 fn serialize_transaction() {
-  test_read_write(Transaction::RemoveParticipantDueToDkg {
+  test_read_write(&Transaction::RemoveParticipantDueToDkg {
     attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(),
     participant: frost::Participant::new(
       u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),

@@ -155,7 +155,7 @@ fn serialize_transaction() {
     OsRng.fill_bytes(&mut temp);
     commitments.push(temp);
   }
-  test_read_write(Transaction::DkgCommitments {
+  test_read_write(&Transaction::DkgCommitments {
     attempt: random_u32(&mut OsRng),
     commitments,
     signed: random_signed_with_nonce(&mut OsRng, 0),

@@ -170,7 +170,7 @@ fn serialize_transaction() {
   // Create a valid vec of shares
   let mut shares = vec![];
   // Create up to 150 participants
-  for _ in 0 .. ((OsRng.next_u64() % 150) + 1) {
+  for _ in 0 ..= (OsRng.next_u64() % 150) {
     // Give each sender multiple shares
     let mut sender_shares = vec![];
     for _ in 0 .. amount_of_shares {

@@ -181,7 +181,7 @@ fn serialize_transaction() {
     shares.push(sender_shares);
   }

-  test_read_write(Transaction::DkgShares {
+  test_read_write(&Transaction::DkgShares {
     attempt: random_u32(&mut OsRng),
     shares,
     confirmation_nonces: {

@@ -194,7 +194,7 @@ fn serialize_transaction() {
   }

   for i in 0 .. 2 {
-    test_read_write(Transaction::InvalidDkgShare {
+    test_read_write(&Transaction::InvalidDkgShare {
       attempt: random_u32(&mut OsRng),
       accuser: frost::Participant::new(
         u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),

@@ -213,7 +213,7 @@ fn serialize_transaction() {
     });
   }

-  test_read_write(Transaction::DkgConfirmed {
+  test_read_write(&Transaction::DkgConfirmed {
     attempt: random_u32(&mut OsRng),
     confirmation_share: {
       let mut share = [0; 32];

@@ -226,20 +226,20 @@ fn serialize_transaction() {
   {
     let mut block = [0; 32];
     OsRng.fill_bytes(&mut block);
-    test_read_write(Transaction::CosignSubstrateBlock(block));
+    test_read_write(&Transaction::CosignSubstrateBlock(block));
   }

   {
     let mut block = [0; 32];
     OsRng.fill_bytes(&mut block);
     let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
-    test_read_write(Transaction::Batch { block, batch });
+    test_read_write(&Transaction::Batch { block, batch });
   }
-  test_read_write(Transaction::SubstrateBlock(OsRng.next_u64()));
+  test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64()));

   {
     let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
-    test_read_write(Transaction::SubstrateSign(random_sign_data(
+    test_read_write(&Transaction::SubstrateSign(random_sign_data(
       &mut OsRng,
       SubstrateSignableId::Batch(batch),
       Label::Preprocess,

@@ -247,7 +247,7 @@ fn serialize_transaction() {
   }
   {
     let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
-    test_read_write(Transaction::SubstrateSign(random_sign_data(
+    test_read_write(&Transaction::SubstrateSign(random_sign_data(
       &mut OsRng,
       SubstrateSignableId::Batch(batch),
       Label::Share,

@@ -257,12 +257,12 @@ fn serialize_transaction() {
   {
     let mut plan = [0; 32];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
+    test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
   }
   {
     let mut plan = [0; 32];
     OsRng.fill_bytes(&mut plan);
-    test_read_write(Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
+    test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
   }

   {

@@ -270,7 +270,7 @@ fn serialize_transaction() {
     OsRng.fill_bytes(&mut plan);
     let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];
     OsRng.fill_bytes(&mut tx_hash);
-    test_read_write(Transaction::SignCompleted {
+    test_read_write(&Transaction::SignCompleted {
       plan,
       tx_hash,
       first_signer: random_signed_with_nonce(&mut OsRng, 2).signer,
@@ -204,18 +204,18 @@ impl<
     Accumulation::NotReady
   }

-  async fn handle_data(
+  fn handle_data(
     &mut self,
     removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
-    bytes: Vec<u8>,
+    bytes: &Vec<u8>,
     signed: &Signed,
   ) -> Accumulation {
     let genesis = self.spec.genesis();

     let Some(curr_attempt) = AttemptDb::attempt(self.txn, genesis, data_spec.topic) else {
       // Premature publication of a valid ID/publication of an invalid ID
-      self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt").await;
+      self.fatal_slash(signed.signer.to_bytes(), "published data for ID without an attempt");
       return Accumulation::NotReady;
     };

@@ -223,7 +223,7 @@ impl<
     // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a
     // cheap check to leave in for safety
     if DataDb::get(self.txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {
-      self.fatal_slash(signed.signer.to_bytes(), "published data multiple times").await;
+      self.fatal_slash(signed.signer.to_bytes(), "published data multiple times");
       return Accumulation::NotReady;
     }

@@ -239,12 +239,10 @@ impl<
     }
     // If the attempt is greater, this is a premature publication, full slash
     if data_spec.attempt > curr_attempt {
-      self
-        .fatal_slash(
-          signed.signer.to_bytes(),
-          "published data with an attempt which hasn't started",
-        )
-        .await;
+      self.fatal_slash(
+        signed.signer.to_bytes(),
+        "published data with an attempt which hasn't started",
+      );
       return Accumulation::NotReady;
     }

@@ -254,10 +252,10 @@ impl<
     // TODO: If this is shares, we need to check they are part of the selected signing set

     // Accumulate this data
-    self.accumulate(removed, data_spec, signed.signer, &bytes)
+    self.accumulate(removed, data_spec, signed.signer, bytes)
   }

-  async fn check_sign_data_len(
+  fn check_sign_data_len(
     &mut self,
     removed: &[<Ristretto as Ciphersuite>::G],
     signer: <Ristretto as Ciphersuite>::G,

@@ -265,12 +263,10 @@ impl<
   ) -> Result<(), ()> {
     let signer_i = self.spec.i(removed, signer).unwrap();
     if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
-      self
-        .fatal_slash(
-          signer.to_bytes(),
-          "signer published a distinct amount of sign data than they had shares",
-        )
-        .await;
+      self.fatal_slash(
+        signer.to_bytes(),
+        "signer published a distinct amount of sign data than they had shares",
+      );
       Err(())?;
     }
     Ok(())
@@ -292,34 +288,28 @@ impl<
     }

     match tx {
-      Transaction::RemoveParticipantDueToDkg { attempt, participant } => {
-        self
-          .fatal_slash_with_participant_index(
-            &removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| {
-              panic!(
-                "removed a participant due to a provided transaction with an attempt not {}",
-                "locally handled?"
-              )
-            }),
-            participant,
-            "RemoveParticipantDueToDkg Provided TX",
-          )
-          .await
-      }
+      Transaction::RemoveParticipantDueToDkg { attempt, participant } => self
+        .fatal_slash_with_participant_index(
+          &removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| {
+            panic!(
+              "removed a participant due to a provided transaction with an attempt not {}",
+              "locally handled?"
+            )
+          }),
+          participant,
+          "RemoveParticipantDueToDkg Provided TX",
+        ),

       Transaction::DkgCommitments { attempt, commitments, signed } => {
         let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self
-            .fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt")
-            .await;
+          self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
           return;
         };
-        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()).await
-        else {
+        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
           return;
         };
         let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
-        match self.handle_data(&removed, &data_spec, commitments.encode(), &signed).await {
+        match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
           Accumulation::Ready(DataSet::Participating(mut commitments)) => {
             log::info!("got all DkgCommitments for {}", hex::encode(genesis));
             unflatten(self.spec, &removed, &mut commitments);

@@ -343,12 +333,10 @@ impl<

       Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
         let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self
-            .fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt")
-            .await;
+          self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
           return;
         };
-        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()).await else {
+        let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
           return;
         };

@@ -359,7 +347,7 @@ impl<
         let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
         for shares in &shares {
           if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
-            self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares").await;
+            self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
             return;
           }
         }

@@ -419,7 +407,7 @@ impl<

         let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
         let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
-        match self.handle_data(&removed, &data_spec, encoded_data, &signed).await {
+        match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
           Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
             log::info!("got all DkgShares for {}", hex::encode(genesis));
@@ -479,34 +467,27 @@ impl<
       Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
         let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
           self
-            .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt")
-            .await;
+            .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
           return;
         };
         let range = self.spec.i(&removed, signed.signer).unwrap();
         if !range.contains(&accuser) {
-          self
-            .fatal_slash(
-              signed.signer.to_bytes(),
-              "accused with a Participant index which wasn't theirs",
-            )
-            .await;
+          self.fatal_slash(
+            signed.signer.to_bytes(),
+            "accused with a Participant index which wasn't theirs",
+          );
           return;
         }
         if range.contains(&faulty) {
-          self
-            .fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare")
-            .await;
+          self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
           return;
         }

         let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
-          self
-            .fatal_slash(
-              signed.signer.to_bytes(),
-              "InvalidDkgShare had a non-existent faulty participant",
-            )
-            .await;
+          self.fatal_slash(
+            signed.signer.to_bytes(),
+            "InvalidDkgShare had a non-existent faulty participant",
+          );
           return;
         };
         self

@@ -526,15 +507,13 @@ impl<

       Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
         let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-          self
-            .fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt")
-            .await;
+          self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
           return;
         };

         let data_spec =
           DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
-        match self.handle_data(&removed, &data_spec, confirmation_share.to_vec(), &signed).await {
+        match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
           Accumulation::Ready(DataSet::Participating(shares)) => {
             log::info!("got all DkgConfirmed for {}", hex::encode(genesis));

@@ -556,9 +535,7 @@ impl<
             let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
               Ok(sig) => sig,
               Err(p) => {
-                self
-                  .fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share")
-                  .await;
+                self.fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share");
                 return;
               }
             };
@@ -641,16 +618,14 @@ impl<
         let Some(removed) =
           crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
         else {
-          self
-            .fatal_slash(
-              data.signed.signer.to_bytes(),
-              "signing despite not having set keys on substrate",
-            )
-            .await;
+          self.fatal_slash(
+            data.signed.signer.to_bytes(),
+            "signing despite not having set keys on substrate",
+          );
           return;
         };
         let signer = data.signed.signer;
-        let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()).await else {
+        let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
           return;
         };
         let expected_len = match data.label {

@@ -659,12 +634,10 @@ impl<
         };
         for data in &data.data {
           if data.len() != expected_len {
-            self
-              .fatal_slash(
-                signer.to_bytes(),
-                "unexpected length data for substrate signing protocol",
-              )
-              .await;
+            self.fatal_slash(
+              signer.to_bytes(),
+              "unexpected length data for substrate signing protocol",
+            );
             return;
           }
         }

@@ -675,7 +648,7 @@ impl<
           attempt: data.attempt,
         };
         let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await
+          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
         else {
           return;
         };

@@ -703,16 +676,13 @@ impl<
         let Some(removed) =
           crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
         else {
-          self
-            .fatal_slash(
-              data.signed.signer.to_bytes(),
-              "signing despite not having set keys on substrate",
-            )
-            .await;
+          self.fatal_slash(
+            data.signed.signer.to_bytes(),
+            "signing despite not having set keys on substrate",
+          );
           return;
         };
-        let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()).await
-        else {
+        let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
          return;
        };

@@ -722,7 +692,7 @@ impl<
           attempt: data.attempt,
         };
         if let Accumulation::Ready(DataSet::Participating(mut results)) =
-          self.handle_data(&removed, &data_spec, data.data.encode(), &data.signed).await
+          self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
         {
           unflatten(self.spec, &removed, &mut results);
           let id =

@@ -750,9 +720,7 @@ impl<
         );

         if AttemptDb::attempt(self.txn, genesis, Topic::Sign(plan)).is_none() {
-          self
-            .fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed")
-            .await;
+          self.fatal_slash(first_signer.to_bytes(), "claimed an unrecognized plan was completed");
           return;
         };
@@ -192,7 +192,7 @@ impl<
     P: P2p,
   > TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
 {
-  pub async fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
+  pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
     // TODO: If this fatal slash puts the remaining set below the threshold, spin

     let genesis = self.spec.genesis();

@@ -209,7 +209,7 @@ impl<
   // Tributary post-DKG
   // https://github.com/serai-dex/serai/issues/426

-  pub async fn fatal_slash_with_participant_index(
+  pub fn fatal_slash_with_participant_index(
     &mut self,
     removed: &[<Ristretto as Ciphersuite>::G],
     i: Participant,

@@ -227,7 +227,7 @@ impl<
     }
     let validator = validator.unwrap();

-    self.fatal_slash(validator.to_bytes(), reason).await;
+    self.fatal_slash(validator.to_bytes(), reason);
   }

   async fn handle<D: Db>(mut self) {

@@ -240,10 +240,9 @@ impl<
           // Since the evidence is on the chain, it should already have been validated
           // We can just punish the signer
           let data = match ev {
-            Evidence::ConflictingMessages(first, second) => (first, Some(second)),
+            Evidence::ConflictingMessages(first, second) |
             Evidence::ConflictingPrecommit(first, second) => (first, Some(second)),
-            Evidence::InvalidPrecommit(first) => (first, None),
-            Evidence::InvalidValidRound(first) => (first, None),
+            Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
           };
           let msgs = (
             decode_signed_message::<TendermintNetwork<D, Transaction, P>>(&data.0).unwrap(),

@@ -259,9 +258,7 @@ impl<

           // Since anything with evidence is fundamentally faulty behavior, not just temporal
           // errors, mark the node as fatally slashed
-          self
-            .fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {:?}", msgs))
-            .await;
+          self.fatal_slash(msgs.0.msg.sender, &format!("invalid tendermint messages: {msgs:?}"));
         }
         TributaryTransaction::Application(tx) => {
           self.handle_application_tx(tx).await;

@@ -348,8 +345,7 @@ impl<
         // Check if the cosigner has a signature from our set for this block/a newer one
         let latest_cosign =
           crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
-            .map(|cosign| cosign.block_number)
-            .unwrap_or(0);
+            .map_or(0, |cosign| cosign.block_number);
         if latest_cosign < block_number {
           // Instruct the processor to start the next attempt
           self
@@ -184,7 +184,6 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
   }

   fn complete_internal(
-    &mut self,
     machine: AlgorithmSignatureMachine<Ristretto, Schnorrkel>,
     shares: HashMap<Participant, Vec<u8>>,
   ) -> Result<[u8; 64], Participant> {

@@ -251,6 +250,8 @@ fn threshold_i_map_to_keys_and_musig_i_map(
   (participants, map)
 }

+type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8; 12], u32)>;
+
 pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
   key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
   spec: &'a TributarySpec,

@@ -271,7 +272,7 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
     Some(DkgConfirmer { key, spec, removed, txn, attempt })
   }
-  fn signing_protocol(&mut self) -> SigningProtocol<'_, T, (&'static [u8; 12], u32)> {
+  fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
     let context = (b"DkgConfirmer", self.attempt);
     SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
   }

@@ -323,6 +324,6 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
       .expect("trying to complete a machine which failed to preprocess")
       .0;

-    self.signing_protocol().complete_internal(machine, shares)
+    DkgConfirmerSigningProtocol::<'_, T>::complete_internal(machine, shares)
   }
 }
@@ -489,7 +489,7 @@ impl ReadWrite for Transaction {
         writer.write_all(&u16::from(*faulty).to_le_bytes())?;

         // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
-        assert!(blame.as_ref().map(|blame| blame.len()).unwrap_or(1) != 0);
+        assert!(blame.as_ref().map_or(1, Vec::len) != 0);
         let blame_len =
           u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
         writer.write_all(&blame_len.to_le_bytes())?;

@@ -547,15 +547,9 @@ impl TransactionTrait for Transaction {
     match self {
       Transaction::RemoveParticipantDueToDkg { .. } => TransactionKind::Provided("remove"),

-      Transaction::DkgCommitments { attempt, commitments: _, signed } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-      Transaction::DkgShares { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
-      Transaction::InvalidDkgShare { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
-      }
+      Transaction::DkgCommitments { attempt, commitments: _, signed } |
+      Transaction::DkgShares { attempt, signed, .. } |
+      Transaction::InvalidDkgShare { attempt, signed, .. } |
       Transaction::DkgConfirmed { attempt, signed, .. } => {
         TransactionKind::Signed((b"dkg", attempt).encode(), signed)
       }

@@ -625,8 +619,7 @@ impl Transaction {

       Transaction::DkgCommitments { .. } => 0,
       Transaction::DkgShares { .. } => 1,
-      Transaction::InvalidDkgShare { .. } => 2,
-      Transaction::DkgConfirmed { .. } => 2,
+      Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,

       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),

@@ -635,6 +628,7 @@ impl Transaction {

       Transaction::SubstrateSign(data) => data.label.nonce(),
       Transaction::Sign(data) => data.label.nonce(),

       Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
     };

@@ -643,9 +637,9 @@ impl Transaction {
     match tx {
       Transaction::RemoveParticipantDueToDkg { .. } => panic!("signing RemoveParticipant"),

-      Transaction::DkgCommitments { ref mut signed, .. } => signed,
-      Transaction::DkgShares { ref mut signed, .. } => signed,
-      Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
+      Transaction::DkgCommitments { ref mut signed, .. } |
+      Transaction::DkgShares { ref mut signed, .. } |
+      Transaction::InvalidDkgShare { ref mut signed, .. } |
       Transaction::DkgConfirmed { ref mut signed, .. } => signed,

       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),

@@ -655,6 +649,7 @@ impl Transaction {

       Transaction::SubstrateSign(ref mut data) => &mut data.signed,
       Transaction::Sign(ref mut data) => &mut data.signed,

       Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
     },
   )