Remove unused_variables
@@ -67,19 +67,24 @@ impl<D: Db> MainDb<D> {
     res
   }
 
-  fn first_preprocess_key(id: [u8; 32]) -> Vec<u8> {
-    Self::main_key(b"first_preprocess", id)
+  fn first_preprocess_key(network: NetworkId, id: [u8; 32]) -> Vec<u8> {
+    Self::main_key(b"first_preprocess", (network, id).encode())
   }
-  pub fn save_first_preprocess(txn: &mut D::Transaction<'_>, id: [u8; 32], preprocess: Vec<u8>) {
-    let key = Self::first_preprocess_key(id);
+  pub fn save_first_preprocess(
+    txn: &mut D::Transaction<'_>,
+    network: NetworkId,
+    id: [u8; 32],
+    preprocess: Vec<u8>,
+  ) {
+    let key = Self::first_preprocess_key(network, id);
     if let Some(existing) = txn.get(&key) {
       assert_eq!(existing, preprocess, "saved a distinct first preprocess");
       return;
     }
     txn.put(key, preprocess);
   }
-  pub fn first_preprocess<G: Get>(getter: &G, id: [u8; 32]) -> Option<Vec<u8>> {
-    getter.get(Self::first_preprocess_key(id))
+  pub fn first_preprocess<G: Get>(getter: &G, network: NetworkId, id: [u8; 32]) -> Option<Vec<u8>> {
+    getter.get(Self::first_preprocess_key(network, id))
   }
 
   fn batch_key(network: NetworkId, id: u32) -> Vec<u8> {
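The hunk above keys the stored first preprocess by (network, id) instead of id alone, SCALE-encoding the tuple before handing it to the key builder. A minimal sketch of that keying scheme, assuming parity-scale-codec and using simplified stand-ins for NetworkId and for MainDb::main_key (both hypothetical here, not the coordinator's actual definitions):

```rust
// Assumed Cargo dependency: parity-scale-codec with the "derive" feature.
use parity_scale_codec::Encode;

// Simplified stand-in for serai's NetworkId.
#[allow(dead_code)]
#[derive(Encode)]
enum NetworkId {
  Serai,
  Bitcoin,
  Ethereum,
  Monero,
}

// Hypothetical stand-in for MainDb::main_key: namespace a prefix plus the
// encoded data so different item kinds can't collide.
fn main_key(prefix: &'static [u8], data: impl AsRef<[u8]>) -> Vec<u8> {
  [b"MAIN".as_ref(), prefix, data.as_ref()].concat()
}

fn first_preprocess_key(network: NetworkId, id: [u8; 32]) -> Vec<u8> {
  // Keying by (network, id) instead of id alone keeps first preprocesses for
  // the same signing id on different networks from overwriting each other.
  main_key(b"first_preprocess", (network, id).encode())
}

fn main() {
  let bitcoin = first_preprocess_key(NetworkId::Bitcoin, [0; 32]);
  let monero = first_preprocess_key(NetworkId::Monero, [0; 32]);
  assert_ne!(bitcoin, monero);
}
```

Since the network is now part of the key, two signing protocols that happen to share an id on different networks can no longer read or overwrite each other's saved preprocess.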
@@ -1,5 +1,3 @@
-#![allow(unused_variables)]
-
 use core::{ops::Deref, future::Future};
 use std::{
   sync::Arc,
@@ -203,7 +201,6 @@ impl<FRid, F: Clone + Fn(NetworkId, [u8; 32], RecognizedIdType, [u8; 32], u32) -
 {
 }
 
-#[allow(clippy::type_complexity)]
 pub(crate) async fn scan_tributaries<
   D: Db,
   Pro: Processors,
@@ -214,7 +211,6 @@ pub(crate) async fn scan_tributaries<
   raw_db: D,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,
   recognized_id: RID,
-  p2p: P,
   processors: Pro,
   serai: Arc<Serai>,
   mut new_tributary: broadcast::Receiver<ActiveTributary<D, P>>,
@@ -229,7 +225,6 @@ pub(crate) async fn scan_tributaries<
     let raw_db = raw_db.clone();
     let key = key.clone();
     let recognized_id = recognized_id.clone();
-    let p2p = p2p.clone();
     let processors = processors.clone();
     let serai = serai.clone();
     async move {
@@ -305,7 +300,7 @@ pub async fn heartbeat_tributaries<D: Db, P: P2p>(
 
   let mut readers = vec![];
   loop {
-    while let Ok(ActiveTributary { spec, tributary }) = {
+    while let Ok(ActiveTributary { spec: _, tributary }) = {
       match new_tributary.try_recv() {
         Ok(tributary) => Ok(tributary),
         Err(broadcast::error::TryRecvError::Empty) => Err(()),
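Instead of relying on the crate-wide #![allow(unused_variables)] removed above, the unused binding is discarded at the pattern itself with `spec: _`. A small, self-contained sketch of that pattern, with placeholder types rather than the real ActiveTributary:

```rust
// Placeholder types; the real ActiveTributary holds a TributarySpec and a
// Tributary, not strings.
#[allow(dead_code)] // `spec` exists only to be ignored in this sketch
struct ActiveTributary {
  spec: String,
  tributary: String,
}

fn main() {
  let active = ActiveTributary { spec: "spec".into(), tributary: "tributary".into() };

  // `spec: _` discards the field at the binding site, so unused_variables
  // stays enabled for the rest of the crate instead of being allowed globally.
  let ActiveTributary { spec: _, tributary } = active;
  println!("{tributary}");
}
```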
@@ -608,7 +603,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
     ProcessorMessage::Sign(msg) => match msg {
       sign::ProcessorMessage::Preprocess { id, preprocess } => {
         if id.attempt == 0 {
-          MainDb::<D>::save_first_preprocess(&mut txn, id.id, preprocess);
+          MainDb::<D>::save_first_preprocess(&mut txn, network, id.id, preprocess);
 
           None
         } else {
@@ -668,7 +663,7 @@ async fn handle_processor_messages<D: Db, Pro: Processors, P: P2p>(
       // If this is the first attempt instance, wait until we synchronize around
       // the batch first
       if id.attempt == 0 {
-        MainDb::<D>::save_first_preprocess(&mut txn, id.id, preprocess);
+        MainDb::<D>::save_first_preprocess(&mut txn, spec.set().network, id.id, preprocess);
 
         Some(Transaction::Batch(block.0, id.id))
       } else {
@@ -942,7 +937,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
       // This waits until the necessary preprocess is available
       let get_preprocess = |raw_db, id| async move {
         loop {
-          let Some(preprocess) = MainDb::<D>::first_preprocess(raw_db, id) else {
+          let Some(preprocess) = MainDb::<D>::first_preprocess(raw_db, network, id) else {
             sleep(Duration::from_millis(100)).await;
             continue;
           };
@@ -985,7 +980,6 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
     raw_db,
     key.clone(),
     recognized_id,
-    p2p.clone(),
     processors.clone(),
     serai.clone(),
     new_tributary_listener_2,
@@ -43,11 +43,10 @@ async fn in_set(
   Ok(Some(data.participants.iter().any(|(participant, _)| participant.0 == key)))
 }
 
-async fn handle_new_set<D: Db, CNT: Clone + Fn(&mut D, TributarySpec), Pro: Processors>(
+async fn handle_new_set<D: Db, CNT: Clone + Fn(&mut D, TributarySpec)>(
   db: &mut D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   create_new_tributary: CNT,
-  processors: &Pro,
   serai: &Serai,
   block: &Block,
   set: ValidatorSet,
@@ -88,7 +87,6 @@ async fn handle_new_set<D: Db, CNT: Clone + Fn(&mut D, TributarySpec), Pro: Proc
 }
 
 async fn handle_key_gen<Pro: Processors>(
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   processors: &Pro,
   serai: &Serai,
   block: &Block,
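Here the unused parameters are deleted outright (along with the generic that `processors` carried in handle_new_set) rather than being underscore-prefixed or covered by an allow. A hypothetical comparison of the three options, with placeholder names and types:

```rust
// Three ways to quiet unused_variables on a parameter; this commit takes the
// first. All names and types here are placeholders.

// 1. Remove the parameter entirely (and any generic it carried).
fn removed(block: u32) -> u32 {
  block + 1
}

// 2. Underscore-prefix it to document that it's intentionally ignored.
fn underscored(block: u32, _key: &[u8]) -> u32 {
  block + 1
}

// 3. Allow the lint, which also hides genuinely accidental unused bindings.
#[allow(unused_variables)]
fn allowed(block: u32, key: &[u8]) -> u32 {
  block + 1
}

fn main() {
  assert_eq!(removed(1), underscored(1, b"key"));
  assert_eq!(removed(1), allowed(1, b"key"));
}
```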
@@ -239,8 +237,7 @@ async fn handle_block<D: Db, CNT: Clone + Fn(&mut D, TributarySpec), Pro: Proces
 
       if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
         log::info!("found fresh new set event {:?}", new_set);
-        handle_new_set(&mut db.0, key, create_new_tributary.clone(), processors, serai, &block, set)
-          .await?;
+        handle_new_set(&mut db.0, key, create_new_tributary.clone(), serai, &block, set).await?;
         let mut txn = db.0.txn();
         SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
         txn.commit();
@@ -259,7 +256,7 @@ async fn handle_block<D: Db, CNT: Clone + Fn(&mut D, TributarySpec), Pro: Proces
         TributaryDb::<D>::set_key_pair(&mut txn, set, &key_pair);
         txn.commit();
 
-        handle_key_gen(key, processors, serai, &block, set, key_pair).await?;
+        handle_key_gen(processors, serai, &block, set, key_pair).await?;
       } else {
         panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
       }
@@ -281,7 +281,7 @@ async fn dkg_test() {
   let key_pair = (serai_client::Public(substrate_key), network_key.try_into().unwrap());
 
   let mut txs = vec![];
-  for (k, key) in keys.iter().enumerate() {
+  for key in keys.iter() {
     let attempt = 0;
     // This is fine to re-use the one DB as such, due to exactly how this specific call is coded,
     // albeit poor
@@ -66,7 +66,7 @@ fn serialize_transaction() {
   // Create a valid vec of shares
   let mut shares = vec![];
   // Create up to 512 participants
-  for i in 0 .. (OsRng.next_u64() % 512) {
+  for _ in 0 .. (OsRng.next_u64() % 512) {
     let mut share = vec![0; share_len];
     OsRng.fill_bytes(&mut share);
     shares.push(share);
@@ -297,7 +297,7 @@ impl ReadWrite for Transaction {
         let share_len = usize::from(u16::from_le_bytes(share_len));
 
         let mut shares = vec![];
-        for i in 0 .. u16::from_le_bytes(share_quantity) {
+        for _ in 0 .. u16::from_le_bytes(share_quantity) {
           let mut share = vec![0; share_len];
           reader.read_exact(&mut share)?;
           shares.push(share);
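The loop counter in this deserialization path is never read, so it becomes `for _ in`. A self-contained sketch of the surrounding read loop, assuming the same little-endian length prefixes; the function name and framing are illustrative, not the crate's actual ReadWrite implementation:

```rust
use std::io::{self, Read};

// Illustrative helper: read a count-prefixed list of fixed-length shares.
fn read_shares<R: Read>(reader: &mut R) -> io::Result<Vec<Vec<u8>>> {
  let mut share_quantity = [0u8; 2];
  reader.read_exact(&mut share_quantity)?;

  let mut share_len = [0u8; 2];
  reader.read_exact(&mut share_len)?;
  let share_len = usize::from(u16::from_le_bytes(share_len));

  let mut shares = vec![];
  // The counter is never read, hence `for _ in` rather than `for i in`.
  for _ in 0 .. u16::from_le_bytes(share_quantity) {
    let mut share = vec![0; share_len];
    reader.read_exact(&mut share)?;
    shares.push(share);
  }
  Ok(shares)
}

fn main() -> io::Result<()> {
  // Two shares of three bytes each, with little-endian length prefixes.
  let data = [2u8, 0, 3, 0, 1, 2, 3, 4, 5, 6];
  let shares = read_shares(&mut &data[..])?;
  assert_eq!(shares, vec![vec![1u8, 2, 3], vec![4, 5, 6]]);
  Ok(())
}
```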
@@ -490,7 +490,7 @@ impl TransactionTrait for Transaction {
       }
     }
 
-    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
+    if let Transaction::SignCompleted { first_signer, signature, .. } = self {
       if !signature.verify(*first_signer, self.sign_completed_challenge()) {
         Err(TransactionError::InvalidContent)?;
       }
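In the SignCompleted arm, the unused `plan` and `tx_hash` bindings are dropped via the `..` rest pattern, which skips any fields the match doesn't name. A minimal sketch with stand-in types; the real Transaction and its signature checks live in the tributary crate:

```rust
// Stand-in enum; the ignored fields exist only to be skipped by `..`.
#[allow(dead_code)]
enum Transaction {
  Batch([u8; 32], [u8; 32]),
  SignCompleted { plan: [u8; 32], tx_hash: Vec<u8>, first_signer: [u8; 32], signature: Vec<u8> },
}

fn verify(tx: &Transaction) -> bool {
  // `..` skips `plan` and `tx_hash`, so no unused bindings are introduced.
  if let Transaction::SignCompleted { first_signer, signature, .. } = tx {
    // Placeholder check standing in for a real signature verification.
    first_signer.len() == 32 && signature.len() == 64
  } else {
    true
  }
}

fn main() {
  let tx = Transaction::SignCompleted {
    plan: [0; 32],
    tx_hash: vec![],
    first_signer: [0; 32],
    signature: vec![0; 64],
  };
  assert!(verify(&tx));
}
```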