Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-08 12:19:24 +00:00)
Add workspace lints
@@ -234,7 +234,7 @@ impl<D: Db> BatchSigner<D> {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
+    let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
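These BatchSigner hunks show the pattern repeated throughout the commit: when the iterated items are Copy (here, the map keys), Iterator::copied replaces Iterator::cloned, which is what clippy's cloned_instead_of_copied lint asks for. A minimal sketch of the sorted-key iteration, using a plain HashMap<u16, Vec<u8>> as a hypothetical stand-in for the preprocess map:

use std::collections::HashMap;

fn sorted_keys(preprocesses: &HashMap<u16, Vec<u8>>) -> Vec<u16> {
  // `copied()` dereferences the `&u16` keys via `Copy` instead of going
  // through `Clone::clone`, as the lint prefers.
  let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();
  // Sorting the keys gives a deterministic iteration order over the map.
  keys.sort();
  keys
}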
@@ -329,7 +329,7 @@ impl<D: Db> BatchSigner<D> {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = shares.keys().cloned().collect::<Vec<_>>();
+    let mut keys = shares.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
@@ -150,7 +150,7 @@ impl Cosigner {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
+    let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
@@ -241,7 +241,7 @@ impl Cosigner {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = shares.keys().cloned().collect::<Vec<_>>();
+    let mut keys = shares.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
@@ -305,7 +305,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
 
   let mut these_shares: HashMap<_, _> =
     substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
-  for (i, share) in these_shares.iter_mut() {
+  for (i, share) in &mut these_shares {
     share.extend(network_shares[i].serialize());
   }
   shares.push(these_shares);
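In this KeyGen hunk the explicit .iter_mut() call becomes a loop over &mut the map, per clippy's explicit_iter_loop lint; the yielded (key, &mut value) pairs and the loop body are unchanged. A sketch of the same shape, assuming plain byte vectors in place of the real serialized share types:

use std::collections::HashMap;

fn merge_shares(
  mut these_shares: HashMap<u16, Vec<u8>>,
  network_shares: &HashMap<u16, Vec<u8>>,
) -> HashMap<u16, Vec<u8>> {
  // `for .. in &mut these_shares` is equivalent to `.iter_mut()` but is the
  // form the lint prefers; it still yields `(&u16, &mut Vec<u8>)`.
  for (i, share) in &mut these_shares {
    share.extend(&network_shares[i]);
  }
  these_shares
}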
@@ -113,7 +113,7 @@ impl ResolvedDb {
   let end = i + 32;
   if signing[start .. end] == plan {
     found = true;
-    signing = [&signing[.. start], &signing[end ..]].concat().to_vec();
+    signing = [&signing[.. start], &signing[end ..]].concat();
     break;
   }
 }
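The ResolvedDb hunk drops a redundant .to_vec(): concatenating slices with [..].concat() already produces an owned Vec<u8>. A minimal sketch of the same buffer surgery, with the 32-byte entry width taken from the surrounding code:

// Remove the 32-byte entry beginning at `start` from a flat buffer of IDs.
// `concat()` already returns an owned `Vec<u8>`, so no `.to_vec()` is needed.
fn remove_entry(signing: &[u8], start: usize) -> Vec<u8> {
  let end = start + 32;
  [&signing[.. start], &signing[end ..]].concat()
}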
@@ -198,12 +198,12 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
   (
     MultisigManager {
       scanner,
-      existing: current_keys.first().cloned().map(|(activation_block, key)| MultisigViewer {
+      existing: current_keys.first().copied().map(|(activation_block, key)| MultisigViewer {
         activation_block,
         key,
         scheduler: schedulers.remove(0),
       }),
-      new: current_keys.get(1).cloned().map(|(activation_block, key)| MultisigViewer {
+      new: current_keys.get(1).copied().map(|(activation_block, key)| MultisigViewer {
         activation_block,
         key,
         scheduler: schedulers.remove(0),
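Here first() and get(1) return Option<&(activation_block, key)>; since the change compiles with copied(), those tuples are Copy, so Option::copied replaces Option::cloned, mirroring the iterator change above. A sketch with plain integers as hypothetical stand-ins for the block number and key:

// `Option::copied` turns `Option<&T>` into `Option<T>` for `T: Copy`,
// without requiring (or invoking) `Clone`.
fn existing_and_new(current_keys: &[(u64, u32)]) -> (Option<(u64, u32)>, Option<(u64, u32)>) {
  (current_keys.first().copied(), current_keys.get(1).copied())
}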
@@ -484,7 +484,7 @@ impl<N: Network, D: Db> Scanner<N, D> {
   let needing_ack = {
     let scanner_lock = scanner_hold.read().await;
     let scanner = scanner_lock.as_ref().unwrap();
-    scanner.need_ack.front().cloned()
+    scanner.need_ack.front().copied()
   };
 
   if let Some(needing_ack) = needing_ack {
@@ -197,7 +197,6 @@ impl<N: Network> Scheduler<N> {
 
   let mut add_plan = |payments| {
     let amount = payment_amounts(&payments);
-    #[allow(clippy::unwrap_or_default)]
     self.queued_plans.entry(amount).or_insert(VecDeque::new()).push_back(payments);
     amount
   };
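The #[allow(clippy::unwrap_or_default)] attribute sits above these entry(..).or_insert(VecDeque::new()) calls because that lint suggests or_default() in place of the explicit empty-queue constructor; the hunks here touch the attribute, not the call. A sketch of the queue-per-amount pattern using or_default(), with a hypothetical byte-vector payment type in place of the real plan type:

use std::collections::{HashMap, VecDeque};

fn queue_plan(
  queued_plans: &mut HashMap<u64, VecDeque<Vec<u8>>>,
  amount: u64,
  payments: Vec<u8>,
) {
  // `entry(..).or_insert(VecDeque::new())` is what the lint flags;
  // `or_default()` builds the same empty queue without naming the constructor.
  queued_plans.entry(amount).or_default().push_back(payments);
}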
@@ -474,7 +473,7 @@ impl<N: Network> Scheduler<N> {
   let per_payment = to_amortize / payments_len;
   let mut overage = to_amortize % payments_len;
 
-  for payment in payments.iter_mut() {
+  for payment in &mut payments {
     let to_subtract = per_payment + overage;
     // Only subtract the overage once
     overage = 0;
@@ -499,7 +498,6 @@ impl<N: Network> Scheduler<N> {
     return;
   }
 
-  #[allow(clippy::unwrap_or_default)]
   self.plans.entry(actual).or_insert(VecDeque::new()).push_back(payments);
 
   // TODO2: This shows how ridiculous the serialize function is
@@ -383,7 +383,7 @@ impl Bitcoin {
     }
   }
   fees.sort();
-  let fee = fees.get(fees.len() / 2).cloned().unwrap_or(0);
+  let fee = fees.get(fees.len() / 2).copied().unwrap_or(0);
 
   // The DUST constant documentation notes a relay rule practically enforcing a
   // 1000 sat/kilo-vbyte minimum fee.
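Both fee estimators take the median of the sampled per-weight fees; the change is again cloned to copied, since the samples are u64. A compact sketch of the median-with-fallback shape these hunks use (the Monero hunk then clamps the result with fee.max(1500000)):

// Median of the sampled fee rates, falling back to 0 when there are no samples.
fn median_fee(mut fees: Vec<u64>) -> u64 {
  fees.sort();
  // `get` returns `Option<&u64>`; `copied()` is the `Copy`-based replacement
  // for the previous `cloned()` call.
  fees.get(fees.len() / 2).copied().unwrap_or(0)
}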
@@ -285,7 +285,7 @@ impl Monero {
     fees.push(tx.rct_signatures.base.fee / u64::try_from(tx.serialize().len()).unwrap());
   }
   fees.sort();
-  let fee = fees.get(fees.len() / 2).cloned().unwrap_or(0);
+  let fee = fees.get(fees.len() / 2).copied().unwrap_or(0);
 
   // TODO: Set a sane minimum fee
   Ok(Fee { per_weight: fee.max(1500000), mask: 10000 })
@@ -665,7 +665,7 @@ impl Network for Monero {
 
   async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> {
     match self.rpc.publish_transaction(tx).await {
-      Ok(_) => Ok(()),
+      Ok(()) => Ok(()),
       Err(RpcError::ConnectionError(e)) => {
         log::debug!("Monero ConnectionError: {e}");
         Err(NetworkError::ConnectionError)?
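Since publish_transaction returns Result<(), _>, the success arm can name the unit value outright: Ok(()) rather than Ok(_), so the pattern visibly discards nothing (clippy has a lint along these lines, ignored_unit_patterns). A sketch of forwarding such a result, with a hypothetical error enum and plain stderr output standing in for log::debug!:

#[derive(Debug)]
enum NetworkError {
  ConnectionError,
}

fn forward_publish(result: Result<(), std::io::Error>) -> Result<(), NetworkError> {
  match result {
    // `Ok(())` rather than `Ok(_)`: the success value is explicitly the unit type.
    Ok(()) => Ok(()),
    Err(e) => {
      eprintln!("publish failed: {e}");
      Err(NetworkError::ConnectionError)
    }
  }
}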
@@ -469,7 +469,7 @@ impl<N: Network, D: Db> Signer<N, D> {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
+    let mut keys = preprocesses.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
@@ -549,7 +549,7 @@ impl<N: Network, D: Db> Signer<N, D> {
 
   let mut parsed = HashMap::new();
   for l in {
-    let mut keys = shares.keys().cloned().collect::<Vec<_>>();
+    let mut keys = shares.keys().copied().collect::<Vec<_>>();
     keys.sort();
     keys
   } {
@@ -78,7 +78,7 @@ async fn spend<N: Network, D: Db>(
 
 pub async fn test_addresses<N: Network>(network: N) {
   let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng);
-  for (_, keys) in keys.iter_mut() {
+  for keys in keys.values_mut() {
     N::tweak_keys(keys);
   }
   let key = keys[&Participant::new(1).unwrap()].group_key();
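The test helpers all tweak every participant's keys before use; since the participant index goes unused, these loops switch from destructuring (_, keys) out of .iter_mut() to .values_mut(). A sketch with a hypothetical tweak over byte-vector keys in place of N::tweak_keys:

use std::collections::HashMap;

// Hypothetical stand-in for `N::tweak_keys`.
fn tweak_keys(keys: &mut Vec<u8>) {
  keys.push(0);
}

fn tweak_all(keys: &mut HashMap<u16, Vec<u8>>) {
  // `values_mut()` skips binding (and then ignoring) the key, matching the
  // change made in these test hunks.
  for keys in keys.values_mut() {
    tweak_keys(keys);
  }
}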
@@ -109,7 +109,7 @@ pub async fn test_no_deadlock_in_multisig_completed<N: Network>(network: N) {
     network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i,
     {
       let mut keys = key_gen(&mut OsRng);
-      for (_, keys) in keys.iter_mut() {
+      for keys in keys.values_mut() {
         N::tweak_keys(keys);
       }
       keys[&Participant::new(1).unwrap()].group_key()
@@ -147,7 +147,7 @@ pub async fn sign<N: Network>(
 
 pub async fn test_signer<N: Network>(network: N) {
   let mut keys = key_gen(&mut OsRng);
-  for (_, keys) in keys.iter_mut() {
+  for keys in keys.values_mut() {
     N::tweak_keys(keys);
   }
   let key = keys[&Participant::new(1).unwrap()].group_key();
@@ -31,7 +31,7 @@ pub async fn test_wallet<N: Network>(network: N) {
   }
 
   let mut keys = key_gen(&mut OsRng);
-  for (_, keys) in keys.iter_mut() {
+  for keys in keys.values_mut() {
     N::tweak_keys(keys);
   }
   let key = keys[&Participant::new(1).unwrap()].group_key();