Further expand clippy workspace lints

Achieves a notable reduction in async usage and cloning.
Luke Parker
2023-12-17 00:01:41 -05:00
parent ea3af28139
commit 065d314e2a
113 changed files with 596 additions and 724 deletions
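
Most of the diff applies a few recurring patterns: merging duplicate match arms into or-patterns, removing `async` from functions that never await, and borrowing values (e.g. `&KeyPair`, `&N::Eventuality`) instead of cloning them. Below is a minimal standalone sketch of those patterns, using simplified stand-in types rather than the actual Serai processor types; the lint names cited are the usual Clippy lints for these cases, not necessarily the exact workspace configuration.

// A minimal sketch of the patterns applied throughout this commit, with
// simplified stand-in types rather than the actual Serai processor types.

#[derive(Clone, Copy)]
struct Session(u32);

enum CoordinatorMessage {
  Preprocesses { session: Session },
  Shares { session: Session },
  Reattempt { session: Session },
}

impl CoordinatorMessage {
  // Arms sharing a body are merged with `|` (clippy::match_same_arms style).
  fn session(&self) -> Session {
    match self {
      CoordinatorMessage::Preprocesses { session } |
      CoordinatorMessage::Shares { session } |
      CoordinatorMessage::Reattempt { session } => *session,
    }
  }
}

struct KeyPair(Vec<u8>);
struct Signer;

impl Signer {
  // `fn`, not `async fn`: the body never awaits, so an `async` wrapper would
  // only force callers to `.await` for no reason (clippy::unused_async).
  // Borrowing the key pair avoids a clone at every call site
  // (clippy::needless_pass_by_value).
  fn confirm(&mut self, key_pair: &KeyPair) -> usize {
    key_pair.0.len()
  }
}

fn main() {
  let msg = CoordinatorMessage::Reattempt { session: Session(1) };
  assert_eq!(msg.session().0, 1);

  let key_pair = KeyPair(vec![0; 32]);
  let mut signer = Signer;
  // The caller keeps ownership of `key_pair`; no `.clone()` and no `.await`.
  assert_eq!(signer.confirm(&key_pair), 32);
}

Call sites in the diff that still perform real I/O (e.g. `sign_transaction`, `coordinator.send`) keep their `.await`s; only the purely synchronous paths lose them.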


@@ -130,8 +130,8 @@ pub mod sign {
pub fn session(&self) -> Session {
match self {
CoordinatorMessage::Preprocesses { id, .. } => id.session,
CoordinatorMessage::Shares { id, .. } => id.session,
CoordinatorMessage::Preprocesses { id, .. } |
CoordinatorMessage::Shares { id, .. } |
CoordinatorMessage::Reattempt { id } => id.session,
CoordinatorMessage::Completed { session, .. } => *session,
}
@@ -193,12 +193,7 @@ pub mod coordinator {
// network *and the local node*
// This synchrony obtained lets us ignore the synchrony requirement offered here
pub fn required_block(&self) -> Option<BlockHash> {
match self {
CoordinatorMessage::CosignSubstrateBlock { .. } => None,
CoordinatorMessage::SubstratePreprocesses { .. } => None,
CoordinatorMessage::SubstrateShares { .. } => None,
CoordinatorMessage::BatchReattempt { .. } => None,
}
None
}
}
@@ -240,7 +235,7 @@ pub mod substrate {
impl CoordinatorMessage {
pub fn required_block(&self) -> Option<BlockHash> {
let context = match self {
CoordinatorMessage::ConfirmKeyPair { context, .. } => context,
CoordinatorMessage::ConfirmKeyPair { context, .. } |
CoordinatorMessage::SubstrateBlock { context, .. } => context,
};
Some(context.network_latest_finalized_block)


@@ -111,7 +111,7 @@ impl<D: Db> BatchSigner<D> {
}
#[must_use]
async fn attempt(
fn attempt(
&mut self,
txn: &mut D::Transaction<'_>,
id: u32,
@@ -189,11 +189,7 @@ impl<D: Db> BatchSigner<D> {
}
#[must_use]
pub async fn sign(
&mut self,
txn: &mut D::Transaction<'_>,
batch: Batch,
) -> Option<ProcessorMessage> {
pub fn sign(&mut self, txn: &mut D::Transaction<'_>, batch: Batch) -> Option<ProcessorMessage> {
debug_assert_eq!(self.network, batch.network);
let id = batch.id;
if CompletedDb::get(txn, id).is_some() {
@@ -203,11 +199,11 @@ impl<D: Db> BatchSigner<D> {
}
self.signable.insert(id, batch);
self.attempt(txn, id, 0).await
self.attempt(txn, id, 0)
}
#[must_use]
pub async fn handle(
pub fn handle(
&mut self,
txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage,
@@ -394,7 +390,7 @@ impl<D: Db> BatchSigner<D> {
let SubstrateSignableId::Batch(batch_id) = id.id else {
panic!("BatchReattempt passed non-Batch ID")
};
self.attempt(txn, batch_id, id.attempt).await.map(Into::into)
self.attempt(txn, batch_id, id.attempt).map(Into::into)
}
}
}


@@ -114,7 +114,7 @@ impl Cosigner {
}
#[must_use]
pub async fn handle(
pub fn handle(
&mut self,
txn: &mut impl DbTxn,
msg: CoordinatorMessage,


@@ -32,7 +32,7 @@ impl PendingActivationsDb {
}
pub fn set_pending_activation<N: Network>(
txn: &mut impl DbTxn,
block_before_queue_block: <N::Block as Block<N>>::Id,
block_before_queue_block: &<N::Block as Block<N>>::Id,
session: Session,
key_pair: KeyPair,
) {


@@ -89,7 +89,7 @@ impl KeysDb {
fn confirm_keys<N: Network>(
txn: &mut impl DbTxn,
session: Session,
key_pair: KeyPair,
key_pair: &KeyPair,
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) = GeneratedKeysDb::read_keys::<N>(
txn,
@@ -175,7 +175,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
KeysDb::substrate_keys_by_session::<N>(&self.db, session)
}
pub async fn handle(
pub fn handle(
&mut self,
txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage,
@@ -582,11 +582,13 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
}
pub async fn confirm(
// This should only be called if we're participating, hence taking our instance
#[allow(clippy::unused_self)]
pub fn confirm(
&mut self,
txn: &mut D::Transaction<'_>,
session: Session,
key_pair: KeyPair,
key_pair: &KeyPair,
) -> KeyConfirmed<N::Curve> {
info!(
"Confirmed key pair {} {} for {:?}",


@@ -199,7 +199,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
if tributary_mutable.key_gen.in_set(&session) {
// See TributaryMutable's struct definition for why this block is safe
let KeyConfirmed { substrate_keys, network_keys } =
tributary_mutable.key_gen.confirm(txn, session, key_pair.clone()).await;
tributary_mutable.key_gen.confirm(txn, session, &key_pair);
if session.0 == 0 {
tributary_mutable.batch_signer =
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
@@ -214,7 +214,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
match msg.msg.clone() {
CoordinatorMessage::KeyGen(msg) => {
coordinator.send(tributary_mutable.key_gen.handle(txn, msg).await).await;
coordinator.send(tributary_mutable.key_gen.handle(txn, msg)).await;
}
CoordinatorMessage::Sign(msg) => {
@@ -232,9 +232,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
CoordinatorMessage::Coordinator(msg) => {
let is_batch = match msg {
CoordinatorCoordinatorMessage::CosignSubstrateBlock { .. } => false,
CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } => {
matches!(&id.id, SubstrateSignableId::Batch(_))
}
CoordinatorCoordinatorMessage::SubstratePreprocesses { ref id, .. } |
CoordinatorCoordinatorMessage::SubstrateShares { ref id, .. } => {
matches!(&id.id, SubstrateSignableId::Batch(_))
}
@@ -248,7 +246,6 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
"coordinator told us to sign a batch when we don't currently have a Substrate signer",
)
.handle(txn, msg)
.await
{
coordinator.send(msg).await;
}
@@ -272,7 +269,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
}
_ => {
if let Some(cosigner) = tributary_mutable.cosigner.as_mut() {
if let Some(msg) = cosigner.handle(txn, msg).await {
if let Some(msg) = cosigner.handle(txn, msg) {
coordinator.send(msg).await;
}
} else {
@@ -355,7 +352,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// Set this variable so when we get the next Batch event, we can handle it
PendingActivationsDb::set_pending_activation::<N>(
txn,
block_before_queue_block,
&block_before_queue_block,
session,
key_pair,
);
@@ -429,7 +426,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
for (key, id, tx, eventuality) in to_sign {
if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) {
let signer = signers.get_mut(&session).unwrap();
if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await {
if let Some(msg) = signer.sign_transaction(txn, id, tx, &eventuality).await {
coordinator.send(msg).await;
}
}
@@ -521,7 +518,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
if plan.key == network_key {
let mut txn = raw_db.txn();
if let Some(msg) =
signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality.clone()).await
signer.sign_transaction(&mut txn, plan.id(), tx.clone(), eventuality).await
{
coordinator.send(msg).await;
}
@@ -622,7 +619,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
).await;
if let Some(batch_signer) = tributary_mutable.batch_signer.as_mut() {
if let Some(msg) = batch_signer.sign(&mut txn, batch).await {
if let Some(msg) = batch_signer.sign(&mut txn, batch) {
coordinator.send(msg).await;
}
}
@@ -644,7 +641,7 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
MultisigEvent::Completed(key, id, tx) => {
if let Some(session) = SessionDb::get(&txn, &key) {
let signer = tributary_mutable.signers.get_mut(&session).unwrap();
if let Some(msg) = signer.completed(&mut txn, id, tx) {
if let Some(msg) = signer.completed(&mut txn, id, &tx) {
coordinator.send(msg).await;
}
}


@@ -102,7 +102,7 @@ impl ResolvedDb {
txn: &mut impl DbTxn,
key: &[u8],
plan: [u8; 32],
resolution: <N::Transaction as Transaction<N>>::Id,
resolution: &<N::Transaction as Transaction<N>>::Id,
) {
let mut signing = SigningDb::get(txn, key).unwrap_or_default();
assert_eq!(signing.len() % 32, 0);
@@ -160,7 +160,7 @@ impl PlansFromScanningDb {
}
impl ForwardedOutputDb {
pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) {
pub fn save_forwarded_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {
let mut existing = Self::get(txn, instruction.balance).unwrap_or_default();
existing.extend(instruction.encode());
Self::set(txn, instruction.balance, &existing);
@@ -184,7 +184,7 @@ impl ForwardedOutputDb {
}
impl DelayedOutputDb {
pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: InInstructionWithBalance) {
pub fn save_delayed_output(txn: &mut impl DbTxn, instruction: &InInstructionWithBalance) {
let mut existing = Self::get(txn).unwrap_or_default();
existing.extend(instruction.encode());
Self::set(txn, &existing);


@@ -7,7 +7,7 @@ use scale::{Encode, Decode};
use messages::SubstrateContext;
use serai_client::{
primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash},
primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data},
in_instructions::primitives::{
InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE,
},
@@ -316,7 +316,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
assert_eq!(balance.coin.network(), N::NETWORK);
if let Ok(address) = N::Address::try_from(address.consume()) {
payments.push(Payment { address, data: data.map(|data| data.consume()), balance });
payments.push(Payment { address, data: data.map(Data::consume), balance });
}
}
@@ -513,7 +513,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
let mut plans = vec![];
existing_outputs.retain(|output| {
match output.kind() {
OutputType::External => false,
OutputType::External | OutputType::Forwarded => false,
OutputType::Branch => {
let scheduler = &mut self.existing.as_mut().unwrap().scheduler;
// There *would* be a race condition here due to the fact we only mark a `Branch` output
@@ -576,7 +576,6 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
}
false
}
OutputType::Forwarded => false,
}
});
plans
@@ -873,7 +872,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
// letting it die out
if let Some(tx) = &tx {
instruction.balance.amount.0 -= tx.0.fee();
ForwardedOutputDb::save_forwarded_output(txn, instruction);
ForwardedOutputDb::save_forwarded_output(txn, &instruction);
}
} else if let Some(refund_to) = refund_to {
if let Ok(refund_to) = refund_to.consume().try_into() {
@@ -907,9 +906,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
}
let (refund_to, instruction) = instruction_from_output::<N>(&output);
let instruction = if let Some(instruction) = instruction {
instruction
} else {
let Some(instruction) = instruction else {
if let Some(refund_to) = refund_to {
if let Ok(refund_to) = refund_to.consume().try_into() {
plans.push(Self::refund_plan(output.clone(), refund_to));
@@ -922,7 +919,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
if Some(output.key()) == self.new.as_ref().map(|new| new.key) {
match step {
RotationStep::UseExisting => {
DelayedOutputDb::save_delayed_output(txn, instruction);
DelayedOutputDb::save_delayed_output(txn, &instruction);
continue;
}
RotationStep::NewAsChange |
@@ -1003,7 +1000,7 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
// within the block. Unknown Eventualities may have their Completed events emitted after
// ScannerEvent::Block however.
ScannerEvent::Completed(key, block_number, id, tx) => {
ResolvedDb::resolve_plan::<N>(txn, &key, id, tx.id());
ResolvedDb::resolve_plan::<N>(txn, &key, id, &tx.id());
(block_number, MultisigEvent::Completed(key, id, tx))
}
};


@@ -415,7 +415,7 @@ impl<N: Network, D: Db> Scanner<N, D> {
)
}
async fn emit(&mut self, event: ScannerEvent<N>) -> bool {
fn emit(&mut self, event: ScannerEvent<N>) -> bool {
if self.events.send(event).is_err() {
info!("Scanner handler was dropped. Shutting down?");
return false;
@@ -496,12 +496,9 @@ impl<N: Network, D: Db> Scanner<N, D> {
}
}
let block = match network.get_block(block_being_scanned).await {
Ok(block) => block,
Err(_) => {
warn!("couldn't get block {block_being_scanned}");
break;
}
let Ok(block) = network.get_block(block_being_scanned).await else {
warn!("couldn't get block {block_being_scanned}");
break;
};
let block_id = block.id();
@@ -570,7 +567,7 @@ impl<N: Network, D: Db> Scanner<N, D> {
completion_block_numbers.push(block_number);
// This must be before the emission of ScannerEvent::Block, per commentary in mod.rs
if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)).await {
if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) {
return;
}
}
@@ -687,10 +684,7 @@ impl<N: Network, D: Db> Scanner<N, D> {
txn.commit();
// Send all outputs
if !scanner
.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs })
.await
{
if !scanner.emit(ScannerEvent::Block { is_retirement_block, block: block_id, outputs }) {
return;
}


@@ -335,7 +335,7 @@ impl<N: Network> Scheduler<N> {
// Since we do multiple aggregation TXs at once, this will execute in logarithmic time
let utxos = self.utxos.drain(..).collect::<Vec<_>>();
let mut utxo_chunks =
utxos.chunks(N::MAX_INPUTS).map(|chunk| chunk.to_vec()).collect::<Vec<_>>();
utxos.chunks(N::MAX_INPUTS).map(<[<N as Network>::Output]>::to_vec).collect::<Vec<_>>();
// Use the first chunk for any scheduled payments, since it has the most value
let utxos = utxo_chunks.remove(0);
@@ -456,10 +456,7 @@ impl<N: Network> Scheduler<N> {
}
// If we didn't actually create this output, return, dropping the child payments
let actual = match actual {
Some(actual) => actual,
None => return,
};
let Some(actual) = actual else { return };
// Amortize the fee amongst all payments underneath this branch
{


@@ -427,7 +427,7 @@ impl Bitcoin {
match BSignableTransaction::new(
inputs.iter().map(|input| input.output.clone()).collect(),
&payments,
change.as_ref().map(|change| change.0.clone()),
change.as_ref().map(|change| &change.0),
None,
fee.0,
) {
@@ -435,16 +435,14 @@ impl Bitcoin {
Err(TransactionError::NoInputs) => {
panic!("trying to create a bitcoin transaction without inputs")
}
// No outputs left and the change isn't worth enough
Err(TransactionError::NoOutputs) => Ok(None),
// No outputs left and the change isn't worth enough/not even enough funds to pay the fee
Err(TransactionError::NoOutputs | TransactionError::NotEnoughFunds) => Ok(None),
// amortize_fee removes payments which fall below the dust threshold
Err(TransactionError::DustPayment) => panic!("dust payment despite removing dust"),
Err(TransactionError::TooMuchData) => panic!("too much data despite not specifying data"),
Err(TransactionError::TooLowFee) => {
panic!("created a transaction whose fee is below the minimum")
}
// Not even enough funds to pay the fee
Err(TransactionError::NotEnoughFunds) => Ok(None),
Err(TransactionError::TooLargeTransaction) => {
panic!("created a too large transaction despite limiting inputs/outputs")
}
@@ -637,7 +635,7 @@ impl Network for Bitcoin {
return res;
}
async fn check_block(
fn check_block(
eventualities: &mut EventualitiesTracker<Eventuality>,
block: &Block,
res: &mut HashMap<[u8; 32], (usize, Transaction)>,
@@ -678,11 +676,11 @@ impl Network for Bitcoin {
block.unwrap()
};
check_block(eventualities, &block, &mut res).await;
check_block(eventualities, &block, &mut res);
}
// Also check the current block
check_block(eventualities, block, &mut res).await;
check_block(eventualities, block, &mut res);
assert_eq!(eventualities.block_number, this_block_num);
res
@@ -733,7 +731,7 @@ impl Network for Bitcoin {
transaction
.actual
.clone()
.multisig(keys.clone(), transaction.transcript)
.multisig(&keys, transaction.transcript)
.expect("used the wrong keys"),
)
}


@@ -229,6 +229,7 @@ impl PartialEq for Monero {
}
impl Eq for Monero {}
#[allow(clippy::needless_pass_by_value)] // Needed to satisfy API expectations
fn map_rpc_err(err: RpcError) -> NetworkError {
if let RpcError::InvalidNode(reason) = &err {
log::error!("Monero RpcError::InvalidNode({reason})");
@@ -384,7 +385,7 @@ impl Monero {
Some(Zeroizing::new(*plan_id)),
inputs.clone(),
payments,
Change::fingerprintable(change.as_ref().map(|change| change.clone().into())),
&Change::fingerprintable(change.as_ref().map(|change| change.clone().into())),
vec![],
fee_rate,
) {
@@ -657,7 +658,7 @@ impl Network for Monero {
keys: ThresholdKeys<Self::Curve>,
transaction: SignableTransaction,
) -> Result<Self::TransactionMachine, NetworkError> {
match transaction.actual.clone().multisig(keys, transaction.transcript) {
match transaction.actual.clone().multisig(&keys, transaction.transcript) {
Ok(machine) => Ok(machine),
Err(e) => panic!("failed to create a multisig machine for TX: {e}"),
}
@@ -753,7 +754,7 @@ impl Network for Monero {
None,
inputs,
vec![(address.into(), amount - fee)],
Change::fingerprintable(Some(Self::test_address().into())),
&Change::fingerprintable(Some(Self::test_address().into())),
vec![],
self.rpc.get_fee(protocol, FeePriority::Low).await.unwrap(),
)


@@ -99,7 +99,7 @@ impl<N: Network> core::fmt::Debug for Plan<N> {
.field("key", &hex::encode(self.key.to_bytes()))
.field("inputs", &self.inputs)
.field("payments", &self.payments)
.field("change", &self.change.as_ref().map(|change| change.to_string()))
.field("change", &self.change.as_ref().map(ToString::to_string))
.finish()
}
}


@@ -97,7 +97,11 @@ impl CompletionsDb {
}
impl EventualityDb {
fn save_eventuality<N: Network>(txn: &mut impl DbTxn, id: [u8; 32], eventuality: N::Eventuality) {
fn save_eventuality<N: Network>(
txn: &mut impl DbTxn,
id: [u8; 32],
eventuality: &N::Eventuality,
) {
txn.put(Self::key(id), eventuality.serialize());
}
@@ -113,7 +117,7 @@ impl TransactionDb {
fn transaction<N: Network>(
getter: &impl Get,
id: <N::Transaction as Transaction<N>>::Id,
id: &<N::Transaction as Transaction<N>>::Id,
) -> Option<N::Transaction> {
Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap())
}
@@ -164,7 +168,7 @@ impl<N: Network, D: Db> Signer<N, D> {
log::info!("rebroadcasting {}", hex::encode(&completion));
// TODO: Don't drop the error entirely. Check for invariants
let _ = network
.publish_transaction(&TransactionDb::transaction::<N>(&db, completion).unwrap())
.publish_transaction(&TransactionDb::transaction::<N>(&db, &completion).unwrap())
.await;
}
}
@@ -221,7 +225,7 @@ impl<N: Network, D: Db> Signer<N, D> {
}
#[must_use]
fn already_completed(&self, txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
fn already_completed(txn: &mut D::Transaction<'_>, id: [u8; 32]) -> bool {
if !CompletionsDb::completions::<N>(txn, id).is_empty() {
debug!(
"SignTransaction/Reattempt order for {}, which we've already completed signing",
@@ -238,7 +242,7 @@ impl<N: Network, D: Db> Signer<N, D> {
fn complete(
&mut self,
id: [u8; 32],
tx_id: <N::Transaction as Transaction<N>>::Id,
tx_id: &<N::Transaction as Transaction<N>>::Id,
) -> ProcessorMessage {
// Assert we're actively signing for this TX
assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for");
@@ -260,16 +264,16 @@ impl<N: Network, D: Db> Signer<N, D> {
&mut self,
txn: &mut D::Transaction<'_>,
id: [u8; 32],
tx: N::Transaction,
tx: &N::Transaction,
) -> Option<ProcessorMessage> {
let first_completion = !self.already_completed(txn, id);
let first_completion = !Self::already_completed(txn, id);
// Save this completion to the DB
CompletedOnChainDb::complete_on_chain(txn, &id);
CompletionsDb::complete::<N>(txn, id, &tx);
CompletionsDb::complete::<N>(txn, id, tx);
if first_completion {
Some(self.complete(id, tx.id()))
Some(self.complete(id, &tx.id()))
} else {
None
}
@@ -302,13 +306,13 @@ impl<N: Network, D: Db> Signer<N, D> {
if self.network.confirm_completion(&eventuality, &tx) {
info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id));
let first_completion = !self.already_completed(txn, id);
let first_completion = !Self::already_completed(txn, id);
// Save this completion to the DB
CompletionsDb::complete::<N>(txn, id, &tx);
if first_completion {
return Some(self.complete(id, tx.id()));
return Some(self.complete(id, &tx.id()));
}
} else {
warn!(
@@ -337,7 +341,7 @@ impl<N: Network, D: Db> Signer<N, D> {
id: [u8; 32],
attempt: u32,
) -> Option<ProcessorMessage> {
if self.already_completed(txn, id) {
if Self::already_completed(txn, id) {
return None;
}
@@ -427,13 +431,13 @@ impl<N: Network, D: Db> Signer<N, D> {
txn: &mut D::Transaction<'_>,
id: [u8; 32],
tx: N::SignableTransaction,
eventuality: N::Eventuality,
eventuality: &N::Eventuality,
) -> Option<ProcessorMessage> {
// The caller is expected to re-issue sign orders on reboot
// This is solely used by the rebroadcast task
ActiveSignsDb::add_active_sign(txn, &id);
if self.already_completed(txn, id) {
if Self::already_completed(txn, id) {
return None;
}
@@ -596,7 +600,7 @@ impl<N: Network, D: Db> Signer<N, D> {
}
// Stop trying to sign for this TX
Some(self.complete(id.id, tx_id))
Some(self.complete(id.id, &tx_id))
}
CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await,


@@ -23,8 +23,8 @@ use messages::{
};
use crate::batch_signer::BatchSigner;
#[tokio::test]
async fn test_batch_signer() {
#[test]
fn test_batch_signer() {
let keys = key_gen::<_, Ristretto>(&mut OsRng);
let participant_one = Participant::new(1).unwrap();
@@ -74,7 +74,7 @@ async fn test_batch_signer() {
let mut db = MemDb::new();
let mut txn = db.txn();
match signer.sign(&mut txn, batch.clone()).await.unwrap() {
match signer.sign(&mut txn, batch.clone()).unwrap() {
// All participants should emit a preprocess
coordinator::ProcessorMessage::BatchPreprocess {
id,
@@ -109,7 +109,6 @@ async fn test_batch_signer() {
preprocesses: clone_without(&preprocesses, i),
},
)
.await
.unwrap()
{
ProcessorMessage::Coordinator(coordinator::ProcessorMessage::SubstrateShare {
@@ -137,7 +136,6 @@ async fn test_batch_signer() {
shares: clone_without(&shares, i),
},
)
.await
.unwrap()
{
ProcessorMessage::Substrate(substrate::ProcessorMessage::SignedBatch {


@@ -18,8 +18,8 @@ use serai_client::{primitives::*, validator_sets::primitives::Session};
use messages::coordinator::*;
use crate::cosigner::Cosigner;
#[tokio::test]
async fn test_cosigner() {
#[test]
fn test_cosigner() {
let keys = key_gen::<_, Ristretto>(&mut OsRng);
let participant_one = Participant::new(1).unwrap();
@@ -88,7 +88,6 @@ async fn test_cosigner() {
preprocesses: clone_without(&preprocesses, i),
},
)
.await
.unwrap()
{
ProcessorMessage::SubstrateShare { id, shares: mut these_shares } => {
@@ -113,7 +112,6 @@ async fn test_cosigner() {
shares: clone_without(&shares, i),
},
)
.await
.unwrap()
{
ProcessorMessage::CosignedBlock { block_number, block: signed_block, signature } => {


@@ -20,7 +20,7 @@ use crate::{
const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };
pub async fn test_key_gen<N: Network>() {
pub fn test_key_gen<N: Network>() {
let mut entropies = HashMap::new();
let mut dbs = HashMap::new();
let mut key_gens = HashMap::new();
@@ -37,18 +37,15 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
if let ProcessorMessage::Commitments { id, mut commitments } = key_gen
.handle(
&mut txn,
CoordinatorMessage::GenerateKey {
id: ID,
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
.unwrap(),
shares: 1,
},
)
.await
{
if let ProcessorMessage::Commitments { id, mut commitments } = key_gen.handle(
&mut txn,
CoordinatorMessage::GenerateKey {
id: ID,
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
.unwrap(),
shares: 1,
},
) {
assert_eq!(id, ID);
assert_eq!(commitments.len(), 1);
all_commitments
@@ -74,16 +71,10 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let ProcessorMessage::Shares { id, mut shares } = key_gen
.handle(
&mut txn,
CoordinatorMessage::Commitments {
id: ID,
commitments: clone_without(&all_commitments, &i),
},
)
.await
{
if let ProcessorMessage::Shares { id, mut shares } = key_gen.handle(
&mut txn,
CoordinatorMessage::Commitments { id: ID, commitments: clone_without(&all_commitments, &i) },
) {
assert_eq!(id, ID);
assert_eq!(shares.len(), 1);
all_shares.insert(i, shares.swap_remove(0));
@@ -102,19 +93,16 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen
.handle(
&mut txn,
CoordinatorMessage::Shares {
id: ID,
shares: vec![all_shares
.iter()
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
.collect()],
},
)
.await
{
if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } = key_gen.handle(
&mut txn,
CoordinatorMessage::Shares {
id: ID,
shares: vec![all_shares
.iter()
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
.collect()],
},
) {
assert_eq!(id, ID);
if res.is_none() {
res = Some((substrate_key, network_key.clone()));
@@ -134,13 +122,11 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen
.confirm(
&mut txn,
ID.session,
KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()),
)
.await;
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen.confirm(
&mut txn,
ID.session,
&KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()),
);
txn.commit();
assert_eq!(substrate_keys.len(), 1);


@@ -46,7 +46,7 @@ macro_rules! test_network {
#[tokio::test]
async fn $key_gen() {
init_logger();
test_key_gen::<$N>().await;
test_key_gen::<$N>();
}
#[test]


@@ -72,7 +72,7 @@ pub async fn sign<N: Network>(
match signers
.get_mut(&i)
.unwrap()
.sign_transaction(&mut txn, actual_id.id, tx, eventuality)
.sign_transaction(&mut txn, actual_id.id, tx, &eventuality)
.await
{
// All participants should emit a preprocess