Use a single txn for an entire coordinator message

Removes direct DB accesses where possible. Documents the safety of the rest.
This does uncover one case of unsafety not previously noted.
Luke Parker
2023-04-17 23:20:48 -04:00
parent 7579c71765
commit fd1bbec134
12 changed files with 370 additions and 284 deletions
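
For readers skimming the diffs below: the pattern being adopted is that the caller opens a single DbTxn for each coordinator message, passes it by &mut to every handler involved in processing that message, and commits exactly once afterwards, so all of the message's database writes land (or are dropped) together. What follows is a minimal, self-contained sketch of that flow; the MemDb/Txn types and the KeyGen::handle body are simplified stand-ins for illustration, not the real serai_db or processor APIs.

use std::collections::HashMap;

// Simplified stand-in for serai_db::MemDb/DbTxn: writes are buffered in the
// transaction and only applied to the backing map on commit().
struct MemDb(HashMap<Vec<u8>, Vec<u8>>);
struct Txn<'a> {
  db: &'a mut MemDb,
  pending: Vec<(Vec<u8>, Vec<u8>)>,
}

impl MemDb {
  fn new() -> Self {
    MemDb(HashMap::new())
  }
  fn txn(&mut self) -> Txn<'_> {
    Txn { db: self, pending: vec![] }
  }
}

impl Txn<'_> {
  fn put(&mut self, key: &[u8], value: &[u8]) {
    self.pending.push((key.to_vec(), value.to_vec()));
  }
  fn commit(self) {
    for (key, value) in self.pending {
      self.db.0.insert(key, value);
    }
  }
}

// Simplified stand-ins for a coordinator message and one of its handlers.
enum CoordinatorMessage {
  GenerateKey { id: u32 },
}

struct KeyGen;
impl KeyGen {
  // Handlers no longer open their own transactions or touch the DB directly;
  // they only write through the transaction borrowed from the caller.
  fn handle(&mut self, txn: &mut Txn<'_>, msg: CoordinatorMessage) {
    match msg {
      CoordinatorMessage::GenerateKey { id } => txn.put(b"keygen id", &id.to_le_bytes()),
    }
  }
}

fn main() {
  let mut db = MemDb::new();
  let mut key_gen = KeyGen;

  // One transaction per coordinator message: open it, hand it to every handler
  // involved, then commit once, so the message's writes are applied atomically.
  let mut txn = db.txn();
  key_gen.handle(&mut txn, CoordinatorMessage::GenerateKey { id: 1 });
  txn.commit();
}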

View File

@@ -7,7 +7,7 @@ use frost::{Participant, ThresholdKeys};
 use tokio::time::timeout;
-use serai_db::MemDb;
+use serai_db::{DbTxn, MemDb};
 use crate::{
   Plan, Db,
@@ -78,10 +78,12 @@ pub async fn test_addresses<C: Coin>(coin: C) {
     coin.mine_block().await;
   }
-  let db = MemDb::new();
+  let mut db = MemDb::new();
   let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
   assert!(active_keys.is_empty());
-  scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await;
+  let mut txn = db.txn();
+  scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
+  txn.commit();
   // Receive funds to the branch address and make sure it's properly identified
   let block_id = coin.test_send(C::branch_address(key)).await.id();

View File

@@ -7,7 +7,7 @@ use rand_core::{RngCore, OsRng};
 use group::GroupEncoding;
 use frost::{Participant, ThresholdParams, tests::clone_without};
-use serai_db::MemDb;
+use serai_db::{DbTxn, Db, MemDb};
 use serai_client::{
   primitives::{MONERO_NET_ID, BlockHash},
@@ -31,19 +31,24 @@ pub async fn test_key_gen<C: Coin>() {
     let mut entropy = Zeroizing::new([0; 32]);
     OsRng.fill_bytes(entropy.as_mut());
     entropies.insert(i, entropy);
-    dbs.insert(i, MemDb::new());
-    key_gens.insert(i, KeyGen::<C, _>::new(dbs[&i].clone(), entropies[&i].clone()));
+    let db = MemDb::new();
+    dbs.insert(i, db.clone());
+    key_gens.insert(i, KeyGen::<C, MemDb>::new(db, entropies[&i].clone()));
   }
   let mut all_commitments = HashMap::new();
   for i in 1 ..= 5 {
     let key_gen = key_gens.get_mut(&i).unwrap();
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
     if let ProcessorMessage::Commitments { id, commitments } = key_gen
-      .handle(CoordinatorMessage::GenerateKey {
-        id: ID,
-        params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
-          .unwrap(),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::GenerateKey {
+          id: ID,
+          params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
+            .unwrap(),
+        },
+      )
       .await
     {
       assert_eq!(id, ID);
@@ -51,27 +56,32 @@ pub async fn test_key_gen<C: Coin>() {
     } else {
       panic!("didn't get commitments back");
     }
+    txn.commit();
   }
   // 1 is rebuilt on every step
   // 2 is rebuilt here
   // 3 ... are rebuilt once, one at each of the following steps
-  let rebuild = |key_gens: &mut HashMap<_, _>, i| {
+  let rebuild = |key_gens: &mut HashMap<_, _>, dbs: &HashMap<_, MemDb>, i| {
     key_gens.remove(&i);
     key_gens.insert(i, KeyGen::<C, _>::new(dbs[&i].clone(), entropies[&i].clone()));
   };
-  rebuild(&mut key_gens, 1);
-  rebuild(&mut key_gens, 2);
+  rebuild(&mut key_gens, &dbs, 1);
+  rebuild(&mut key_gens, &dbs, 2);
   let mut all_shares = HashMap::new();
   for i in 1 ..= 5 {
     let key_gen = key_gens.get_mut(&i).unwrap();
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
     if let ProcessorMessage::Shares { id, shares } = key_gen
-      .handle(CoordinatorMessage::Commitments {
-        id: ID,
-        commitments: clone_without(&all_commitments, &i),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::Commitments {
+          id: ID,
+          commitments: clone_without(&all_commitments, &i),
+        },
+      )
       .await
     {
       assert_eq!(id, ID);
@@ -79,24 +89,29 @@ pub async fn test_key_gen<C: Coin>() {
     } else {
       panic!("didn't get shares back");
     }
+    txn.commit();
   }
   // Rebuild 1 and 3
-  rebuild(&mut key_gens, 1);
-  rebuild(&mut key_gens, 3);
+  rebuild(&mut key_gens, &dbs, 1);
+  rebuild(&mut key_gens, &dbs, 3);
   let mut res = None;
   for i in 1 ..= 5 {
     let key_gen = key_gens.get_mut(&i).unwrap();
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
     if let ProcessorMessage::GeneratedKeyPair { id, substrate_key, coin_key } = key_gen
-      .handle(CoordinatorMessage::Shares {
-        id: ID,
-        shares: all_shares
-          .iter()
-          .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
-          .collect(),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::Shares {
+          id: ID,
+          shares: all_shares
+            .iter()
+            .filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
+            .collect(),
+        },
+      )
       .await
     {
       assert_eq!(id, ID);
@@ -107,17 +122,25 @@ pub async fn test_key_gen<C: Coin>() {
     } else {
       panic!("didn't get key back");
     }
+    txn.commit();
   }
   // Rebuild 1 and 4
-  rebuild(&mut key_gens, 1);
-  rebuild(&mut key_gens, 4);
+  rebuild(&mut key_gens, &dbs, 1);
+  rebuild(&mut key_gens, &dbs, 4);
   for i in 1 ..= 5 {
     let key_gen = key_gens.get_mut(&i).unwrap();
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
     let KeyConfirmed { activation_block, substrate_keys, coin_keys } = key_gen
-      .confirm(SubstrateContext { coin_latest_finalized_block: BlockHash([0x11; 32]) }, ID)
+      .confirm(
+        &mut txn,
+        SubstrateContext { coin_latest_finalized_block: BlockHash([0x11; 32]) },
+        ID,
+      )
       .await;
+    txn.commit();
     assert_eq!(activation_block, BlockHash([0x11; 32]));
     let params =
       ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();

View File

@@ -9,7 +9,7 @@ use tokio::time::timeout;
 use serai_client::primitives::BlockHash;
-use serai_db::MemDb;
+use serai_db::{DbTxn, Db, MemDb};
 use crate::{
   coins::{OutputType, Output, Block, Coin},
@@ -20,6 +20,7 @@ pub async fn test_scanner<C: Coin>(coin: C) {
   let mut keys =
     frost::tests::key_gen::<_, C::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap();
   C::tweak_keys(&mut keys);
+  let group_key = keys.group_key();
   // Mine blocks so there's a confirmed block
   for _ in 0 .. C::CONFIRMATIONS {
@@ -30,11 +31,14 @@ pub async fn test_scanner<C: Coin>(coin: C) {
   let activation_number = coin.get_latest_block_number().await.unwrap();
   let db = MemDb::new();
   let new_scanner = || async {
+    let mut db = db.clone();
     let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
     let mut first = first.lock().unwrap();
     if *first {
       assert!(active_keys.is_empty());
-      scanner.rotate_key(activation_number, keys.group_key()).await;
+      let mut txn = db.txn();
+      scanner.rotate_key(&mut txn, activation_number, group_key).await;
+      txn.commit();
       *first = false;
     } else {
       assert_eq!(active_keys.len(), 1);
@@ -83,7 +87,14 @@ pub async fn test_scanner<C: Coin>(coin: C) {
     }
     curr_block += 1;
   }
-  assert_eq!(scanner.ack_up_to_block(keys.group_key(), block_id).await, (blocks, outputs));
+  let mut cloned_db = db.clone();
+  let mut txn = cloned_db.txn();
+  assert_eq!(
+    scanner.ack_up_to_block(&mut txn, keys.group_key(), block_id).await,
+    (blocks, outputs)
+  );
+  txn.commit();
   // There should be no more events
   assert!(timeout(Duration::from_secs(30), scanner.events.recv()).await.is_err());

View File

@@ -8,7 +8,7 @@ use frost::{
   dkg::tests::{key_gen, clone_without},
 };
-use serai_db::MemDb;
+use serai_db::{DbTxn, Db, MemDb};
 use messages::sign::*;
 use crate::{
@@ -39,19 +39,23 @@ pub async fn sign<C: Coin>(
   }
   let mut signers = HashMap::new();
+  let mut dbs = HashMap::new();
   let mut t = 0;
   for i in 1 ..= keys.len() {
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
     let keys = keys.remove(&i).unwrap();
     t = keys.params().t();
-    signers.insert(i, Signer::new(MemDb::new(), coin.clone(), keys));
+    signers.insert(i, Signer::<_, MemDb>::new(coin.clone(), keys));
+    dbs.insert(i, MemDb::new());
   }
   drop(keys);
   for i in 1 ..= signers.len() {
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
     let (tx, eventuality) = txs.remove(&i).unwrap();
-    signers.get_mut(&i).unwrap().sign_transaction(actual_id.id, tx, eventuality).await;
+    let mut txn = dbs.get_mut(&i).unwrap().txn();
+    signers.get_mut(&i).unwrap().sign_transaction(&mut txn, actual_id.id, tx, eventuality).await;
+    txn.commit();
   }
   let mut signing_set = vec![];
@@ -84,14 +88,20 @@ pub async fn sign<C: Coin>(
   let mut shares = HashMap::new();
   for i in &signing_set {
+    let mut txn = dbs.get_mut(i).unwrap().txn();
     signers
       .get_mut(i)
       .unwrap()
-      .handle(CoordinatorMessage::Preprocesses {
-        id: actual_id.clone(),
-        preprocesses: clone_without(&preprocesses, i),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::Preprocesses {
+          id: actual_id.clone(),
+          preprocesses: clone_without(&preprocesses, i),
+        },
+      )
       .await;
+    txn.commit();
     if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, share }) =
       signers.get_mut(i).unwrap().events.pop_front().unwrap()
     {
@@ -104,14 +114,17 @@ pub async fn sign<C: Coin>(
   let mut tx_id = None;
   for i in &signing_set {
+    let mut txn = dbs.get_mut(i).unwrap().txn();
     signers
       .get_mut(i)
       .unwrap()
-      .handle(CoordinatorMessage::Shares {
-        id: actual_id.clone(),
-        shares: clone_without(&shares, i),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::Shares { id: actual_id.clone(), shares: clone_without(&shares, i) },
+      )
       .await;
+    txn.commit();
     if let SignerEvent::SignedTransaction { id, tx } =
       signers.get_mut(i).unwrap().events.pop_front().unwrap()
     {

View File

@@ -12,7 +12,7 @@ use frost::{
 use scale::Encode;
 use sp_application_crypto::{RuntimePublic, sr25519::Public};
-use serai_db::MemDb;
+use serai_db::{DbTxn, Db, MemDb};
 use serai_client::{primitives::*, in_instructions::primitives::*};
@@ -49,14 +49,21 @@ async fn test_substrate_signer() {
   };
   let mut signers = HashMap::new();
+  let mut dbs = HashMap::new();
   let mut t = 0;
   for i in 1 ..= keys.len() {
     let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
     let keys = keys.remove(&i).unwrap();
     t = keys.params().t();
-    let mut signer = SubstrateSigner::new(MemDb::new(), keys);
-    signer.sign(batch.clone()).await;
+    let mut signer = SubstrateSigner::<MemDb>::new(keys);
+    let mut db = MemDb::new();
+    let mut txn = db.txn();
+    signer.sign(&mut txn, batch.clone()).await;
+    txn.commit();
     signers.insert(i, signer);
+    dbs.insert(i, db);
   }
   drop(keys);
@@ -92,14 +99,20 @@ async fn test_substrate_signer() {
   let mut shares = HashMap::new();
   for i in &signing_set {
+    let mut txn = dbs.get_mut(i).unwrap().txn();
     signers
       .get_mut(i)
       .unwrap()
-      .handle(CoordinatorMessage::BatchPreprocesses {
-        id: actual_id.clone(),
-        preprocesses: clone_without(&preprocesses, i),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::BatchPreprocesses {
+          id: actual_id.clone(),
+          preprocesses: clone_without(&preprocesses, i),
+        },
+      )
       .await;
+    txn.commit();
     if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare { id, share }) =
       signers.get_mut(i).unwrap().events.pop_front().unwrap()
     {
@@ -111,14 +124,19 @@ async fn test_substrate_signer() {
   }
   for i in &signing_set {
+    let mut txn = dbs.get_mut(i).unwrap().txn();
     signers
       .get_mut(i)
       .unwrap()
-      .handle(CoordinatorMessage::BatchShares {
-        id: actual_id.clone(),
-        shares: clone_without(&shares, i),
-      })
+      .handle(
+        &mut txn,
+        CoordinatorMessage::BatchShares {
+          id: actual_id.clone(),
+          shares: clone_without(&shares, i),
+        },
+      )
       .await;
+    txn.commit();
     if let SubstrateSignerEvent::SignedBatch(signed_batch) =
       signers.get_mut(i).unwrap().events.pop_front().unwrap()

View File

@@ -6,7 +6,7 @@ use frost::{Participant, dkg::tests::key_gen};
 use tokio::time::timeout;
-use serai_db::MemDb;
+use serai_db::{DbTxn, Db, MemDb};
 use crate::{
   Payment, Plan,
@@ -24,10 +24,13 @@ pub async fn test_wallet<C: Coin>(coin: C) {
   }
   let key = keys[&Participant::new(1).unwrap()].group_key();
-  let (mut scanner, active_keys) = Scanner::new(coin.clone(), MemDb::new());
+  let mut db = MemDb::new();
+  let (mut scanner, active_keys) = Scanner::new(coin.clone(), db.clone());
   assert!(active_keys.is_empty());
   let (block_id, outputs) = {
-    scanner.rotate_key(coin.get_latest_block_number().await.unwrap(), key).await;
+    let mut txn = db.txn();
+    scanner.rotate_key(&mut txn, coin.get_latest_block_number().await.unwrap(), key).await;
+    txn.commit();
     let block = coin.test_send(C::address(key)).await;
     let block_id = block.id();
@@ -114,8 +117,10 @@ pub async fn test_wallet<C: Coin>(coin: C) {
   }
   // Check the Scanner DB can reload the outputs
+  let mut txn = db.txn();
   assert_eq!(
-    scanner.ack_up_to_block(key, block.id()).await.1,
+    scanner.ack_up_to_block(&mut txn, key, block.id()).await.1,
     [first_outputs, outputs].concat().to_vec()
   );
+  txn.commit();
 }