Apply DKG TX handling code to all sign TXs

The existing code was almost entirely applicable. It just needed to be scoped
with an ID. While the handle function is now a bit convoluted, I don't see a
better option.
Luke Parker
2023-04-20 06:27:00 -04:00
parent 8b5eaa8092
commit 9c2a44f9df
4 changed files with 138 additions and 50 deletions
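The diffs below boil down to adding an `id` to every attempt/data key and to the `handle` closure, so DKG commitments and shares (which use a fixed `[0; 32]` ID) and sign/batch preprocesses and shares (scoped by their plan's ID) all flow through one accumulation path. As a rough illustration of that keying scheme, here is a minimal, self-contained sketch; the `DataStore` type, its in-memory map, and the integer signer index are illustrative stand-ins, not the coordinator's actual database API:

// Hypothetical sketch: one store keyed by (label, id, attempt, signer), so DKG and
// sign/batch data share a single accumulation path. Names here are illustrative.
use std::collections::HashMap;

type Id = [u8; 32];

#[derive(Default)]
struct DataStore {
  // (label, id, attempt, signer index) -> the data that signer published
  data: HashMap<(&'static [u8], Id, u32, u16), Vec<u8>>,
}

impl DataStore {
  // Record one participant's data and return how many participants have now
  // published for this (label, id, attempt), for comparison against `needed`.
  fn set_data(
    &mut self,
    label: &'static [u8],
    id: Id,
    attempt: u32,
    signer: u16,
    bytes: Vec<u8>,
  ) -> u16 {
    self.data.insert((label, id, attempt, signer), bytes);
    self
      .data
      .keys()
      .filter(|(l, i, a, _)| (*l == label) && (*i == id) && (*a == attempt))
      .count() as u16
  }
}

fn main() {
  let mut store = DataStore::default();
  // `needed` would be spec.n() for the DKG and spec.t() for sign/batch data
  let needed = 2u16;
  // The DKG uses a fixed [0; 32] ID; sign/batch data is scoped by its plan's ID
  let plan: Id = [1; 32];
  for signer in 0 .. needed {
    let received = store.set_data(b"sign_preprocess", plan, 0, signer, vec![signer as u8]);
    if received == needed {
      println!("all {needed} preprocesses for this plan received; notify the processor");
    }
  }
}

The only per-label difference is the `needed` threshold, as in the actual diffs: `spec.n()` for DKG data, `spec.t()` for sign and batch data.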


@@ -25,56 +25,72 @@ impl<D: Db> TributaryDb<D> {
     self.0.get(Self::block_key(genesis)).unwrap_or(genesis.to_vec()).try_into().unwrap()
   }
 
-  fn dkg_attempt_key(genesis: [u8; 32]) -> Vec<u8> {
-    Self::tributary_key(b"dkg_attempt", genesis)
+  fn attempt_key(genesis: [u8; 32], id: [u8; 32]) -> Vec<u8> {
+    let genesis_ref: &[u8] = genesis.as_ref();
+    Self::tributary_key(b"attempt", [genesis_ref, id.as_ref()].concat())
   }
-  pub fn dkg_attempt<G: Get>(getter: &G, genesis: [u8; 32]) -> u32 {
+  pub fn attempt<G: Get>(getter: &G, genesis: [u8; 32], id: [u8; 32]) -> u32 {
     u32::from_le_bytes(
-      getter.get(Self::dkg_attempt_key(genesis)).unwrap_or(vec![0; 4]).try_into().unwrap(),
+      getter.get(Self::attempt_key(genesis, id)).unwrap_or(vec![0; 4]).try_into().unwrap(),
     )
   }
 
-  fn dkg_data_received_key(label: &'static [u8], genesis: &[u8], attempt: u32) -> Vec<u8> {
-    Self::tributary_key(
-      b"dkg_data_received",
-      [label, genesis, attempt.to_le_bytes().as_ref()].concat(),
-    )
-  }
-  fn dkg_data_key(
+  fn data_received_key(
     label: &'static [u8],
-    genesis: &[u8],
-    signer: &<Ristretto as Ciphersuite>::G,
+    genesis: [u8; 32],
+    id: [u8; 32],
     attempt: u32,
   ) -> Vec<u8> {
     Self::tributary_key(
-      b"dkg_data",
-      [label, genesis, signer.to_bytes().as_ref(), attempt.to_le_bytes().as_ref()].concat(),
+      b"data_received",
+      [label, genesis.as_ref(), id.as_ref(), attempt.to_le_bytes().as_ref()].concat(),
     )
   }
-  pub fn dkg_data<G: Get>(
+  fn data_key(
+    label: &'static [u8],
+    genesis: [u8; 32],
+    id: [u8; 32],
+    attempt: u32,
+    signer: &<Ristretto as Ciphersuite>::G,
+  ) -> Vec<u8> {
+    Self::tributary_key(
+      b"data",
+      [
+        label,
+        genesis.as_ref(),
+        id.as_ref(),
+        attempt.to_le_bytes().as_ref(),
+        signer.to_bytes().as_ref(),
+      ]
+      .concat(),
+    )
+  }
+  pub fn data<G: Get>(
     label: &'static [u8],
     getter: &G,
     genesis: [u8; 32],
-    signer: &<Ristretto as Ciphersuite>::G,
+    id: [u8; 32],
     attempt: u32,
+    signer: &<Ristretto as Ciphersuite>::G,
   ) -> Option<Vec<u8>> {
-    getter.get(Self::dkg_data_key(label, &genesis, signer, attempt))
+    getter.get(Self::data_key(label, genesis, id, attempt, signer))
   }
-  pub fn set_dkg_data(
+  pub fn set_data(
     label: &'static [u8],
     txn: &mut D::Transaction<'_>,
     genesis: [u8; 32],
-    signer: &<Ristretto as Ciphersuite>::G,
+    id: [u8; 32],
     attempt: u32,
+    signer: &<Ristretto as Ciphersuite>::G,
     data: &[u8],
   ) -> u16 {
-    let received_key = Self::dkg_data_received_key(label, &genesis, attempt);
+    let received_key = Self::data_received_key(label, genesis, id, attempt);
     let mut received =
       u16::from_le_bytes(txn.get(&received_key).unwrap_or(vec![0; 2]).try_into().unwrap());
     received += 1;
     txn.put(received_key, received.to_le_bytes());
-    txn.put(Self::dkg_data_key(label, &genesis, signer, attempt), data);
+    txn.put(Self::data_key(label, genesis, id, attempt, signer), data);
     received
   }


@@ -327,6 +327,13 @@ impl TransactionTrait for Transaction {
   fn verify(&self) -> Result<(), TransactionError> {
     // TODO: Augment with checks that the Vecs can be deser'd and are for recognized IDs
+
+    if let Transaction::BatchShare(data) = self {
+      if data.data.len() != 32 {
+        Err(TransactionError::InvalidContent)?;
+      }
+    }
+
     Ok(())
   }
 }


@@ -9,7 +9,8 @@ use tributary::{Signed, Block, P2p, Tributary};
 use processor_messages::{
   key_gen::{self, KeyGenId},
-  CoordinatorMessage,
+  sign::{self, SignId},
+  coordinator, CoordinatorMessage,
 };
 
 use serai_db::DbTxn;
@@ -36,10 +37,10 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
     if !TributaryDb::<D>::handled_event(&db.0, hash, event_id) {
       let mut txn = db.0.txn();
 
-      let mut handle_dkg = |label, attempt, mut bytes: Vec<u8>, signed: Signed| {
+      let mut handle = |label, needed, id, attempt, mut bytes: Vec<u8>, signed: Signed| {
         // If they've already published a TX for this attempt, slash
         if let Some(data) =
-          TributaryDb::<D>::dkg_data(label, &txn, tributary.genesis(), &signed.signer, attempt)
+          TributaryDb::<D>::data(label, &txn, tributary.genesis(), id, attempt, &signed.signer)
         {
           if data != bytes {
             // TODO: Full slash
@@ -51,7 +52,7 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
         }
 
         // If the attempt is lesser than the blockchain's, slash
-        let curr_attempt = TributaryDb::<D>::dkg_attempt(&txn, tributary.genesis());
+        let curr_attempt = TributaryDb::<D>::attempt(&txn, tributary.genesis(), id);
         if attempt < curr_attempt {
           // TODO: Slash for being late
           return None;
@@ -62,46 +63,48 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
         }
 
         // Store this data
-        let received = TributaryDb::<D>::set_dkg_data(
+        let received = TributaryDb::<D>::set_data(
           label,
           &mut txn,
           tributary.genesis(),
-          &signed.signer,
+          id,
           attempt,
+          &signed.signer,
           &bytes,
         );
 
-        // If we have all commitments/shares, tell the processor
-        if received == spec.n() {
+        // If we have all the needed commitments/preprocesses/shares, tell the processor
+        if received == needed {
           let mut data = HashMap::new();
           for validator in spec.validators().keys() {
             data.insert(
               spec.i(*validator).unwrap(),
               if validator == &signed.signer {
                 bytes.split_off(0)
-              } else {
-                TributaryDb::<D>::dkg_data(label, &txn, tributary.genesis(), validator, attempt)
-                  .unwrap_or_else(|| {
-                    panic!(
-                      "received all DKG data yet couldn't load {} for a validator",
-                      std::str::from_utf8(label).unwrap(),
-                    )
-                  })
+              } else if let Some(data) =
+                TributaryDb::<D>::data(label, &txn, tributary.genesis(), id, attempt, validator)
+              {
+                data
+              } else {
+                continue;
               },
             );
           }
-          return Some((KeyGenId { set: spec.set(), attempt }, data));
+          assert_eq!(data.len(), usize::from(needed));
+          return Some(data);
         }
         None
       };
 
       match tx {
         Transaction::DkgCommitments(attempt, bytes, signed) => {
-          if let Some((id, commitments)) = handle_dkg(b"commitments", attempt, bytes, signed) {
+          if let Some(commitments) =
+            handle(b"dkg_commitments", spec.n(), [0; 32], attempt, bytes, signed)
+          {
             processor
               .send(CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-                id,
+                id: KeyGenId { set: spec.set(), attempt },
                 commitments,
               }))
               .await;
@@ -122,20 +125,77 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
           )
           .unwrap();
 
-          if let Some((id, shares)) = handle_dkg(b"shares", attempt, bytes, signed) {
+          if let Some(shares) = handle(b"dkg_shares", spec.n(), [0; 32], attempt, bytes, signed) {
             processor
-              .send(CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { id, shares }))
+              .send(CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
+                id: KeyGenId { set: spec.set(), attempt },
+                shares,
+              }))
               .await;
           }
         }
 
-        Transaction::SignPreprocess(..) => todo!(),
-        Transaction::SignShare(..) => todo!(),
+        Transaction::SignPreprocess(data) => {
+          // TODO: Validate data.plan
+          if let Some(preprocesses) =
+            handle(b"sign_preprocess", spec.t(), data.plan, data.attempt, data.data, data.signed)
+          {
+            processor
+              .send(CoordinatorMessage::Sign(sign::CoordinatorMessage::Preprocesses {
+                id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
+                preprocesses,
+              }))
+              .await;
+          }
+        }
+        Transaction::SignShare(data) => {
+          // TODO: Validate data.plan
+          if let Some(shares) =
+            handle(b"sign_share", spec.t(), data.plan, data.attempt, data.data, data.signed)
+          {
+            processor
+              .send(CoordinatorMessage::Sign(sign::CoordinatorMessage::Shares {
+                id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
+                shares,
+              }))
+              .await;
+          }
+        }
 
+        // TODO
         Transaction::FinalizedBlock(..) => todo!(),
 
-        Transaction::BatchPreprocess(..) => todo!(),
-        Transaction::BatchShare(..) => todo!(),
+        Transaction::BatchPreprocess(data) => {
+          // TODO: Validate data.plan
+          if let Some(preprocesses) =
+            handle(b"batch_preprocess", spec.t(), data.plan, data.attempt, data.data, data.signed)
+          {
+            processor
+              .send(CoordinatorMessage::Coordinator(
+                coordinator::CoordinatorMessage::BatchPreprocesses {
+                  id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
+                  preprocesses,
+                },
+              ))
+              .await;
+          }
+        }
+        Transaction::BatchShare(data) => {
+          // TODO: Validate data.plan
+          if let Some(shares) =
+            handle(b"batch_share", spec.t(), data.plan, data.attempt, data.data, data.signed)
+          {
+            processor
+              .send(CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchShares {
+                id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
+                shares: shares
+                  .drain()
+                  .map(|(validator, share)| (validator, share.try_into().unwrap()))
+                  .collect(),
+              }))
+              .await;
+          }
+        }
       }
 
       TributaryDb::<D>::handle_event(&mut txn, hash, event_id);
@@ -143,6 +203,8 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
     }
     event_id += 1;
   }
+
+  // TODO: Trigger any necessary re-attempts
 }
 
 pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(


@@ -13,17 +13,20 @@ use crate::{TRANSACTION_SIZE_LIMIT, ReadWrite};
 #[derive(Clone, PartialEq, Eq, Debug, Error)]
 pub enum TransactionError {
   /// Transaction exceeded the size limit.
-  #[error("transaction was too large")]
+  #[error("transaction is too large")]
   TooLargeTransaction,
-  /// This transaction's signer isn't a participant.
+  /// Transaction's signer isn't a participant.
   #[error("invalid signer")]
   InvalidSigner,
-  /// This transaction's nonce isn't the prior nonce plus one.
+  /// Transaction's nonce isn't the prior nonce plus one.
   #[error("invalid nonce")]
   InvalidNonce,
-  /// This transaction's signature is invalid.
+  /// Transaction's signature is invalid.
   #[error("invalid signature")]
   InvalidSignature,
+  /// Transaction's content is invalid.
+  #[error("transaction content is invalid")]
+  InvalidContent,
 }
 
 /// Data for a signed transaction.