Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-08 04:09:23 +00:00
Support multiple key shares per validator (#416)
* Update the coordinator to give key shares based on weight, not based on existence

  Participants are now identified by their starting index. While this compiles, the following is
  unimplemented:

  1) A conversion for DKG `i` values. It assumes the threshold `i` values used will be identical
     for the MuSig signature used to confirm the DKG.
  2) Expansion from compressed values to full values before forwarding to the processor.

* Add a fn to the DkgConfirmer to convert `i` values as needed

  Also removes TODOs regarding Serai ensuring validator key uniqueness + validity. The current
  infra achieves both.

* Have the Tributary DB track participation by shares, not by count

* Prevent a node from obtaining 34% of the maximum amount of key shares

  This is actually mainly intended to set a bound on message sizes in the coordinator. Message
  sizes are amplified by the amount of key shares held, so setting an upper bound on said amount
  lets it determine constants. While that upper bound could be 150, that'd be unreasonable and
  increase the potential for DoS attacks.

* Correct the mechanism to detect if sufficient accumulation has occurred

  It used to check if the latest accumulation hit the required threshold. Now, accumulations may
  jump past the required threshold. The required mechanism is to check the threshold wasn't
  previously met and is now met (a short sketch of this check follows below).

* Finish updating the coordinator to handle a multiple-key-share-per-validator environment

* Adjust strategy re: preventing nonce reuse in DKG Confirmer

* Add TODOs regarding dropped transactions, add possible TODO fix

* Update tests/coordinator

  This doesn't add new multi-key-share tests; it solely updates the existing single-key-share
  tests to compile and run, with the necessary fixes to the coordinator.

* Update processor key_gen to handle generating multiple key shares at once

* Update SubstrateSigner

* Update signer, clippy

* Update processor tests

* Update processor docker tests
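As a minimal illustration of the corrected accumulation check described above (a sketch with a hypothetical free function, not the coordinator's actual API): because a validator may now hold several key shares, a single accumulation can jump past the threshold, so the check is that the threshold was not met before and is met now.

// Minimal sketch (hypothetical names) of the threshold-crossing check.
fn newly_met(prior_received: u16, now_received: u16, needed: u16) -> bool {
  (prior_received < needed) && (now_received >= needed)
}

fn main() {
  // A validator holding 3 key shares can push the tally from 4 straight past a threshold of 5.
  assert!(newly_met(4, 7, 5));
  // Accumulations after the threshold was already met must not re-trigger readiness.
  assert!(!newly_met(7, 9, 5));
}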

@@ -114,8 +114,9 @@ impl<D: Db> MainDb<D> {
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
preprocess: Vec<u8>,
preprocess: Vec<Vec<u8>>,
) {
let preprocess = preprocess.encode();
let key = Self::first_preprocess_key(network, id_type, id);
if let Some(existing) = txn.get(&key) {
assert_eq!(existing, preprocess, "saved a distinct first preprocess");

@@ -128,8 +129,10 @@ impl<D: Db> MainDb<D> {
network: NetworkId,
id_type: RecognizedIdType,
id: [u8; 32],
) -> Option<Vec<u8>> {
getter.get(Self::first_preprocess_key(network, id_type, id))
) -> Option<Vec<Vec<u8>>> {
getter
.get(Self::first_preprocess_key(network, id_type, id))
.map(|bytes| Vec::<_>::decode(&mut bytes.as_slice()).unwrap())
}

fn last_received_batch_key(network: NetworkId) -> Vec<u8> {

@@ -101,19 +101,16 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
// If we're rebooting, we'll re-fire this message
// This is safe due to the message-queue deduplicating based off the intent system
let set = spec.set();
let our_i = spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for");
processors
.send(
set.network,
processor_messages::key_gen::CoordinatorMessage::GenerateKey {
id: processor_messages::key_gen::KeyGenId { set, attempt: 0 },
params: frost::ThresholdParams::new(
spec.t(),
spec.n(),
spec
.i(Ristretto::generator() * key.deref())
.expect("adding a tributary for a set we aren't in set for"),
)
.unwrap(),
params: frost::ThresholdParams::new(spec.t(), spec.n(), our_i.start).unwrap(),
shares: u16::from(our_i.end) - u16::from(our_i.start),
},
)
.await;

@@ -426,18 +423,29 @@ async fn handle_processor_message<D: Db, P: P2p>(
// Create a MuSig-based machine to inform Substrate of this key generation
let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, id.attempt);

let our_i = spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in");

// `tx_shares` needs to be done here as while it can be serialized from the HashMap
// without further context, it can't be deserialized without context
let mut tx_shares = Vec::with_capacity(shares.len());
for i in 1 ..= spec.n() {
let i = Participant::new(i).unwrap();
if i ==
spec
.i(pub_key)
.expect("processor message to DKG for a session we aren't a validator in")
{
if our_i.contains(&i) {
for shares in &shares {
if shares.contains_key(&i) {
panic!("processor sent us our own shares");
}
}
continue;
}
tx_shares
.push(shares.remove(&i).expect("processor didn't send share for another validator"));
tx_shares.push(vec![]);
for shares in &mut shares {
tx_shares.last_mut().unwrap().push(
shares.remove(&i).expect("processor didn't send share for another validator"),
);
}
}

vec![Transaction::DkgShares {

@@ -474,14 +482,14 @@ async fn handle_processor_message<D: Db, P: P2p>(
}
},
ProcessorMessage::Sign(msg) => match msg {
sign::ProcessorMessage::Preprocess { id, preprocess } => {
sign::ProcessorMessage::Preprocess { id, preprocesses } => {
if id.attempt == 0 {
MainDb::<D>::save_first_preprocess(
&mut txn,
network,
RecognizedIdType::Plan,
id.id,
preprocess,
preprocesses,
);

vec![]

@@ -489,17 +497,19 @@ async fn handle_processor_message<D: Db, P: P2p>(
vec![Transaction::SignPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
sign::ProcessorMessage::Share { id, share } => vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share,
signed: Transaction::empty_signed(),
})],
sign::ProcessorMessage::Share { id, shares } => {
vec![Transaction::SignShare(SignData {
plan: id.id,
attempt: id.attempt,
data: shares,
signed: Transaction::empty_signed(),
})]
}
sign::ProcessorMessage::Completed { key: _, id, tx } => {
let r = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
#[allow(non_snake_case)]

@@ -522,7 +532,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
},
ProcessorMessage::Coordinator(inner_msg) => match inner_msg {
coordinator::ProcessorMessage::SubstrateBlockAck { .. } => unreachable!(),
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocess } => {
coordinator::ProcessorMessage::BatchPreprocess { id, block, preprocesses } => {
log::info!(
"informed of batch (sign ID {}, attempt {}) for block {}",
hex::encode(id.id),

@@ -538,7 +548,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
spec.set().network,
RecognizedIdType::Batch,
id.id,
preprocess,
preprocesses,
);

// If this is the new key's first Batch, only create this TX once we verify all

@@ -550,6 +560,10 @@ async fn handle_processor_message<D: Db, P: P2p>(
if last_received != 0 {
// Decrease by 1, to get the ID of the Batch prior to this Batch
let prior_sets_last_batch = last_received - 1;
// TODO: If we're looping here, we're not handling the messages we need to in order
// to create the Batch we're looking for
// Don't have the processor yield the handover batch untill the batch before is
// acknowledged on-chain?
loop {
let successfully_verified = substrate::verify_published_batches::<D>(
&mut txn,

@@ -598,16 +612,16 @@ async fn handle_processor_message<D: Db, P: P2p>(
vec![Transaction::BatchPreprocess(SignData {
plan: id.id,
attempt: id.attempt,
data: preprocess,
data: preprocesses,
signed: Transaction::empty_signed(),
})]
}
}
coordinator::ProcessorMessage::BatchShare { id, share } => {
coordinator::ProcessorMessage::BatchShare { id, shares } => {
vec![Transaction::BatchShare(SignData {
plan: id.id,
attempt: id.attempt,
data: share.to_vec(),
data: shares.into_iter().map(|share| share.to_vec()).collect(),
signed: Transaction::empty_signed(),
})]
}

@@ -79,7 +79,7 @@ async fn handle_new_set<D: Db>(
.await?
.expect("validator selected for set yet didn't have an allocation")
.0;
set_data.push((participant, allocation / allocation_per_key_share));
set_data.push((participant, u16::try_from(allocation / allocation_per_key_share).unwrap()));
}
amortize_excess_key_shares(&mut set_data);
set_data

@@ -47,7 +47,8 @@ async fn dkg_test() {
let mut commitments = vec![0; 256];
OsRng.fill_bytes(&mut commitments);

let mut tx = Transaction::DkgCommitments(attempt, commitments, Transaction::empty_signed());
let mut tx =
Transaction::DkgCommitments(attempt, vec![commitments], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), key, 0);
txs.push(tx);
}

@@ -69,7 +70,7 @@ async fn dkg_test() {
.enumerate()
.map(|(i, tx)| {
if let Transaction::DkgCommitments(_, commitments, _) = tx {
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments.clone())
(Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
} else {
panic!("txs had non-commitments");
}

@@ -165,7 +166,7 @@ async fn dkg_test() {
if i != k {
let mut share = vec![0; 256];
OsRng.fill_bytes(&mut share);
shares.push(share);
shares.push(vec![share]);
}
}

@@ -213,7 +214,7 @@ async fn dkg_test() {
let shares_for = |i: usize| {
CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt: 0 },
shares: txs
shares: vec![txs
.iter()
.enumerate()
.filter_map(|(l, tx)| {

@@ -224,14 +225,14 @@ async fn dkg_test() {
let relative_i = i - (if i > l { 1 } else { 0 });
Some((
Participant::new((l + 1).try_into().unwrap()).unwrap(),
shares[relative_i].clone(),
shares[relative_i][0].clone(),
))
}
} else {
panic!("txs had non-shares");
}
})
.collect::<HashMap<_, _>>(),
.collect::<HashMap<_, _>>()],
})
};

@@ -36,7 +36,13 @@ fn random_sign_data<R: RngCore>(rng: &mut R) -> SignData {
plan,
attempt: random_u32(&mut OsRng),

data: random_vec(&mut OsRng, 512),
data: {
let mut res = vec![];
for _ in 0 .. ((rng.next_u64() % 255) + 1) {
res.push(random_vec(&mut OsRng, 512));
}
res
},

signed: random_signed(&mut OsRng),
}

@@ -46,6 +52,32 @@ fn test_read_write<RW: Eq + Debug + ReadWrite>(value: RW) {
assert_eq!(value, RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}

#[test]
fn tx_size_limit() {
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};

use tributary::TRANSACTION_SIZE_LIMIT;

let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
// Handwave the DKG Commitments size as the size of the commitments to the coefficients and
// 1024 bytes for all overhead
let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_commitments_size * max_key_shares_per_individual)
);

// Encryption key, PoP (2 elements), message
let elements_per_share = 4;
let handwaved_dkg_shares_size =
(elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
assert!(
u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
(handwaved_dkg_shares_size * max_key_shares_per_individual)
);
}
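To make the bound encoded by the test above concrete, here is a hedged worked example. It assumes MAX_KEY_SHARES_PER_SET is 150 (the value suggested by the commit message; the real constant lives in serai_client) and simply mirrors the test's arithmetic.

fn main() {
  // Assumed value; the actual constant is serai_client's MAX_KEY_SHARES_PER_SET.
  let max_key_shares_per_set: u32 = 150;
  // Mirrors the test's arithmetic for the maximum number of DKG coefficients (roughly the
  // threshold t).
  let max_dkg_coefficients = (max_key_shares_per_set * 2).div_ceil(3) + 1;
  // Capping any one validator at the remainder keeps a single validator below the ~34% mark.
  let max_key_shares_per_individual = max_key_shares_per_set - max_dkg_coefficients;
  assert_eq!(max_dkg_coefficients, 101);
  assert_eq!(max_key_shares_per_individual, 49);
  // 49 of 150 key shares is just under a third. Message sizes scale with key shares held, so
  // this cap is what lets the coordinator fix the transaction size constants asserted above.
}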

#[test]
fn serialize_sign_data() {
test_read_write(random_sign_data(&mut OsRng));

@@ -53,23 +85,37 @@ fn serialize_sign_data() {

#[test]
fn serialize_transaction() {
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
random_vec(&mut OsRng, 512),
random_signed(&mut OsRng),
));
{
let mut commitments = vec![random_vec(&mut OsRng, 512)];
for _ in 0 .. (OsRng.next_u64() % 100) {
let mut temp = commitments[0].clone();
OsRng.fill_bytes(&mut temp);
commitments.push(temp);
}
test_read_write(Transaction::DkgCommitments(
random_u32(&mut OsRng),
commitments,
random_signed(&mut OsRng),
));
}

{
// This supports a variable share length, yet share length is expected to be constant among
// shares
let share_len = usize::try_from(OsRng.next_u64() % 512).unwrap();
// This supports a variable share length, and variable amount of sent shares, yet share length
// and sent shares is expected to be constant among recipients
let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
// Create a valid vec of shares
let mut shares = vec![];
// Create up to 512 participants
for _ in 0 .. (OsRng.next_u64() % 512) {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
shares.push(share);
// Create up to 150 participants
for _ in 0 .. ((OsRng.next_u64() % 150) + 1) {
// Give each sender multiple shares
let mut sender_shares = vec![];
for _ in 0 .. amount_of_shares {
let mut share = vec![0; share_len];
OsRng.fill_bytes(&mut share);
sender_shares.push(share);
}
shares.push(sender_shares);
}

test_read_write(Transaction::DkgShares {

@@ -40,7 +40,7 @@ async fn tx_test() {
// Create the TX with a null signature so we can get its sig hash
let block_before_tx = tributaries[sender].1.tip().await;
let mut tx =
Transaction::DkgCommitments(attempt, commitments.clone(), Transaction::empty_signed());
Transaction::DkgCommitments(attempt, vec![commitments.clone()], Transaction::empty_signed());
tx.sign(&mut OsRng, spec.genesis(), &key, 0);

assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));

@@ -220,22 +220,23 @@ impl<D: Db> TributaryDb<D> {
) -> Option<Vec<u8>> {
getter.get(Self::data_key(genesis, data_spec, signer))
}
pub fn set_data(
fn set_data(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],
data_spec: &DataSpecification,
signer: <Ristretto as Ciphersuite>::G,
signer_shares: u16,
data: &[u8],
) -> u16 {
) -> (u16, u16) {
let received_key = Self::data_received_key(genesis, data_spec);
let mut received =
let prior_received =
u16::from_le_bytes(txn.get(&received_key).unwrap_or(vec![0; 2]).try_into().unwrap());
received += 1;
let received = prior_received + signer_shares;

txn.put(received_key, received.to_le_bytes());
txn.put(Self::data_key(genesis, data_spec, signer), data);

received
(prior_received, received)
}

fn event_key(id: &[u8], index: u32) -> Vec<u8> {

@@ -273,17 +274,22 @@ impl<D: Db> TributaryState<D> {
if TributaryDb::<D>::data(txn, spec.genesis(), data_spec, signer).is_some() {
panic!("accumulating data for a participant multiple times");
}
let received = TributaryDb::<D>::set_data(txn, spec.genesis(), data_spec, signer, data);
let signer_shares = {
let signer_i =
spec.i(signer).expect("transaction signed by a non-validator for this tributary");
u16::from(signer_i.end) - u16::from(signer_i.start)
};
let (prior_received, now_received) =
TributaryDb::<D>::set_data(txn, spec.genesis(), data_spec, signer, signer_shares, data);

// If we have all the needed commitments/preprocesses/shares, tell the processor
// TODO: This needs to be coded by weight, not by validator count
let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() };
if received == needed {
if (prior_received < needed) && (now_received >= needed) {
return Accumulation::Ready({
let mut data = HashMap::new();
for validator in spec.validators().iter().map(|validator| validator.0) {
data.insert(
spec.i(validator).unwrap(),
spec.i(validator).unwrap().start,
if let Some(data) = TributaryDb::<D>::data(txn, spec.genesis(), data_spec, validator) {
data
} else {

@@ -298,7 +304,8 @@ impl<D: Db> TributaryState<D> {
.remove(
&spec
.i(Ristretto::generator() * our_key.deref())
.expect("handling a message for a Tributary we aren't part of"),
.expect("handling a message for a Tributary we aren't part of")
.start,
)
.is_some()
{

@@ -66,30 +66,43 @@ use crate::tributary::TributarySpec;
1) The local view of received messages is static
2) The local process doesn't rebuild after a byzantine fault produces multiple blockchains

We assume the former. The latter is deemed acceptable but sub-optimal.
We assume the former. We can prevent the latter (TODO) by:

The benefit for this behavior is that on a validator's infrastructure collapsing, they can
successfully rebuild on a new system.
1) Defining a per-build entropy, used so long as a DB is used.
2) Checking the initially used commitments for the DKG align with the per-build entropy.

TODO: Replace this with entropy. If a validator happens to have their infrastructure fail at this
exact moment, they should just be kicked out and accept the loss. The risk of losing a private
key on rebuild, by a feature meant to enable rebuild, can't be successfully argued for.
If a rebuild occurs, which is the only way we could follow a distinct blockchain, our entropy
will change (preventing nonce reuse).

Not only do we need to use randomly selected entropy, we need to confirm our local preprocess
matches the on-chain preprocess before actually publishing our shares.
This will allow a validator to still participate in DKGs within a single build, even if they have
spontaneous reboots, and on collapse triggering a rebuild, they don't lose safety.

We also need to review how we're handling Processor preprocesses and likely implement the same
on-chain-preprocess-matches-presumed-preprocess check before publishing shares (though a delay of
the re-attempt protocol's trigger length would also be sufficient).
TODO: We also need to review how we're handling Processor preprocesses and likely implement the
same on-chain-preprocess-matches-presumed-preprocess check before publishing shares.
*/
pub(crate) struct DkgConfirmer;
impl DkgConfirmer {
// Convert the passed in HashMap, which uses the validators' start index for their `s` threshold
// shares, to the indexes needed for MuSig
fn from_threshold_i_to_musig_i(
spec: &TributarySpec,
mut old_map: HashMap<Participant, Vec<u8>>,
) -> HashMap<Participant, Vec<u8>> {
let mut new_map = HashMap::new();
for (new_i, validator) in spec.validators().into_iter().enumerate() {
let threshold_i = spec.i(validator.0).unwrap();
if let Some(value) = old_map.remove(&threshold_i.start) {
new_map.insert(Participant::new(u16::try_from(new_i + 1).unwrap()).unwrap(), value);
}
}
new_map
}
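A worked example of the conversion above, as a hedged sketch using plain u16 maps in place of frost's Participant type (the validator weights and values are made up): validators with weights 2 and 3 have threshold ranges starting at 1 and 3, while their MuSig indices are simply their 1-indexed positions in the validator list.

use std::collections::HashMap;

fn main() {
  // (threshold start index, weight) per validator, in validator order; hypothetical values.
  let validators = [(1u16, 2u16), (3, 3)];
  // Data keyed by each validator's threshold start index.
  let mut old_map = HashMap::from([(1u16, "a"), (3u16, "b")]);
  // Re-key by 1-indexed position in the validator list, which is what MuSig expects.
  let mut new_map = HashMap::new();
  for (new_i, (threshold_start, _weight)) in validators.iter().enumerate() {
    if let Some(value) = old_map.remove(threshold_start) {
      new_map.insert(u16::try_from(new_i + 1).unwrap(), value);
    }
  }
  assert_eq!(new_map, HashMap::from([(1u16, "a"), (2u16, "b")]));
}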

fn preprocess_internal(
spec: &TributarySpec,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
attempt: u32,
) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
// TODO: Does Substrate already have a validator-uniqueness check?
let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();

let context = musig_context(spec.set());

@@ -127,7 +140,7 @@ impl DkgConfirmer {
key_pair: &KeyPair,
) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
let machine = Self::preprocess_internal(spec, key, attempt).0;
let preprocesses = preprocesses
let preprocesses = Self::from_threshold_i_to_musig_i(spec, preprocesses)
.into_iter()
.map(|(p, preprocess)| {
machine

@@ -173,7 +186,7 @@ impl DkgConfirmer {
.expect("trying to complete a machine which failed to preprocess")
.0;

let shares = shares
let shares = Self::from_threshold_i_to_musig_i(spec, shares)
.into_iter()
.map(|(p, share)| {
machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)

@@ -1,10 +1,12 @@
use core::{ops::Deref, future::Future};
use std::collections::HashMap;

use zeroize::Zeroizing;

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::dkg::Participant;

use scale::{Encode, Decode};
use serai_client::{
Signature,
validator_sets::primitives::{ValidatorSet, KeyPair},

@@ -142,16 +144,53 @@ pub(crate) async fn handle_application_tx<
TributaryState::<D>::accumulate(txn, key, spec, data_spec, signed.signer, &bytes)
};

fn check_sign_data_len<D: Db>(
txn: &mut D::Transaction<'_>,
spec: &TributarySpec,
signer: <Ristretto as Ciphersuite>::G,
len: usize,
) -> Result<(), ()> {
let signer_i = spec.i(signer).unwrap();
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
fatal_slash::<D>(
txn,
spec.genesis(),
signer.to_bytes(),
"signer published a distinct amount of sign data than they had shares",
);
Err(())?;
}
Ok(())
}

fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
for (validator, _) in spec.validators() {
let range = spec.i(validator).unwrap();
let Some(all_segments) = data.remove(&range.start) else {
continue;
};
let mut data_vec = Vec::<_>::decode(&mut all_segments.as_slice()).unwrap();
for i in u16::from(range.start) .. u16::from(range.end) {
let i = Participant::new(i).unwrap();
data.insert(i, data_vec.remove(0));
}
}
}

match tx {
Transaction::DkgCommitments(attempt, bytes, signed) => {
Transaction::DkgCommitments(attempt, commitments, signed) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, signed.signer, commitments.len()) else {
return;
};
match handle(
txn,
&DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
bytes,
commitments.encode(),
&signed,
) {
Accumulation::Ready(DataSet::Participating(commitments)) => {
Accumulation::Ready(DataSet::Participating(mut commitments)) => {
log::info!("got all DkgCommitments for {}", hex::encode(genesis));
unflatten(spec, &mut commitments);
processors
.send(
spec.set().network,

@@ -170,29 +209,59 @@ pub(crate) async fn handle_application_tx<
}

Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
if shares.len() != (usize::from(spec.n()) - 1) {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
return;
}

let sender_i = spec
.i(signed.signer)
.expect("transaction added to tributary by signer who isn't a participant");
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);

if shares.len() != (usize::from(spec.n() - sender_is_len)) {
fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
return;
}
for shares in &shares {
if shares.len() != usize::from(sender_is_len) {
fatal_slash::<D>(
txn,
genesis,
signed.signer.to_bytes(),
"invalid amount of DKG shares by key shares",
);
return;
}
}

// Only save our share's bytes
let our_i = spec
.i(Ristretto::generator() * key.deref())
.expect("in a tributary we're not a validator for");

let bytes = if sender_i == our_i {
let our_shares = if sender_i == our_i {
vec![]
} else {
// 1-indexed to 0-indexed, handling the omission of the sender's own data
let relative_i = usize::from(u16::from(our_i) - 1) -
(if u16::from(our_i) > u16::from(sender_i) { 1 } else { 0 });
// Safe since we length-checked shares
shares.swap_remove(relative_i)
// 1-indexed to 0-indexed
let mut our_i_pos = u16::from(our_i.start) - 1;
// Handle the omission of the sender's own data
if u16::from(our_i.start) > u16::from(sender_i.start) {
our_i_pos -= sender_is_len;
}
let our_i_pos = usize::from(our_i_pos);
let shares = shares
.drain(
our_i_pos .. (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
)
.collect::<Vec<_>>();

// Transpose from our shares -> sender shares -> shares to
// sender shares -> our shares -> shares
let mut transposed = vec![vec![]; shares[0].len()];
for shares in shares {
for (sender_index, share) in shares.into_iter().enumerate() {
transposed[sender_index].push(share);
}
}
transposed
};
// Drop shares as it's been mutated into invalidity
drop(shares);
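The transposition above is the usual Vec-of-Vec transpose, flipping receiver-major data into sender-major data; a standalone sketch with made-up values, not the coordinator's types:

fn main() {
  // rows[r][s]: the r-th of our key shares, as produced for the s-th of the sender's key shares.
  let rows = vec![vec!["r0s0", "r0s1"], vec!["r1s0", "r1s1"], vec!["r2s0", "r2s1"]];
  // Transpose to transposed[s][r]: sender key share first, then our key share.
  let mut transposed = vec![vec![]; rows[0].len()];
  for row in rows {
    for (s, value) in row.into_iter().enumerate() {
      transposed[s].push(value);
    }
  }
  assert_eq!(transposed, vec![vec!["r0s0", "r1s0", "r2s0"], vec!["r0s1", "r1s1", "r2s1"]]);
}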

let confirmation_nonces = handle(

@@ -204,7 +273,7 @@ pub(crate) async fn handle_application_tx<
match handle(
txn,
&DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
bytes,
our_shares.encode(),
&signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {

@@ -217,12 +286,36 @@ pub(crate) async fn handle_application_tx<
};
TributaryDb::<D>::save_confirmation_nonces(txn, genesis, attempt, confirmation_nonces);

// shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
// - Each of the sender's shares
// - Each of the our shares
// - Each share
// We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
let mut expanded_shares = vec![];
for (sender_start_i, shares) in shares {
let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
if expanded_shares.len() <= our_share_i {
expanded_shares.push(HashMap::new());
}
expanded_shares[our_share_i].insert(
Participant::new(
u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
)
.unwrap(),
our_share,
);
}
}
}

processors
.send(
spec.set().network,
key_gen::CoordinatorMessage::Shares {
id: KeyGenId { set: spec.set(), attempt },
shares,
shares: expanded_shares,
},
)
.await;

@@ -294,6 +387,9 @@ pub(crate) async fn handle_application_tx<
}

Transaction::BatchPreprocess(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
match handle(
txn,
&DataSpecification {

@@ -301,10 +397,11 @@ pub(crate) async fn handle_application_tx<
label: BATCH_PREPROCESS,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(preprocesses)) => {
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
NonceDecider::<D>::selected_for_signing_batch(txn, genesis, data.plan);
let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
processors

@@ -322,6 +419,9 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::BatchShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
match handle(
txn,
&DataSpecification {

@@ -329,10 +429,11 @@ pub(crate) async fn handle_application_tx<
label: BATCH_SHARE,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
let key = TributaryDb::<D>::key_pair(txn, spec.set()).unwrap().0 .0.to_vec();
processors
.send(

@@ -353,6 +454,9 @@ pub(crate) async fn handle_application_tx<
}

Transaction::SignPreprocess(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
match handle(
txn,

@@ -361,10 +465,11 @@ pub(crate) async fn handle_application_tx<
label: SIGN_PREPROCESS,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(preprocesses)) => {
Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
unflatten(spec, &mut preprocesses);
NonceDecider::<D>::selected_for_signing_plan(txn, genesis, data.plan);
processors
.send(

@@ -388,6 +493,9 @@ pub(crate) async fn handle_application_tx<
}
}
Transaction::SignShare(data) => {
let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
return;
};
let key_pair = TributaryDb::<D>::key_pair(txn, spec.set());
match handle(
txn,

@@ -396,10 +504,11 @@ pub(crate) async fn handle_application_tx<
label: SIGN_SHARE,
attempt: data.attempt,
},
data.data,
data.data.encode(),
&data.signed,
) {
Accumulation::Ready(DataSet::Participating(shares)) => {
Accumulation::Ready(DataSet::Participating(mut shares)) => {
unflatten(spec, &mut shares);
processors
.send(
spec.set().network,

@@ -1,4 +1,4 @@
use core::ops::Deref;
use core::ops::{Deref, Range};
use std::io::{self, Read, Write};

use zeroize::Zeroizing;

@@ -24,7 +24,8 @@ use serai_client::{
#[rustfmt::skip]
use tributary::{
ReadWrite,
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait},
TRANSACTION_SIZE_LIMIT,
};

mod db;

@@ -45,7 +46,7 @@ pub struct TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
}

impl TributarySpec {

@@ -53,12 +54,10 @@ impl TributarySpec {
serai_block: [u8; 32],
start_time: u64,
set: ValidatorSet,
set_participants: Vec<(PublicKey, u64)>,
set_participants: Vec<(PublicKey, u16)>,
) -> TributarySpec {
let mut validators = vec![];
for (participant, shares) in set_participants {
// TODO: Ban invalid keys from being validators on the Serai side
// (make coordinator key a session key?)
let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
.expect("invalid key registered as participant");
validators.push((participant, shares));

@@ -88,31 +87,29 @@ impl TributarySpec {
}

pub fn n(&self) -> u16 {
// TODO: Support multiple key shares
// self.validators.iter().map(|(_, weight)| u16::try_from(weight).unwrap()).sum()
self.validators().len().try_into().unwrap()
self.validators.iter().map(|(_, weight)| weight).sum()
}

pub fn t(&self) -> u16 {
((2 * self.n()) / 3) + 1
}

pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Participant> {
pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
let mut i = 1;
// TODO: Support multiple key shares
for (validator, _weight) in &self.validators {
for (validator, weight) in &self.validators {
if validator == &key {
// return (i .. (i + weight)).to_vec();
return Some(Participant::new(i).unwrap());
return Some(Range {
start: Participant::new(i).unwrap(),
end: Participant::new(i + weight).unwrap(),
});
}
// i += weight;
i += 1;
i += weight;
}
None
}
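A worked example of the weighted indexing introduced above, as a hedged sketch with plain u16 ranges standing in for frost's Participant (the weights are hypothetical): validators holding 2, 1, and 3 key shares occupy the half-open index ranges 1..3, 3..4, and 4..7, and n is 6.

fn ranges(weights: &[u16]) -> Vec<std::ops::Range<u16>> {
  // Participant indices are 1-based; each validator occupies a contiguous run of `weight` indices.
  let mut i = 1;
  let mut res = vec![];
  for weight in weights {
    res.push(i .. (i + weight));
    i += weight;
  }
  res
}

fn main() {
  assert_eq!(ranges(&[2, 1, 3]), vec![1..3, 3..4, 4..7]);
  // n, the total number of key shares, is the sum of the weights.
  assert_eq!(ranges(&[2, 1, 3]).last().unwrap().end - 1, 6);
}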

pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
self.validators.clone()
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
}

pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {

@@ -160,9 +157,9 @@ impl TributarySpec {
let mut validators = Vec::with_capacity(validators_len);
for _ in 0 .. validators_len {
let key = Ristretto::read_G(reader)?;
let mut bond = [0; 8];
reader.read_exact(&mut bond)?;
validators.push((key, u64::from_le_bytes(bond)));
let mut weight = [0; 2];
reader.read_exact(&mut weight)?;
validators.push((key, u16::from_le_bytes(weight)));
}

Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators })

@@ -174,7 +171,7 @@ pub struct SignData {
pub plan: [u8; 32],
pub attempt: u32,

pub data: Vec<u8>,
pub data: Vec<Vec<u8>>,

pub signed: Signed,
}

@@ -189,11 +186,20 @@ impl ReadWrite for SignData {
let attempt = u32::from_le_bytes(attempt);

let data = {
let mut data_len = [0; 2];
reader.read_exact(&mut data_len)?;
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
reader.read_exact(&mut data)?;
data
let mut data_pieces = [0];
reader.read_exact(&mut data_pieces)?;
if data_pieces[0] == 0 {
Err(io::Error::new(io::ErrorKind::Other, "zero pieces of data in SignData"))?;
}
let mut all_data = vec![];
for _ in 0 .. data_pieces[0] {
let mut data_len = [0; 2];
reader.read_exact(&mut data_len)?;
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
reader.read_exact(&mut data)?;
all_data.push(data);
}
all_data
};

let signed = Signed::read(reader)?;

@@ -205,16 +211,21 @@ impl ReadWrite for SignData {
writer.write_all(&self.plan)?;
writer.write_all(&self.attempt.to_le_bytes())?;

if self.data.len() > u16::MAX.into() {
// Currently, the largest sign item would be a Monero transaction
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
// key image and proof (96 bytes)
// Even with all of that, we could support 227 inputs in a single TX
// Monero is limited to ~120 inputs per TX
Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
for data in &self.data {
if data.len() > u16::MAX.into() {
// Currently, the largest individual preproces is a Monero transaction
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
// key image and proof (96 bytes)
// Even with all of that, we could support 227 inputs in a single TX
// Monero is limited to ~120 inputs per TX
//
// Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
}
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
writer.write_all(data)?;
}
writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;
writer.write_all(&self.data)?;

self.signed.write(writer)
}
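For reference, the data portion written above has the layout: one count byte for the number of pieces, then each piece as a u16 little-endian length followed by its bytes. A hedged sketch of just that portion (a hypothetical helper, not the crate's ReadWrite implementation):

use std::io::{self, Write};

// Serialize a Vec<Vec<u8>> as: [u8 piece count][for each piece: u16 LE length, then the bytes].
fn write_pieces<W: Write>(writer: &mut W, pieces: &[Vec<u8>]) -> io::Result<()> {
  writer.write_all(&[u8::try_from(pieces.len()).unwrap()])?;
  for piece in pieces {
    writer.write_all(&u16::try_from(piece.len()).unwrap().to_le_bytes())?;
    writer.write_all(piece)?;
  }
  Ok(())
}

fn main() -> io::Result<()> {
  let mut buf = vec![];
  write_pieces(&mut buf, &[vec![0xaa, 0xbb], vec![0xcc]])?;
  // 2 pieces; first of length 2, then its bytes; second of length 1, then its byte.
  assert_eq!(buf, vec![2, 2, 0, 0xaa, 0xbb, 1, 0, 0xcc]);
  Ok(())
}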

@@ -223,10 +234,11 @@ impl ReadWrite for SignData {
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction {
// Once this completes successfully, no more instances should be created.
DkgCommitments(u32, Vec<u8>, Signed),
DkgCommitments(u32, Vec<Vec<u8>>, Signed),
DkgShares {
attempt: u32,
shares: Vec<Vec<u8>>,
// Receiving Participant, Sending Participant, Share
shares: Vec<Vec<Vec<u8>>>,
confirmation_nonces: [u8; 64],
signed: Signed,
},

@@ -273,10 +285,27 @@ impl ReadWrite for Transaction {
let attempt = u32::from_le_bytes(attempt);

let commitments = {
let mut commitments_len = [0; 2];
let mut commitments_len = [0; 1];
reader.read_exact(&mut commitments_len)?;
let mut commitments = vec![0; usize::from(u16::from_le_bytes(commitments_len))];
reader.read_exact(&mut commitments)?;
let commitments_len = usize::from(commitments_len[0]);
if commitments_len == 0 {
Err(io::Error::new(io::ErrorKind::Other, "zero commitments in DkgCommitments"))?;
}

let mut each_commitments_len = [0; 2];
reader.read_exact(&mut each_commitments_len)?;
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
Err(io::Error::new(
io::ErrorKind::Other,
"commitments present in transaction exceeded transaction size limit",
))?;
}
let mut commitments = vec![vec![]; commitments_len];
for commitments in &mut commitments {
*commitments = vec![0; each_commitments_len];
reader.read_exact(commitments)?;
}
commitments
};

@@ -291,20 +320,27 @@ impl ReadWrite for Transaction {
let attempt = u32::from_le_bytes(attempt);

let shares = {
let mut share_quantity = [0; 2];
let mut share_quantity = [0; 1];
reader.read_exact(&mut share_quantity)?;

let mut key_share_quantity = [0; 1];
reader.read_exact(&mut key_share_quantity)?;

let mut share_len = [0; 2];
reader.read_exact(&mut share_len)?;
let share_len = usize::from(u16::from_le_bytes(share_len));

let mut shares = vec![];
for _ in 0 .. u16::from_le_bytes(share_quantity) {
let mut share = vec![0; share_len];
reader.read_exact(&mut share)?;
shares.push(share);
let mut all_shares = vec![];
for _ in 0 .. share_quantity[0] {
let mut shares = vec![];
for _ in 0 .. key_share_quantity[0] {
let mut share = vec![0; share_len];
reader.read_exact(&mut share)?;
shares.push(share);
}
all_shares.push(shares);
}
shares
all_shares
};

let mut confirmation_nonces = [0; 64];

@@ -372,12 +408,22 @@ impl ReadWrite for Transaction {
Transaction::DkgCommitments(attempt, commitments, signed) => {
writer.write_all(&[0])?;
writer.write_all(&attempt.to_le_bytes())?;
if commitments.len() > u16::MAX.into() {
// t commitments and an encryption key mean a u16 is fine until a threshold > 2000 occurs
Err(io::Error::new(io::ErrorKind::Other, "dkg commitments exceeded 65535 bytes"))?;
if commitments.is_empty() {
Err(io::Error::new(io::ErrorKind::Other, "zero commitments in DkgCommitments"))?
}
writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
for commitments_i in commitments {
if commitments_i.len() != commitments[0].len() {
Err(io::Error::new(
io::ErrorKind::Other,
"commitments of differing sizes in DkgCommitments",
))?
}
}
writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
for commitments in commitments {
writer.write_all(commitments)?;
}
writer.write_all(&u16::try_from(commitments.len()).unwrap().to_le_bytes())?;
writer.write_all(commitments)?;
signed.write(writer)
}

@@ -385,14 +431,12 @@ impl ReadWrite for Transaction {
writer.write_all(&[1])?;
writer.write_all(&attempt.to_le_bytes())?;

// `shares` is a Vec which maps to a HashMap<Pariticpant, Vec<u8>> for any legitimate
// `DkgShares`. Since Participant has a range of 1 ..= u16::MAX, the length must be <
// u16::MAX. The only way for this to not be true if we were malicious, or if we read a
// `DkgShares` with a `shares.len() > u16::MAX`. The former is assumed untrue. The latter
// is impossible since we'll only read up to u16::MAX items.
writer.write_all(&u16::try_from(shares.len()).unwrap().to_le_bytes())?;

let share_len = shares.first().map(|share| share.len()).unwrap_or(0);
// `shares` is a Vec which is supposed to map to a HashMap<Pariticpant, Vec<u8>>. Since we
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
// This assumes at least one share is being sent to another party
writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
let share_len = shares[0][0].len();
// For BLS12-381 G2, this would be:
// - A 32-byte share
// - A 96-byte ephemeral key

@@ -400,9 +444,12 @@ impl ReadWrite for Transaction {
// Hence why this has to be u16
writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;

for share in shares {
assert_eq!(share.len(), share_len, "shares were of variable length");
writer.write_all(share)?;
for these_shares in shares {
assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
for share in these_shares {
assert_eq!(share.len(), share_len, "sent shares were of variable length");
writer.write_all(share)?;
}
}

writer.write_all(confirmation_nonces)?;

@@ -487,8 +534,10 @@ impl TransactionTrait for Transaction {

fn verify(&self) -> Result<(), TransactionError> {
if let Transaction::BatchShare(data) = self {
if data.data.len() != 32 {
Err(TransactionError::InvalidContent)?;
for data in &data.data {
if data.len() != 32 {
Err(TransactionError::InvalidContent)?;
}
}
}

@@ -54,6 +54,9 @@ impl<D: Db> NonceDecider<D> {
Self::set_nonce(txn, genesis, BATCH_CODE, batch, nonce_for);
nonce_for
}
// TODO: The processor won't yield shares for this if the signing protocol aborts. We need to
// detect when we're expecting shares for an aborted protocol and insert a dummy transaction
// there.
pub fn selected_for_signing_batch(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],

@@ -76,6 +79,7 @@ impl<D: Db> NonceDecider<D> {
}
res
}
// TODO: Same TODO as selected_for_signing_batch
pub fn selected_for_signing_plan(
txn: &mut D::Transaction<'_>,
genesis: [u8; 32],

@@ -47,13 +47,13 @@ pub(crate) use crate::tendermint::*;
pub mod tests;

/// Size limit for an individual transaction.
pub const TRANSACTION_SIZE_LIMIT: usize = 50_000;
pub const TRANSACTION_SIZE_LIMIT: usize = 3_000_000;
/// Amount of transactions a single account may have in the mempool.
pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
/// Block size limit.
// This targets a growth limit of roughly 5 GB a day, under load, in order to prevent a malicious
// This targets a growth limit of roughly 45 GB a day, under load, in order to prevent a malicious
// participant from flooding disks and causing out of space errors in order processes.
pub const BLOCK_SIZE_LIMIT: usize = 350_000;
pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
pub(crate) const BLOCK_MESSAGE: u8 = 1;