Route blame between Processor and Coordinator (#427)

* Have processor report errors during the DKG to the coordinator

* Add RemoveParticipant, InvalidDkgShare to coordinator

* Route DKG blame around coordinator

* Allow public construction of AdditionalBlameMachine

Necessary for upcoming work on handling DKG blame in the processor and
coordinator.
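
For orientation, a rough sketch of the blame-related key-gen messages this routing relies on, as they appear in the key_gen.rs diff below. The types here are simplified stand-ins; the real definitions carry the actual KeyGenId and Participant types from the codebase:

// Simplified stand-ins; the real messages use the proper KeyGenId/Participant types.
type KeyGenId = u64;
type Participant = u16;

// Coordinator -> processor: re-verify an accused participant's share (plus optional proof).
enum CoordinatorMessage {
  VerifyBlame {
    id: KeyGenId,
    accuser: Participant,
    accused: Participant,
    share: Vec<u8>,
    blame: Option<Vec<u8>>,
  },
}

// Processor -> coordinator: report DKG faults instead of panicking locally.
enum ProcessorMessage {
  // A participant's commitments failed to parse or verify.
  InvalidCommitments { id: KeyGenId, faulty: Participant },
  // A share was invalid; optionally carries a serialized proof for later verification.
  InvalidShare { id: KeyGenId, accuser: Participant, faulty: Participant, blame: Option<Vec<u8>> },
  // Verdict of VerifyBlame: the participant to hold at fault.
  Blame { id: KeyGenId, participant: Participant },
}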

Additionally fixes a publicly reachable panic when commitments parsed with one
ThresholdParams are used in a machine using another set of ThresholdParams.
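
As a rough illustration of that bug class (toy stand-ins only, not the dkg crate's actual types or the actual fix): parsing is driven by whichever ThresholdParams the caller supplies, so handing the parsed commitments to a machine built under different params could previously hit a panic; the hypothetical absorb_commitments below shows the kind of check that turns such a mismatch into an error instead.

// Illustrative, hypothetical stand-ins; the real fix lives in the dkg crate.
#[derive(Clone, Copy)]
struct ThresholdParams { t: u16, n: u16 }

// Commitments carry one group element per coefficient, i.e. `t` of them.
struct Commitments { per_coefficient: Vec<[u8; 32]> }

fn absorb_commitments(
  machine_params: ThresholdParams,
  commitments: &Commitments,
) -> Result<(), &'static str> {
  // Previously a reachable panic (e.g. a length/index mismatch) when the commitments were
  // parsed under a different `t`. Here the mismatch is checked and reported.
  if commitments.per_coefficient.len() != usize::from(machine_params.t) {
    return Err("commitments parsed with a different ThresholdParams");
  }
  Ok(())
}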

Renames InvalidProofOfKnowledge to InvalidCommitments.

* Remove unused error from dleq

* Implement support for VerifyBlame in the processor (see the sketch after this list)

* Have coordinator send the processor share message relevant to Blame

* Remove desync between processors reporting InvalidShare and ones reporting GeneratedKeyPair

* Route blame on sign between processor and coordinator

Doesn't yet act on it in coordinator.

* Move txn usage as needed for stable Rust to build

* Correct InvalidDkgShare serialization
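
Condensing the VerifyBlame handling added to key_gen.rs (visible in the diff below): the processor re-parses the accused's substrate and network shares, blaming the accused outright if parsing fails or trailing bytes remain; otherwise it runs each key's AdditionalBlameMachine and blames the accused if either machine faults them, or the accuser if neither does. A toy sketch of that verdict logic, with hypothetical stand-in types (substrate_blame/network_blame stand in for the AdditionalBlameMachine::blame calls):

// Condensed verdict logic of the new VerifyBlame handler (hypothetical stand-ins).
type Participant = u16;

enum Verdict {
  Blame(Participant),
}

fn verify_blame(
  accuser: Participant,
  accused: Participant,
  share_parses: bool,
  // Stand-ins for running each key's blame machine; each returns the participant at fault.
  substrate_blame: impl FnOnce() -> Participant,
  network_blame: impl FnOnce() -> Participant,
) -> Verdict {
  // An unparseable share (or one with trailing bytes) is immediately the accused's fault.
  if !share_parses {
    return Verdict::Blame(accused);
  }
  // If either key's blame machine faults the accused, slash them; otherwise the accusation
  // didn't hold up and the accuser is held at fault instead.
  if (substrate_blame() == accused) || (network_blame() == accused) {
    Verdict::Blame(accused)
  } else {
    Verdict::Blame(accuser)
  }
}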
Luke Parker, 2023-11-12 07:24:41 -05:00 (committed by GitHub)
parent d015ee96a3, commit 54f1929078
18 changed files with 931 additions and 281 deletions


@@ -9,7 +9,9 @@ use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Ristretto},
dkg::{Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*},
dkg::{
DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*,
},
};
use log::info;
@@ -28,7 +30,7 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!(
KeyGenDb {
ParamsDb: (key: &ValidatorSet) -> (ThresholdParams, u16),
ParamsDb: (set: &ValidatorSet) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have later attempts overwrite former ones
// A former attempt may become the finalized attempt, even if it doesn't do so in a timely manner
// Overwriting its commitments would be accordingly poor
@@ -155,18 +157,20 @@ impl<N: Network, D: Db> KeyGen<N, D> {
txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage,
) -> ProcessorMessage {
let context = |id: &KeyGenId| {
const SUBSTRATE_KEY_CONTEXT: &str = "substrate";
const NETWORK_KEY_CONTEXT: &str = "network";
let context = |id: &KeyGenId, key| {
// TODO2: Also embed the chain ID/genesis block
format!(
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}",
id.set.session, id.set.network, id.attempt
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}",
id.set.session, id.set.network, id.attempt, key,
)
};
let rng = |label, id: KeyGenId| {
let mut transcript = RecommendedTranscript::new(label);
transcript.append_message(b"entropy", &self.entropy);
transcript.append_message(b"context", context(&id));
transcript.append_message(b"context", context(&id, "rng"));
ChaCha20Rng::from_seed(transcript.rng_seed(b"rng"))
};
let coefficients_rng = |id| rng(b"Key Gen Coefficients", id);
@@ -184,8 +188,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
Participant::new(u16::from(params.i()) + s).unwrap(),
)
.unwrap();
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT))
.generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT))
.generate_coefficients(&mut rng);
machines.push((substrate.0, network.0));
let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap();
@@ -195,76 +201,91 @@ impl<N: Network, D: Db> KeyGen<N, D> {
(machines, commitments)
};
let secret_share_machines =
|id,
params: ThresholdParams,
(machines, our_commitments): (SecretShareMachines<N>, Vec<Vec<u8>>),
commitments: HashMap<Participant, Vec<u8>>| {
let mut rng = secret_shares_rng(id);
let secret_share_machines = |id,
params: ThresholdParams,
machines: SecretShareMachines<N>,
commitments: HashMap<Participant, Vec<u8>>|
-> Result<_, ProcessorMessage> {
let mut rng = secret_shares_rng(id);
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
params: ThresholdParams,
machine: SecretShareMachine<C>,
commitments_ref: &mut HashMap<Participant, &[u8]>,
) -> (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>) {
// Parse the commitments
let parsed = match commitments_ref
.iter_mut()
.map(|(i, commitments)| {
EncryptionKeyMessage::<C, Commitments<C>>::read(commitments, params)
.map(|commitments| (*i, commitments))
})
.collect()
{
Ok(commitments) => commitments,
Err(e) => todo!("malicious signer: {:?}", e),
};
match machine.generate_secret_shares(rng, parsed) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
for (i, our_commitments) in our_commitments.iter().enumerate() {
if m != i {
assert!(commitments_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments.as_ref(),
)
.is_none());
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
machine: SecretShareMachine<C>,
commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<
(KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
ProcessorMessage,
> {
match machine.generate_secret_shares(rng, commitments) {
Ok(res) => Ok(res),
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidShare { .. } => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG commitments: {e:?}")
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, substrate_machine, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, network_machine, &mut commitments_ref);
key_machines.push((substrate_machine, network_machine));
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
DkgError::InvalidCommitments(i) => {
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?
}
}
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
},
}
(key_machines, shares)
};
}
let mut substrate_commitments = HashMap::new();
let mut network_commitments = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let mut commitments = commitments[&i].as_slice();
substrate_commitments.insert(
i,
EncryptionKeyMessage::<Ristretto, Commitments<Ristretto>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
network_commitments.insert(
i,
EncryptionKeyMessage::<N::Curve, Commitments<N::Curve>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
if !commitments.is_empty() {
// Malicious Participant included extra bytes in their commitments
// (a potential DoS attack)
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?;
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap();
let mut substrate_commitments = substrate_commitments.clone();
substrate_commitments.remove(&actual_i);
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, id, substrate_machine, substrate_commitments)?;
let mut network_commitments = network_commitments.clone();
network_commitments.remove(&actual_i);
let (network_machine, network_shares) =
handle_machine(&mut rng, id, network_machine, network_commitments.clone())?;
key_machines.push((substrate_machine, network_machine));
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
Ok((key_machines, shares))
};
match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => {
@@ -284,7 +305,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
ProcessorMessage::Commitments { id, commitments }
}
CoordinatorMessage::Commitments { id, commitments } => {
CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
@@ -301,17 +322,29 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let prior = self
let (prior, our_commitments) = self
.active_commit
.remove(&id.set)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
assert!(commitments
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments,
)
.is_none());
}
CommitmentsDb::set(txn, &id, &commitments);
let (machines, shares) = secret_share_machines(id, params, prior, commitments);
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
Err(e) => e,
}
}
CoordinatorMessage::Shares { id, shares } => {
@@ -321,36 +354,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity);
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
let prior = key_gen_machines(id, params, share_quantity).0;
let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
.expect("got Shares for a key gen which faulted");
(machines, shares)
});
let mut rng = share_rng(id);
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
// These are the params of our first share, not this machine's shares
params: ThresholdParams,
m: usize,
machine: KeyMachine<C>,
shares_ref: &mut HashMap<Participant, &[u8]>,
) -> ThresholdCore<C> {
// Parse the shares
let shares = match shares_ref
.iter_mut()
.map(|(i, share)| {
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map(|share| (*i, share))
})
.collect()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
) -> Result<ThresholdCore<C>, ProcessorMessage> {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(),
)
.unwrap();
// TODO2: Handle the blame machine properly
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
})
.complete()
// Parse the shares
let mut shares = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(share) = shares_ref.get_mut(&i) else { continue };
shares.insert(
i,
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map_err(|_| {
ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None }
})?,
);
}
Ok(
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidCommitments(_) => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG shares: {e:?}")
}
DkgError::InvalidShare { participant, blame } => {
Err(ProcessorMessage::InvalidShare {
id,
accuser: params.i(),
faulty: participant,
blame: Some(blame.map(|blame| blame.serialize())).flatten(),
})?
}
},
})
.complete(),
)
}
let mut substrate_keys = vec![];
@@ -371,12 +438,27 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
}
let these_substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let these_network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
let these_substrate_keys =
match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
let these_network_keys =
match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
for (_, shares) in shares_ref {
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(shares) = shares_ref.get(&i) else { continue };
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
return ProcessorMessage::InvalidShare {
id,
accuser: these_substrate_keys.params().i(),
faulty: i,
blame: None,
};
}
}
@@ -407,6 +489,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
}
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.set).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
Ristretto,
SecretShare<<Ristretto as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
let Ok(network_share) = EncryptedMessage::<
N::Curve,
SecretShare<<N::Curve as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
if !share_ref.is_empty() {
return ProcessorMessage::Blame { id, participant: accused };
}
let mut substrate_commitment_msgs = HashMap::new();
let mut network_commitment_msgs = HashMap::new();
let commitments = CommitmentsDb::get(txn, &id).unwrap();
for (i, commitments) in commitments {
let mut commitments = commitments.as_slice();
substrate_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
network_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
}
// There is a mild DoS here where someone with a valid blame bloats it to the maximum size
// Given the ambiguity, and the limited potential to DoS (this being called means *someone* is
// getting fatally slashed), there's no need to ensure the blame is minimal
let substrate_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let network_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let substrate_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, SUBSTRATE_KEY_CONTEXT),
params.n(),
substrate_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, substrate_share, substrate_blame);
let network_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, NETWORK_KEY_CONTEXT),
params.n(),
network_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, network_share, network_blame);
// If the accused was blamed for either, mark them as at fault
if (substrate_blame == accused) || (network_blame == accused) {
return ProcessorMessage::Blame { id, participant: accused };
}
ProcessorMessage::Blame { id, participant: accuser }
}
}
}