Route blame between Processor and Coordinator (#427)

* Have processor report errors during the DKG to the coordinator

* Add RemoveParticipant, InvalidDkgShare to coordinator

* Route DKG blame around coordinator

* Allow public construction of AdditionalBlameMachine

Necessary for upcoming work on handling DKG blame in the processor and
coordinator.

Additionally fixes a publicly reachable panic when commitments parsed with one
ThresholdParams are used in a machine using another set of ThresholdParams.

Renames InvalidProofOfKnowledge to InvalidCommitments.

* Remove unused error from dleq

* Implement support for VerifyBlame in the processor

* Have coordinator send the processor share message relevant to Blame

* Remove desync between processors reporting InvalidShare and ones reporting GeneratedKeyPair

* Route blame on sign between processor and coordinator

Doesn't yet act on it in coordinator.

* Move txn usage as needed for stable Rust to build

* Correct InvalidDkgShare serialization
This commit is contained in:
Luke Parker
2023-11-12 07:24:41 -05:00
committed by GitHub
parent d015ee96a3
commit 54f1929078
18 changed files with 931 additions and 281 deletions

View File

@@ -33,11 +33,29 @@ pub mod key_gen {
pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate?
GenerateKey { id: KeyGenId, params: ThresholdParams, shares: u16 },
GenerateKey {
id: KeyGenId,
params: ThresholdParams,
shares: u16,
},
// Received commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: HashMap<Participant, Vec<u8>> },
Commitments {
id: KeyGenId,
commitments: HashMap<Participant, Vec<u8>>,
},
// Received shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
/// Instruction to verify a blame accusation.
VerifyBlame {
id: KeyGenId,
accuser: Participant,
accused: Participant,
share: Vec<u8>,
blame: Option<Vec<u8>>,
},
}
impl CoordinatorMessage {
@@ -49,11 +67,39 @@ pub mod key_gen {
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Created commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: Vec<Vec<u8>> },
Commitments {
id: KeyGenId,
commitments: Vec<Vec<u8>>,
},
// Participant published invalid commitments.
InvalidCommitments {
id: KeyGenId,
faulty: Participant,
},
// Created shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
Shares {
id: KeyGenId,
shares: Vec<HashMap<Participant, Vec<u8>>>,
},
// Participant published an invalid share.
#[rustfmt::skip]
InvalidShare {
id: KeyGenId,
accuser: Participant,
faulty: Participant,
blame: Option<Vec<u8>>,
},
// Resulting keys from the specified key generation protocol.
GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec<u8> },
GeneratedKeyPair {
id: KeyGenId,
substrate_key: [u8; 32],
network_key: Vec<u8>,
},
// Blame this participant.
Blame {
id: KeyGenId,
participant: Participant,
},
}
}
@@ -94,8 +140,10 @@ pub mod sign {
}
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Participant sent an invalid message during the sign protocol.
InvalidParticipant { id: SignId, participant: Participant },
// Created preprocess for the specified signing protocol.
Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },
// Signed share for the specified signing protocol.
@@ -152,9 +200,10 @@ pub mod coordinator {
pub id: [u8; 32],
}
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Serialize, Deserialize)]
pub enum ProcessorMessage {
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
InvalidParticipant { id: BatchSignId, participant: Participant },
BatchPreprocess { id: BatchSignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
BatchShare { id: BatchSignId, shares: Vec<[u8; 32]> },
}
@@ -275,6 +324,7 @@ impl CoordinatorMessage {
key_gen::CoordinatorMessage::GenerateKey { id, .. } => (0, id),
key_gen::CoordinatorMessage::Commitments { id, .. } => (1, id),
key_gen::CoordinatorMessage::Shares { id, .. } => (2, id),
key_gen::CoordinatorMessage::VerifyBlame { id, .. } => (3, id),
};
let mut res = vec![COORDINATOR_UID, TYPE_KEY_GEN_UID, sub];
@@ -340,8 +390,11 @@ impl ProcessorMessage {
let (sub, id) = match msg {
// Unique since KeyGenId
key_gen::ProcessorMessage::Commitments { id, .. } => (0, id),
key_gen::ProcessorMessage::Shares { id, .. } => (1, id),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (2, id),
key_gen::ProcessorMessage::InvalidCommitments { id, .. } => (1, id),
key_gen::ProcessorMessage::Shares { id, .. } => (2, id),
key_gen::ProcessorMessage::InvalidShare { id, .. } => (3, id),
key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => (4, id),
key_gen::ProcessorMessage::Blame { id, .. } => (5, id),
};
let mut res = vec![PROCESSSOR_UID, TYPE_KEY_GEN_UID, sub];
@@ -351,10 +404,11 @@ impl ProcessorMessage {
ProcessorMessage::Sign(msg) => {
let (sub, id) = match msg {
// Unique since SignId
sign::ProcessorMessage::Preprocess { id, .. } => (0, id.encode()),
sign::ProcessorMessage::Share { id, .. } => (1, id.encode()),
sign::ProcessorMessage::InvalidParticipant { id, .. } => (0, id.encode()),
sign::ProcessorMessage::Preprocess { id, .. } => (1, id.encode()),
sign::ProcessorMessage::Share { id, .. } => (2, id.encode()),
// Unique since a processor will only sign a TX once
sign::ProcessorMessage::Completed { id, .. } => (2, id.to_vec()),
sign::ProcessorMessage::Completed { id, .. } => (3, id.to_vec()),
};
let mut res = vec![PROCESSSOR_UID, TYPE_SIGN_UID, sub];
@@ -367,8 +421,9 @@ impl ProcessorMessage {
(0, (network, block).encode())
}
// Unique since BatchSignId
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (1, id.encode()),
coordinator::ProcessorMessage::BatchShare { id, .. } => (2, id.encode()),
coordinator::ProcessorMessage::InvalidParticipant { id, .. } => (1, id.encode()),
coordinator::ProcessorMessage::BatchPreprocess { id, .. } => (2, id.encode()),
coordinator::ProcessorMessage::BatchShare { id, .. } => (3, id.encode()),
};
let mut res = vec![PROCESSSOR_UID, TYPE_COORDINATOR_UID, sub];

View File

@@ -9,7 +9,9 @@ use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::group::GroupEncoding;
use frost::{
curve::{Ciphersuite, Ristretto},
dkg::{Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*},
dkg::{
DkgError, Participant, ThresholdParams, ThresholdCore, ThresholdKeys, encryption::*, frost::*,
},
};
use log::info;
@@ -28,7 +30,7 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!(
KeyGenDb {
ParamsDb: (key: &ValidatorSet) -> (ThresholdParams, u16),
ParamsDb: (set: &ValidatorSet) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have latter attempts overwrite former
// A former attempt may become the finalized attempt, even if it doesn't in a timely manner
// Overwriting its commitments would be accordingly poor
@@ -155,18 +157,20 @@ impl<N: Network, D: Db> KeyGen<N, D> {
txn: &mut D::Transaction<'_>,
msg: CoordinatorMessage,
) -> ProcessorMessage {
let context = |id: &KeyGenId| {
const SUBSTRATE_KEY_CONTEXT: &str = "substrate";
const NETWORK_KEY_CONTEXT: &str = "network";
let context = |id: &KeyGenId, key| {
// TODO2: Also embed the chain ID/genesis block
format!(
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}",
id.set.session, id.set.network, id.attempt
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}",
id.set.session, id.set.network, id.attempt, key,
)
};
let rng = |label, id: KeyGenId| {
let mut transcript = RecommendedTranscript::new(label);
transcript.append_message(b"entropy", &self.entropy);
transcript.append_message(b"context", context(&id));
transcript.append_message(b"context", context(&id, "rng"));
ChaCha20Rng::from_seed(transcript.rng_seed(b"rng"))
};
let coefficients_rng = |id| rng(b"Key Gen Coefficients", id);
@@ -184,8 +188,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
Participant::new(u16::from(params.i()) + s).unwrap(),
)
.unwrap();
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let substrate = KeyGenMachine::new(params, context(&id, SUBSTRATE_KEY_CONTEXT))
.generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id, NETWORK_KEY_CONTEXT))
.generate_coefficients(&mut rng);
machines.push((substrate.0, network.0));
let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap();
@@ -195,76 +201,91 @@ impl<N: Network, D: Db> KeyGen<N, D> {
(machines, commitments)
};
let secret_share_machines =
|id,
params: ThresholdParams,
(machines, our_commitments): (SecretShareMachines<N>, Vec<Vec<u8>>),
commitments: HashMap<Participant, Vec<u8>>| {
let mut rng = secret_shares_rng(id);
let secret_share_machines = |id,
params: ThresholdParams,
machines: SecretShareMachines<N>,
commitments: HashMap<Participant, Vec<u8>>|
-> Result<_, ProcessorMessage> {
let mut rng = secret_shares_rng(id);
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
params: ThresholdParams,
machine: SecretShareMachine<C>,
commitments_ref: &mut HashMap<Participant, &[u8]>,
) -> (KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>) {
// Parse the commitments
let parsed = match commitments_ref
.iter_mut()
.map(|(i, commitments)| {
EncryptionKeyMessage::<C, Commitments<C>>::read(commitments, params)
.map(|commitments| (*i, commitments))
})
.collect()
{
Ok(commitments) => commitments,
Err(e) => todo!("malicious signer: {:?}", e),
};
match machine.generate_secret_shares(rng, parsed) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
for (i, our_commitments) in our_commitments.iter().enumerate() {
if m != i {
assert!(commitments_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments.as_ref(),
)
.is_none());
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
machine: SecretShareMachine<C>,
commitments: HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>>,
) -> Result<
(KeyMachine<C>, HashMap<Participant, EncryptedMessage<C, SecretShare<C::F>>>),
ProcessorMessage,
> {
match machine.generate_secret_shares(rng, commitments) {
Ok(res) => Ok(res),
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidShare { .. } => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG commitments: {e:?}")
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, substrate_machine, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, network_machine, &mut commitments_ref);
key_machines.push((substrate_machine, network_machine));
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
DkgError::InvalidCommitments(i) => {
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?
}
}
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
},
}
(key_machines, shares)
};
}
let mut substrate_commitments = HashMap::new();
let mut network_commitments = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let mut commitments = commitments[&i].as_slice();
substrate_commitments.insert(
i,
EncryptionKeyMessage::<Ristretto, Commitments<Ristretto>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
network_commitments.insert(
i,
EncryptionKeyMessage::<N::Curve, Commitments<N::Curve>>::read(&mut commitments, params)
.map_err(|_| ProcessorMessage::InvalidCommitments { id, faulty: i })?,
);
if !commitments.is_empty() {
// Malicious Participant included extra bytes in their commitments
// (a potential DoS attack)
Err(ProcessorMessage::InvalidCommitments { id, faulty: i })?;
}
}
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let actual_i = Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap();
let mut substrate_commitments = substrate_commitments.clone();
substrate_commitments.remove(&actual_i);
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, id, substrate_machine, substrate_commitments)?;
let mut network_commitments = network_commitments.clone();
network_commitments.remove(&actual_i);
let (network_machine, network_shares) =
handle_machine(&mut rng, id, network_machine, network_commitments.clone())?;
key_machines.push((substrate_machine, network_machine));
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
Ok((key_machines, shares))
};
match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => {
@@ -284,7 +305,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
ProcessorMessage::Commitments { id, commitments }
}
CoordinatorMessage::Commitments { id, commitments } => {
CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
@@ -301,17 +322,29 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let prior = self
let (prior, our_commitments) = self
.active_commit
.remove(&id.set)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
assert!(commitments
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments,
)
.is_none());
}
CommitmentsDb::set(txn, &id, &commitments);
let (machines, shares) = secret_share_machines(id, params, prior, commitments);
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
Err(e) => e,
}
}
CoordinatorMessage::Shares { id, shares } => {
@@ -321,36 +354,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity);
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
let prior = key_gen_machines(id, params, share_quantity).0;
let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
.expect("got Shares for a key gen which faulted");
(machines, shares)
});
let mut rng = share_rng(id);
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
id: KeyGenId,
// These are the params of our first share, not this machine's shares
params: ThresholdParams,
m: usize,
machine: KeyMachine<C>,
shares_ref: &mut HashMap<Participant, &[u8]>,
) -> ThresholdCore<C> {
// Parse the shares
let shares = match shares_ref
.iter_mut()
.map(|(i, share)| {
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map(|share| (*i, share))
})
.collect()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
) -> Result<ThresholdCore<C>, ProcessorMessage> {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap(),
)
.unwrap();
// TODO2: Handle the blame machine properly
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
})
.complete()
// Parse the shares
let mut shares = HashMap::new();
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(share) = shares_ref.get_mut(&i) else { continue };
shares.insert(
i,
EncryptedMessage::<C, SecretShare<C::F>>::read(share, params).map_err(|_| {
ProcessorMessage::InvalidShare { id, accuser: params.i(), faulty: i, blame: None }
})?,
);
}
Ok(
(match machine.calculate_share(rng, shares) {
Ok(res) => res,
Err(e) => match e {
DkgError::ZeroParameter(_, _) |
DkgError::InvalidThreshold(_, _) |
DkgError::InvalidParticipant(_, _) |
DkgError::InvalidSigningSet |
DkgError::InvalidCommitments(_) => unreachable!("{e:?}"),
DkgError::InvalidParticipantQuantity(_, _) |
DkgError::DuplicatedParticipant(_) |
DkgError::MissingParticipant(_) => {
panic!("coordinator sent invalid DKG shares: {e:?}")
}
DkgError::InvalidShare { participant, blame } => {
Err(ProcessorMessage::InvalidShare {
id,
accuser: params.i(),
faulty: participant,
blame: Some(blame.map(|blame| blame.serialize())).flatten(),
})?
}
},
})
.complete(),
)
}
let mut substrate_keys = vec![];
@@ -371,12 +438,27 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
}
let these_substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let these_network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
let these_substrate_keys =
match handle_machine(&mut rng, id, params, m, machines.0, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
let these_network_keys =
match handle_machine(&mut rng, id, params, m, machines.1, &mut shares_ref) {
Ok(keys) => keys,
Err(msg) => return msg,
};
for (_, shares) in shares_ref {
for i in 1 ..= params.n() {
let i = Participant::new(i).unwrap();
let Some(shares) = shares_ref.get(&i) else { continue };
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
return ProcessorMessage::InvalidShare {
id,
accuser: these_substrate_keys.params().i(),
faulty: i,
blame: None,
};
}
}
@@ -407,6 +489,70 @@ impl<N: Network, D: Db> KeyGen<N, D> {
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
}
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.set).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
Ristretto,
SecretShare<<Ristretto as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
let Ok(network_share) = EncryptedMessage::<
N::Curve,
SecretShare<<N::Curve as Ciphersuite>::F>,
>::read(&mut share_ref, params) else {
return ProcessorMessage::Blame { id, participant: accused };
};
if !share_ref.is_empty() {
return ProcessorMessage::Blame { id, participant: accused };
}
let mut substrate_commitment_msgs = HashMap::new();
let mut network_commitment_msgs = HashMap::new();
let commitments = CommitmentsDb::get(txn, &id).unwrap();
for (i, commitments) in commitments {
let mut commitments = commitments.as_slice();
substrate_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
network_commitment_msgs
.insert(i, EncryptionKeyMessage::<_, _>::read(&mut commitments, params).unwrap());
}
// There is a mild DoS here where someone with a valid blame bloats it to the maximum size
// Given the ambiguity, and the limited potential to DoS (this being called means *someone* is
// getting fatally slashed), there's no need to ensure blame is minimal
let substrate_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let network_blame =
blame.clone().and_then(|blame| EncryptionKeyProof::read(&mut blame.as_slice()).ok());
let substrate_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, SUBSTRATE_KEY_CONTEXT),
params.n(),
substrate_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, substrate_share, substrate_blame);
let network_blame = AdditionalBlameMachine::new(
&mut rand_core::OsRng,
context(&id, NETWORK_KEY_CONTEXT),
params.n(),
network_commitment_msgs,
)
.unwrap()
.blame(accuser, accused, network_share, network_blame);
// If the accused was blamed for either, mark them as at fault
if (substrate_blame == accused) || (network_blame == accused) {
return ProcessorMessage::Blame { id, participant: accused };
}
ProcessorMessage::Blame { id, participant: accuser }
}
}
}

View File

@@ -938,7 +938,8 @@ impl<D: Db, N: Network> MultisigManager<D, N> {
}
// Save the plans created while scanning
// TODO: Should we combine all of these plans?
// TODO: Should we combine all of these plans to reduce the fees incurred from their
// execution? They're refunds and forwards. Neither should need isolate Plan/Eventualities.
MultisigsDb::<N, D>::set_plans_from_scanning(txn, block_number, plans);
// If any outputs were delayed, append them into this block

View File

@@ -5,7 +5,7 @@ use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
ThresholdKeys,
ThresholdKeys, FrostError,
sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine},
};
@@ -470,7 +470,7 @@ impl<N: Network, D: Db> Signer<N, D> {
msg: CoordinatorMessage,
) -> Option<ProcessorMessage> {
match msg {
CoordinatorMessage::Preprocesses { id, mut preprocesses } => {
CoordinatorMessage::Preprocesses { id, preprocesses } => {
if self.verify_id(&id).is_err() {
return None;
}
@@ -487,23 +487,22 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine,
};
let preprocesses = match preprocesses
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
todo!("malicious signer: extra bytes");
}
res
})
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
let mut parsed = HashMap::new();
for l in {
let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();
let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
};
if !preprocess_ref.is_empty() {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
}
parsed.insert(l, res);
}
let preprocesses = parsed;
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
@@ -520,7 +519,18 @@ impl<N: Network, D: Db> Signer<N, D> {
// Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
};
if m == 0 {
signature_machine = Some(machine);
@@ -534,7 +544,7 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(ProcessorMessage::Share { id, shares: serialized_shares })
}
CoordinatorMessage::Shares { id, mut shares } => {
CoordinatorMessage::Shares { id, shares } => {
if self.verify_id(&id).is_err() {
return None;
}
@@ -557,21 +567,22 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine,
};
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
let res = machine.read_share::<&[u8]>(&mut share_ref).map(|share| (l, share));
if !share_ref.is_empty() {
todo!("malicious signer: extra bytes");
}
res
})
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
let mut parsed = HashMap::new();
for l in {
let mut keys = shares.keys().cloned().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut share_ref = shares.get(&l).unwrap().as_slice();
let Ok(res) = machine.read_share(&mut share_ref) else {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
};
if !share_ref.is_empty() {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l });
}
parsed.insert(l, res);
}
let mut shares = parsed;
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
@@ -579,7 +590,18 @@ impl<N: Network, D: Db> Signer<N, D> {
let tx = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some(ProcessorMessage::InvalidParticipant { id, participant: l })
}
},
};
// Save the transaction in case it's needed for recovery

View File

@@ -6,7 +6,7 @@ use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys,
ThresholdKeys, FrostError,
algorithm::Algorithm,
sign::{
Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
@@ -246,7 +246,7 @@ impl<D: Db> SubstrateSigner<D> {
msg: CoordinatorMessage,
) -> Option<messages::ProcessorMessage> {
match msg {
CoordinatorMessage::BatchPreprocesses { id, mut preprocesses } => {
CoordinatorMessage::BatchPreprocesses { id, preprocesses } => {
if self.verify_id(&id).is_err() {
return None;
}
@@ -263,23 +263,22 @@ impl<D: Db> SubstrateSigner<D> {
Some(preprocess) => preprocess,
};
let preprocesses = match preprocesses
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
todo!("malicious signer: extra bytes");
}
res
})
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
let mut parsed = HashMap::new();
for l in {
let mut keys = preprocesses.keys().cloned().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut preprocess_ref = preprocesses.get(&l).unwrap().as_slice();
let Ok(res) = machines[0].read_preprocess(&mut preprocess_ref) else {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
};
if !preprocess_ref.is_empty() {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
}
parsed.insert(l, res);
}
let preprocesses = parsed;
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
@@ -296,7 +295,18 @@ impl<D: Db> SubstrateSigner<D> {
let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into())
}
},
};
if m == 0 {
signature_machine = Some(machine);
@@ -314,7 +324,7 @@ impl<D: Db> SubstrateSigner<D> {
Some((ProcessorMessage::BatchShare { id, shares: serialized_shares }).into())
}
CoordinatorMessage::BatchShares { id, mut shares } => {
CoordinatorMessage::BatchShares { id, shares } => {
if self.verify_id(&id).is_err() {
return None;
}
@@ -337,21 +347,22 @@ impl<D: Db> SubstrateSigner<D> {
Some(signing) => signing,
};
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
let res = machine.read_share::<&[u8]>(&mut share_ref).map(|share| (l, share));
if !share_ref.is_empty() {
todo!("malicious signer: extra bytes");
}
res
})
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
let mut parsed = HashMap::new();
for l in {
let mut keys = shares.keys().cloned().collect::<Vec<_>>();
keys.sort();
keys
} {
let mut share_ref = shares.get(&l).unwrap().as_slice();
let Ok(res) = machine.read_share(&mut share_ref) else {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
};
if !share_ref.is_empty() {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into());
}
parsed.insert(l, res);
}
let mut shares = parsed;
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
@@ -359,7 +370,18 @@ impl<D: Db> SubstrateSigner<D> {
let sig = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
Err(e) => match e {
FrostError::InternalError(_) |
FrostError::InvalidParticipant(_, _) |
FrostError::InvalidSigningSet(_) |
FrostError::InvalidParticipantQuantity(_, _) |
FrostError::DuplicatedParticipant(_) |
FrostError::MissingParticipant(_) => unreachable!(),
FrostError::InvalidPreprocess(l) | FrostError::InvalidShare(l) => {
return Some((ProcessorMessage::InvalidParticipant { id, participant: l }).into())
}
},
};
info!("signed batch {} with attempt #{}", hex::encode(id.id), id.attempt);