Support multiple key shares per validator (#416)

* Update the coordinator to give key shares based on weight, not based on existence

Participants are now identified by their starting index. While this compiles,
the following is unimplemented:

1) A conversion for DKG `i` values. The code currently assumes the threshold
`i` values will also be used, unchanged, by the MuSig signature which confirms
the DKG.
2) Expansion from compressed values to full values before forwarding to the
processor.
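
As a rough illustration of the starting-index scheme (a hypothetical helper, not code from this commit): a validator with weight `w` whose shares start at index `start` owns the contiguous Participant indices `start .. start + w`, so index assignment is a running sum over weights.

// Hypothetical sketch: assign each validator a starting Participant index
// from its weight (number of key shares). Participant indices are 1-based.
fn starting_indices(weights: &[u16]) -> Vec<u16> {
  let mut next = 1u16;
  weights
    .iter()
    .map(|weight| {
      let start = next;
      next += *weight;
      start
    })
    .collect()
}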

* Add a fn to the DkgConfirmer to convert `i` values as needed

Also removes TODOs regarding Serai ensuring validator key uniqueness +
validity. The current infra achieves both.
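
A hedged sketch of the conversion this fn performs (names hypothetical, indices simplified to bare `u16`s): a DKG `i` value is a per-share index, while the MuSig confirmation uses one index per validator, so the conversion maps a share index back to the position of the validator owning it.

// Hypothetical: `starts` is each validator's starting share index, ascending.
// Returns the owning validator's 1-based MuSig index for a DKG share index.
fn dkg_i_to_musig_i(starts: &[u16], dkg_i: u16) -> Option<u16> {
  let position = starts.iter().rposition(|start| *start <= dkg_i)?;
  Some(u16::try_from(position).unwrap() + 1)
}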

* Have the Tributary DB track participation by shares, not by count

* Prevent a node from obtaining 34% of the maximum amount of key shares

This is mainly intended to set a bound on message sizes in the coordinator.
Message sizes are amplified by the number of key shares held, so setting an
upper bound on that number lets the coordinator derive size constants. While
that upper bound could be 150, that'd be unreasonable and would increase the
potential for DoS attacks.
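
A minimal sketch of the bound (constant and helper names hypothetical; the 150 figure is the per-set maximum mentioned above): capping any single validator strictly below 34% of the per-set maximum bounds how many shares, and thus how much message data, one node can contribute.

// Hypothetical sketch: cap a validator's key shares strictly below 34% of
// the maximum shares a set may have (34% of 150 is 51, so the cap is 50).
const MAX_KEY_SHARES_PER_SET: u16 = 150;
fn capped_key_shares(allocated_shares: u16) -> u16 {
  let cap = ((MAX_KEY_SHARES_PER_SET * 34) / 100) - 1;
  allocated_shares.min(cap)
}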

* Correct the mechanism to detect if sufficient accumulation has occurred

It used to check if the latest accumulation hit the required threshold exactly.
Since accumulations may now jump past the required threshold, the correct check
is that the threshold wasn't previously met and is met now.
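
In code form, the corrected condition is a threshold-crossing check rather than an equality check on the latest total (a minimal sketch):

// Sketch: accumulation is sufficient exactly when this addition crosses the
// threshold, even if the new total jumps well past it.
fn newly_sufficient(prior: u64, added: u64, threshold: u64) -> bool {
  (prior < threshold) && ((prior + added) >= threshold)
}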

* Finish updating the coordinator to handle an environment with multiple key shares per validator

* Adjust strategy re: preventing nonce reuse in DKG Confirmer

* Add TODOs regarding dropped transactions, add possible TODO fix

* Update tests/coordinator

This doesn't add new multi-key-share tests; it solely updates the existing
single-key-share tests to compile and run, with the necessary fixes to the
coordinator.

* Update processor key_gen to handle generating multiple key shares at once

* Update SubstrateSigner

* Update signer, clippy

* Update processor tests

* Update processor docker tests

Author: Luke Parker, 2023-11-04 19:26:13 -04:00 (committed by GitHub)
Parent: 5970a455d0
Commit: e05b77d830
28 changed files with 866 additions and 437 deletions


@@ -33,11 +33,11 @@ pub mod key_gen {
pub enum CoordinatorMessage {
// Instructs the Processor to begin the key generation process.
// TODO: Should this be moved under Substrate?
GenerateKey { id: KeyGenId, params: ThresholdParams },
GenerateKey { id: KeyGenId, params: ThresholdParams, shares: u16 },
// Received commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: HashMap<Participant, Vec<u8>> },
// Received shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> },
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
}
impl CoordinatorMessage {
@@ -49,9 +49,9 @@ pub mod key_gen {
#[derive(Clone, PartialEq, Eq, Debug, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Created commitments for the specified key generation protocol.
Commitments { id: KeyGenId, commitments: Vec<u8> },
Commitments { id: KeyGenId, commitments: Vec<Vec<u8>> },
// Created shares for the specified key generation protocol.
Shares { id: KeyGenId, shares: HashMap<Participant, Vec<u8>> },
Shares { id: KeyGenId, shares: Vec<HashMap<Participant, Vec<u8>>> },
// Resulting keys from the specified key generation protocol.
GeneratedKeyPair { id: KeyGenId, substrate_key: [u8; 32], network_key: Vec<u8> },
}
@@ -97,9 +97,9 @@ pub mod sign {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
pub enum ProcessorMessage {
// Created preprocess for the specified signing protocol.
Preprocess { id: SignId, preprocess: Vec<u8> },
Preprocess { id: SignId, preprocesses: Vec<Vec<u8>> },
// Signed share for the specified signing protocol.
Share { id: SignId, share: Vec<u8> },
Share { id: SignId, shares: Vec<Vec<u8>> },
// Completed a signing protocol already.
Completed { key: Vec<u8>, id: [u8; 32], tx: Vec<u8> },
}
@@ -148,8 +148,8 @@ pub mod coordinator {
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, Encode, Decode, Serialize, Deserialize)]
pub enum ProcessorMessage {
SubstrateBlockAck { network: NetworkId, block: u64, plans: Vec<PlanMeta> },
BatchPreprocess { id: SignId, block: BlockHash, preprocess: Vec<u8> },
BatchShare { id: SignId, share: [u8; 32] },
BatchPreprocess { id: SignId, block: BlockHash, preprocesses: Vec<Vec<u8>> },
BatchShare { id: SignId, shares: Vec<[u8; 32]> },
}
}
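
The common thread in the message changes above: every per-protocol payload becomes one payload per locally held key share (`Vec<Vec<u8>>`, `Vec<HashMap<...>>`, and so on). The coordinator can then expand such a compressed message into per-Participant entries using the sender's starting index, roughly as follows (a hypothetical sketch, not the coordinator's actual code):

use std::collections::HashMap;

// Hypothetical: expand one validator's per-share preprocesses into
// (Participant index, preprocess) pairs, keyed off its starting index.
fn expand_preprocesses(start: u16, preprocesses: Vec<Vec<u8>>) -> HashMap<u16, Vec<u8>> {
  preprocesses
    .into_iter()
    .enumerate()
    .map(|(s, preprocess)| (start + u16::try_from(s).unwrap(), preprocess))
    .collect()
}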


@@ -23,8 +23,8 @@ use crate::{Get, DbTxn, Db, networks::Network};
#[derive(Debug)]
pub struct KeyConfirmed<C: Ciphersuite> {
pub substrate_keys: ThresholdKeys<Ristretto>,
pub network_keys: ThresholdKeys<C>,
pub substrate_keys: Vec<ThresholdKeys<Ristretto>>,
pub network_keys: Vec<ThresholdKeys<C>>,
}
#[derive(Clone, Debug)]
@@ -37,10 +37,15 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn params_key(set: &ValidatorSet) -> Vec<u8> {
Self::key_gen_key(b"params", set.encode())
}
fn save_params(txn: &mut D::Transaction<'_>, set: &ValidatorSet, params: &ThresholdParams) {
txn.put(Self::params_key(set), bincode::serialize(params).unwrap());
fn save_params(
txn: &mut D::Transaction<'_>,
set: &ValidatorSet,
params: &ThresholdParams,
shares: u16,
) {
txn.put(Self::params_key(set), bincode::serialize(&(params, shares)).unwrap());
}
fn params<G: Get>(getter: &G, set: &ValidatorSet) -> Option<ThresholdParams> {
fn params<G: Get>(getter: &G, set: &ValidatorSet) -> Option<(ThresholdParams, u16)> {
getter.get(Self::params_key(set)).map(|bytes| bincode::deserialize(&bytes).unwrap())
}
@@ -70,17 +75,23 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn save_keys(
txn: &mut D::Transaction<'_>,
id: &KeyGenId,
substrate_keys: &ThresholdCore<Ristretto>,
network_keys: &ThresholdKeys<N::Curve>,
substrate_keys: &[ThresholdCore<Ristretto>],
network_keys: &[ThresholdKeys<N::Curve>],
) {
let mut keys = substrate_keys.serialize();
keys.extend(network_keys.serialize().iter());
let mut keys = Zeroizing::new(vec![]);
for (substrate_keys, network_keys) in substrate_keys.iter().zip(network_keys) {
keys.extend(substrate_keys.serialize().as_slice());
keys.extend(network_keys.serialize().as_slice());
}
txn.put(
Self::generated_keys_key(
id.set,
(&substrate_keys.group_key().to_bytes(), network_keys.group_key().to_bytes().as_ref()),
(
&substrate_keys[0].group_key().to_bytes(),
network_keys[0].group_key().to_bytes().as_ref(),
),
),
keys,
&keys,
);
}
@@ -91,54 +102,62 @@ impl<N: Network, D: Db> KeyGenDb<N, D> {
fn read_keys<G: Get>(
getter: &G,
key: &[u8],
) -> Option<(Vec<u8>, (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>))> {
) -> Option<(Vec<u8>, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
let keys_vec = getter.get(key)?;
let mut keys_ref: &[u8] = keys_vec.as_ref();
let substrate_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
let mut network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
N::tweak_keys(&mut network_keys);
let mut substrate_keys = vec![];
let mut network_keys = vec![];
while !keys_ref.is_empty() {
substrate_keys.push(ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap()));
let mut these_network_keys = ThresholdKeys::new(ThresholdCore::read(&mut keys_ref).unwrap());
N::tweak_keys(&mut these_network_keys);
network_keys.push(these_network_keys);
}
Some((keys_vec, (substrate_keys, network_keys)))
}
fn confirm_keys(
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
key_pair: KeyPair,
) -> (ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>) {
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) =
Self::read_keys(txn, &Self::generated_keys_key(set, (&key_pair.0 .0, key_pair.1.as_ref())))
.unwrap();
assert_eq!(key_pair.0 .0, keys.0.group_key().to_bytes());
assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());
assert_eq!(
{
let network_key: &[u8] = key_pair.1.as_ref();
network_key
},
keys.1.group_key().to_bytes().as_ref(),
keys.1[0].group_key().to_bytes().as_ref(),
);
txn.put(Self::keys_key(&keys.1.group_key()), keys_vec);
txn.put(Self::keys_key(&keys.1[0].group_key()), keys_vec);
keys
}
#[allow(clippy::type_complexity)]
fn keys<G: Get>(
getter: &G,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
let res = Self::read_keys(getter, &Self::keys_key(key))?.1;
assert_eq!(&res.1.group_key(), key);
assert_eq!(&res.1[0].group_key(), key);
Some(res)
}
}
/// Coded so if the processor spontaneously reboots, one of two paths occur:
/// 1) It either didn't send its response, so the attempt will be aborted
/// 2) It did send its response, and has locally saved enough data to continue
type SecretShareMachines<N> =
Vec<(SecretShareMachine<Ristretto>, SecretShareMachine<<N as Network>::Curve>)>;
type KeyMachines<N> = Vec<(KeyMachine<Ristretto>, KeyMachine<<N as Network>::Curve>)>;
#[derive(Debug)]
pub struct KeyGen<N: Network, D: Db> {
db: D,
entropy: Zeroizing<[u8; 32]>,
active_commit:
HashMap<ValidatorSet, (SecretShareMachine<Ristretto>, SecretShareMachine<N::Curve>)>,
active_share: HashMap<ValidatorSet, (KeyMachine<Ristretto>, KeyMachine<N::Curve>)>,
active_commit: HashMap<ValidatorSet, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
#[allow(clippy::type_complexity)]
active_share: HashMap<ValidatorSet, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
}
impl<N: Network, D: Db> KeyGen<N, D> {
@@ -152,10 +171,11 @@ impl<N: Network, D: Db> KeyGen<N, D> {
KeyGenDb::<N, D>::params(&self.db, set).is_some()
}
#[allow(clippy::type_complexity)]
pub fn keys(
&self,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(ThresholdKeys<Ristretto>, ThresholdKeys<N::Curve>)> {
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
// This is safe, despite not having a txn, since it's a static value
// The only concern is it may not be set when expected, or it may be set unexpectedly
//
@@ -191,58 +211,35 @@ impl<N: Network, D: Db> KeyGen<N, D> {
let secret_shares_rng = |id| rng(b"Key Gen Secret Shares", id);
let share_rng = |id| rng(b"Key Gen Share", id);
let key_gen_machines = |id, params| {
let key_gen_machines = |id, params: ThresholdParams, shares| {
let mut rng = coefficients_rng(id);
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
((substrate.0, network.0), (substrate.1, network.1))
let mut machines = vec![];
let mut commitments = vec![];
for s in 0 .. shares {
let params = ThresholdParams::new(
params.t(),
params.n(),
Participant::new(u16::from(params.i()) + s).unwrap(),
)
.unwrap();
let substrate = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
let network = KeyGenMachine::new(params, context(&id)).generate_coefficients(&mut rng);
machines.push((substrate.0, network.0));
let mut serialized = vec![];
substrate.1.write(&mut serialized).unwrap();
network.1.write(&mut serialized).unwrap();
commitments.push(serialized);
}
(machines, commitments)
};
match msg {
CoordinatorMessage::GenerateKey { id, params } => {
info!("Generating new key. ID: {:?} Params: {:?}", id, params);
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
{
// If we haven't handled this set before, save the params
KeyGenDb::<N, D>::save_params(txn, &id.set, &params);
}
let (machines, commitments) = key_gen_machines(id, params);
let mut serialized = commitments.0.serialize();
serialized.extend(commitments.1.serialize());
self.active_commit.insert(id.set, machines);
ProcessorMessage::Commitments { id, commitments: serialized }
}
CoordinatorMessage::Commitments { id, commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let machines =
self.active_commit.remove(&id.set).unwrap_or_else(|| key_gen_machines(id, params).0);
let secret_share_machines =
|id,
params: ThresholdParams,
(machines, our_commitments): (SecretShareMachines<N>, Vec<Vec<u8>>),
commitments: HashMap<Participant, Vec<u8>>| {
let mut rng = secret_shares_rng(id);
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
#[allow(clippy::type_complexity)]
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
@@ -269,26 +266,88 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, machines.0, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, machines.1, &mut commitments_ref);
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
let mut key_machines = vec![];
let mut shares = vec![];
for (m, (substrate_machine, network_machine)) in machines.into_iter().enumerate() {
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
for (i, our_commitments) in our_commitments.iter().enumerate() {
if m != i {
assert!(commitments_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_commitments.as_ref(),
)
.is_none());
}
}
let (substrate_machine, mut substrate_shares) =
handle_machine::<Ristretto>(&mut rng, params, substrate_machine, &mut commitments_ref);
let (network_machine, network_shares) =
handle_machine(&mut rng, params, network_machine, &mut commitments_ref);
key_machines.push((substrate_machine, network_machine));
for (_, commitments) in commitments_ref {
if !commitments.is_empty() {
todo!("malicious signer: extra bytes");
}
}
let mut these_shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in these_shares.iter_mut() {
share.extend(network_shares[i].serialize());
}
shares.push(these_shares);
}
(key_machines, shares)
};
match msg {
CoordinatorMessage::GenerateKey { id, params, shares } => {
info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}");
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
{
// If we haven't handled this set before, save the params
KeyGenDb::<N, D>::save_params(txn, &id.set, &params, shares);
}
self.active_share.insert(id.set, (substrate_machine, network_machine));
let (machines, commitments) = key_gen_machines(id, params, shares);
self.active_commit.insert(id.set, (machines, commitments.clone()));
let mut shares: HashMap<_, _> =
substrate_shares.drain().map(|(i, share)| (i, share.serialize())).collect();
for (i, share) in shares.iter_mut() {
share.extend(network_shares[i].serialize());
ProcessorMessage::Commitments { id, commitments }
}
CoordinatorMessage::Commitments { id, commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let (params, share_quantity) = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
// This *may* be inconsistent if we receive a KeyGen for attempt x, then commitments for
// attempt y
// The coordinator is trusted to be proper in this regard
let prior = self
.active_commit
.remove(&id.set)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
KeyGenDb::<N, D>::save_commitments(txn, &id, &commitments);
let (machines, shares) = secret_share_machines(id, params, prior, commitments);
self.active_share.insert(id.set, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
@@ -296,48 +355,16 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Shares { id, shares } => {
info!("Received shares for {:?}", id);
let params = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
let (params, share_quantity) = KeyGenDb::<N, D>::params(txn, &id.set).unwrap();
// Same commentary on inconsistency as above exists
let machines = self.active_share.remove(&id.set).unwrap_or_else(|| {
let machines = key_gen_machines(id, params).0;
let mut rng = secret_shares_rng(id);
let commitments = KeyGenDb::<N, D>::commitments(txn, &id);
let mut commitments_ref: HashMap<Participant, &[u8]> =
commitments.iter().map(|(i, commitments)| (*i, commitments.as_ref())).collect();
fn parse_commitments<C: Ciphersuite>(
params: ThresholdParams,
commitments_ref: &mut HashMap<Participant, &[u8]>,
) -> HashMap<Participant, EncryptionKeyMessage<C, Commitments<C>>> {
commitments_ref
.iter_mut()
.map(|(i, commitments)| {
(*i, EncryptionKeyMessage::<C, Commitments<C>>::read(commitments, params).unwrap())
})
.collect()
}
(
machines
.0
.generate_secret_shares(&mut rng, parse_commitments(params, &mut commitments_ref))
.unwrap()
.0,
machines
.1
.generate_secret_shares(&mut rng, parse_commitments(params, &mut commitments_ref))
.unwrap()
.0,
)
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity);
secret_share_machines(id, params, prior, KeyGenDb::<N, D>::commitments(txn, &id))
});
let mut rng = share_rng(id);
let mut shares_ref: HashMap<Participant, &[u8]> =
shares.iter().map(|(i, shares)| (*i, shares.as_ref())).collect();
fn handle_machine<C: Ciphersuite>(
rng: &mut ChaCha20Rng,
params: ThresholdParams,
@@ -364,24 +391,58 @@ impl<N: Network, D: Db> KeyGen<N, D> {
.complete()
}
let substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
for (_, shares) in shares_ref {
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
let mut substrate_keys = vec![];
let mut network_keys = vec![];
for (m, machines) in machines.into_iter().enumerate() {
let mut shares_ref: HashMap<Participant, &[u8]> =
shares[m].iter().map(|(i, shares)| (*i, shares.as_ref())).collect();
for (i, our_shares) in our_shares.iter().enumerate() {
if m != i {
assert!(shares_ref
.insert(
Participant::new(u16::from(params.i()) + u16::try_from(i).unwrap()).unwrap(),
our_shares
[&Participant::new(u16::from(params.i()) + u16::try_from(m).unwrap()).unwrap()]
.as_ref(),
)
.is_none());
}
}
let these_substrate_keys = handle_machine(&mut rng, params, machines.0, &mut shares_ref);
let these_network_keys = handle_machine(&mut rng, params, machines.1, &mut shares_ref);
for (_, shares) in shares_ref {
if !shares.is_empty() {
todo!("malicious signer: extra bytes");
}
}
let mut these_network_keys = ThresholdKeys::new(these_network_keys);
N::tweak_keys(&mut these_network_keys);
substrate_keys.push(these_substrate_keys);
network_keys.push(these_network_keys);
}
let mut network_keys = ThresholdKeys::new(network_keys);
N::tweak_keys(&mut network_keys);
let mut generated_substrate_key = None;
let mut generated_network_key = None;
for keys in substrate_keys.iter().zip(&network_keys) {
if generated_substrate_key.is_none() {
generated_substrate_key = Some(keys.0.group_key());
generated_network_key = Some(keys.1.group_key());
} else {
assert_eq!(generated_substrate_key, Some(keys.0.group_key()));
assert_eq!(generated_network_key, Some(keys.1.group_key()));
}
}
KeyGenDb::<N, D>::save_keys(txn, &id, &substrate_keys, &network_keys);
ProcessorMessage::GeneratedKeyPair {
id,
substrate_key: substrate_keys.group_key().to_bytes(),
network_key: network_keys.group_key().to_bytes().as_ref().to_vec(),
substrate_key: generated_substrate_key.unwrap().to_bytes(),
network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(),
}
}
}
@@ -393,12 +454,12 @@ impl<N: Network, D: Db> KeyGen<N, D> {
set: ValidatorSet,
key_pair: KeyPair,
) -> KeyConfirmed<N::Curve> {
let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair);
let (substrate_keys, network_keys) = KeyGenDb::<N, D>::confirm_keys(txn, set, key_pair.clone());
info!(
"Confirmed key pair {} {} for set {:?}",
hex::encode(substrate_keys.group_key().to_bytes()),
hex::encode(network_keys.group_key().to_bytes()),
hex::encode(key_pair.0),
hex::encode(key_pair.1),
set,
);
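
The key_gen pattern above, in isolation: one DKG machine runs per locally held share, and before each machine processes the network's commitments, the commitments produced by our other local shares are re-inserted under their derived Participant indices so every machine sees a complete set. A stripped-down sketch of that merge step (indices simplified to `u16`, not the actual KeyGen code):

use std::collections::HashMap;

// Sketch: while handling our local machine `m`, insert our other local
// shares' commitments at (our starting index + their offset).
fn merge_own_commitments(
  our_start: u16,
  m: usize,
  our_commitments: &[Vec<u8>],
  commitments: &mut HashMap<u16, Vec<u8>>,
) {
  for (i, ours) in our_commitments.iter().enumerate() {
    if i != m {
      let index = our_start + u16::try_from(i).unwrap();
      assert!(commitments.insert(index, ours.clone()).is_none());
    }
  }
}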


@@ -424,7 +424,7 @@ async fn boot<N: Network, D: Db>(
for (i, key) in current_keys.iter().enumerate() {
let Some((substrate_keys, network_keys)) = key_gen.keys(key) else { continue };
let network_key = network_keys.group_key();
let network_key = network_keys[0].group_key();
// If this is the oldest key, load the SubstrateSigner for it as the active SubstrateSigner
// The new key only takes responsibility once the old key is fully deprecated


@@ -142,23 +142,26 @@ impl<N: Network, D: Db> SignerDb<N, D> {
}
}
type PreprocessFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::Preprocess;
type SignMachineFor<N> = <<N as Network>::TransactionMachine as PreprocessMachine>::SignMachine;
type SignatureShareFor<N> =
<SignMachineFor<N> as SignMachine<<N as Network>::Transaction>>::SignatureShare;
type SignatureMachineFor<N> =
<SignMachineFor<N> as SignMachine<<N as Network>::Transaction>>::SignatureMachine;
pub struct Signer<N: Network, D: Db> {
db: PhantomData<D>,
network: N,
keys: ThresholdKeys<N::Curve>,
keys: Vec<ThresholdKeys<N::Curve>>,
signable: HashMap<[u8; 32], N::SignableTransaction>,
attempt: HashMap<[u8; 32], u32>,
preprocessing: HashMap<[u8; 32], <N::TransactionMachine as PreprocessMachine>::SignMachine>,
#[allow(clippy::type_complexity)]
signing: HashMap<
[u8; 32],
<
<N::TransactionMachine as PreprocessMachine>::SignMachine as SignMachine<N::Transaction>
>::SignatureMachine,
>,
preprocessing: HashMap<[u8; 32], (Vec<SignMachineFor<N>>, Vec<PreprocessFor<N>>)>,
#[allow(clippy::type_complexity)]
signing: HashMap<[u8; 32], (SignatureMachineFor<N>, Vec<SignatureShareFor<N>>)>,
pub events: VecDeque<SignerEvent<N>>,
}
@@ -194,7 +197,8 @@ impl<N: Network, D: Db> Signer<N, D> {
tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await;
}
}
pub fn new(network: N, keys: ThresholdKeys<N::Curve>) -> Signer<N, D> {
pub fn new(network: N, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
assert!(!keys.is_empty());
Signer {
db: PhantomData,
@@ -329,7 +333,7 @@ impl<N: Network, D: Db> Signer<N, D> {
assert!(!SignerDb::<N, D>::completions(txn, id).is_empty());
info!(
"signer {} informed of the eventuality completion for plan {}, {}",
hex::encode(self.keys.group_key().to_bytes()),
hex::encode(self.keys[0].group_key().to_bytes()),
hex::encode(id),
"which we already marked as completed",
);
@@ -370,7 +374,7 @@ impl<N: Network, D: Db> Signer<N, D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys.group_key().to_bytes().as_ref().to_vec(), id, attempt };
let id = SignId { key: self.keys[0].group_key().to_bytes().as_ref().to_vec(), id, attempt };
info!("signing for {} #{}", hex::encode(id.id), id.attempt);
@@ -398,25 +402,34 @@ impl<N: Network, D: Db> Signer<N, D> {
SignerDb::<N, D>::attempt(txn, &id);
// Attempt to create the TX
let machine = match self.network.attempt_send(self.keys.clone(), tx).await {
Err(e) => {
error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
return;
}
Ok(machine) => machine,
};
let mut machines = vec![];
let mut preprocesses = vec![];
let mut serialized_preprocesses = vec![];
for keys in &self.keys {
let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await {
Err(e) => {
error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e);
return;
}
Ok(machine) => machine,
};
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the former
// with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
self.preprocessing.insert(id.id, machine);
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the
// former with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
machines.push(machine);
serialized_preprocesses.push(preprocess.serialize());
preprocesses.push(preprocess);
}
self.preprocessing.insert(id.id, (machines, preprocesses));
// Broadcast our preprocess
self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
id,
preprocess: preprocess.serialize(),
preprocesses: serialized_preprocesses,
}));
}
@@ -448,7 +461,7 @@ impl<N: Network, D: Db> Signer<N, D> {
return;
}
let machine = match self.preprocessing.remove(&id.id) {
let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
// Either rebooted or RPC error, or some invariant
None => {
warn!(
@@ -464,7 +477,7 @@ impl<N: Network, D: Db> Signer<N, D> {
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machine
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
@@ -472,23 +485,41 @@ impl<N: Network, D: Db> Signer<N, D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
// Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
self.signing.insert(id.id, machine);
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
let mut shares = vec![];
let mut serialized_shares = vec![];
for (m, machine) in machines.into_iter().enumerate() {
let mut preprocesses = preprocesses.clone();
for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
if i != m {
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
}
}
// Broadcast our share
// Use an empty message, as expected of TransactionMachines
let (machine, share) = match machine.sign(preprocesses, &[]) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
if m == 0 {
signature_machine = Some(machine);
}
serialized_shares.push(share.serialize());
shares.push(share);
}
self.signing.insert(id.id, (signature_machine.unwrap(), shares));
// Broadcast our shares
self.events.push_back(SignerEvent::ProcessorMessage(ProcessorMessage::Share {
id,
share: share.serialize(),
shares: serialized_shares,
}));
}
@@ -497,7 +528,7 @@ impl<N: Network, D: Db> Signer<N, D> {
return;
}
let machine = match self.signing.remove(&id.id) {
let (machine, our_shares) = match self.signing.remove(&id.id) {
// Rebooted, RPC error, or some invariant
None => {
// If preprocessing has this ID, it means we were never sent the preprocess by the
@@ -515,7 +546,7 @@ impl<N: Network, D: Db> Signer<N, D> {
Some(machine) => machine,
};
let shares = match shares
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
@@ -525,12 +556,16 @@ impl<N: Network, D: Db> Signer<N, D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
}
let tx = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
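
The Signer (and the SubstrateSigner below) apply the same pattern: preprocess once per local key share, merge our own other shares' preprocesses before signing with each machine, and retain only the first machine's SignatureMachine, since a single completed machine suffices to produce the final signature while every machine's share still has to be broadcast. That last design choice, in a generic sketch (not the actual Signer API):

// Sketch: sign with every local machine, broadcast every share, but keep
// only the first machine's continuation to later complete the signature.
fn sign_all<Machine, Continuation, Share>(
  machines: Vec<Machine>,
  mut sign: impl FnMut(Machine) -> (Continuation, Share),
) -> (Continuation, Vec<Share>) {
  let mut continuation = None;
  let mut shares = vec![];
  for (m, machine) in machines.into_iter().enumerate() {
    let (this_continuation, share) = sign(machine);
    if m == 0 {
      continuation = Some(this_continuation);
    }
    shares.push(share);
  }
  (continuation.expect("signed with zero machines"), shares)
}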


@@ -8,6 +8,7 @@ use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys,
algorithm::Algorithm,
sign::{
Writable, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine,
AlgorithmSignMachine, AlgorithmSignatureMachine,
@@ -77,16 +78,25 @@ impl<D: Db> SubstrateSignerDb<D> {
}
}
type Preprocess = <AlgorithmMachine<Ristretto, Schnorrkel> as PreprocessMachine>::Preprocess;
type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachine<
<Schnorrkel as Algorithm<Ristretto>>::Signature,
>>::SignatureShare;
pub struct SubstrateSigner<D: Db> {
db: PhantomData<D>,
network: NetworkId,
keys: ThresholdKeys<Ristretto>,
keys: Vec<ThresholdKeys<Ristretto>>,
signable: HashMap<[u8; 32], Batch>,
attempt: HashMap<[u8; 32], u32>,
preprocessing: HashMap<[u8; 32], AlgorithmSignMachine<Ristretto, Schnorrkel>>,
signing: HashMap<[u8; 32], AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
#[allow(clippy::type_complexity)]
preprocessing:
HashMap<[u8; 32], (Vec<AlgorithmSignMachine<Ristretto, Schnorrkel>>, Vec<Preprocess>)>,
#[allow(clippy::type_complexity)]
signing:
HashMap<[u8; 32], (AlgorithmSignatureMachine<Ristretto, Schnorrkel>, Vec<SignatureShare>)>,
pub events: VecDeque<SubstrateSignerEvent>,
}
@@ -102,7 +112,8 @@ impl<D: Db> fmt::Debug for SubstrateSigner<D> {
}
impl<D: Db> SubstrateSigner<D> {
pub fn new(network: NetworkId, keys: ThresholdKeys<Ristretto>) -> SubstrateSigner<D> {
pub fn new(network: NetworkId, keys: Vec<ThresholdKeys<Ristretto>>) -> SubstrateSigner<D> {
assert!(!keys.is_empty());
SubstrateSigner {
db: PhantomData,
@@ -178,7 +189,7 @@ impl<D: Db> SubstrateSigner<D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys.group_key().to_bytes().to_vec(), id, attempt };
let id = SignId { key: self.keys[0].group_key().to_bytes().to_vec(), id, attempt };
info!("signing batch {} #{}", hex::encode(id.id), id.attempt);
// If we reboot mid-sign, the current design has us abort all signs and wait for latter
@@ -204,19 +215,27 @@ impl<D: Db> SubstrateSigner<D> {
SubstrateSignerDb::<D>::attempt(txn, &id);
// b"substrate" is a literal from sp-core
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), self.keys.clone());
let mut machines = vec![];
let mut preprocesses = vec![];
let mut serialized_preprocesses = vec![];
for keys in &self.keys {
// b"substrate" is a literal from sp-core
let machine = AlgorithmMachine::new(Schnorrkel::new(b"substrate"), keys.clone());
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the former
// with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
self.preprocessing.insert(id.id, machine);
// TODO: Use a seeded RNG here so we don't produce distinct messages with the same intent
// This is also needed so we don't preprocess, send preprocess, reboot before ack'ing the
// message, send distinct preprocess, and then attempt a signing session premised on the
// former with the latter
let (machine, preprocess) = machine.preprocess(&mut OsRng);
machines.push(machine);
serialized_preprocesses.push(preprocess.serialize());
preprocesses.push(preprocess);
}
self.preprocessing.insert(id.id, (machines, preprocesses));
// Broadcast our preprocess
// Broadcast our preprocesses
self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
ProcessorMessage::BatchPreprocess { id, block, preprocess: preprocess.serialize() },
ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses },
));
}
@@ -240,23 +259,23 @@ impl<D: Db> SubstrateSigner<D> {
return;
}
let machine = match self.preprocessing.remove(&id.id) {
let (machines, our_preprocesses) = match self.preprocessing.remove(&id.id) {
// Either rebooted or RPC error, or some invariant
None => {
warn!(
"not preprocessing for {}. this is an error if we didn't reboot",
hex::encode(id.id)
hex::encode(id.id),
);
return;
}
Some(machine) => machine,
Some(preprocess) => preprocess,
};
let preprocesses = match preprocesses
.drain()
.map(|(l, preprocess)| {
let mut preprocess_ref = preprocess.as_ref();
let res = machine
let res = machines[0]
.read_preprocess::<&[u8]>(&mut preprocess_ref)
.map(|preprocess| (l, preprocess));
if !preprocess_ref.is_empty() {
@@ -264,24 +283,44 @@ impl<D: Db> SubstrateSigner<D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(preprocesses) => preprocesses,
Err(e) => todo!("malicious signer: {:?}", e),
};
let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
self.signing.insert(id.id, machine);
// Only keep a single machine as we only need one to get the signature
let mut signature_machine = None;
let mut shares = vec![];
let mut serialized_shares = vec![];
for (m, machine) in machines.into_iter().enumerate() {
let mut preprocesses = preprocesses.clone();
for (i, our_preprocess) in our_preprocesses.clone().into_iter().enumerate() {
if i != m {
assert!(preprocesses.insert(self.keys[i].params().i(), our_preprocess).is_none());
}
}
// Broadcast our share
let mut share_bytes = [0; 32];
share_bytes.copy_from_slice(&share.serialize());
let (machine, share) =
match machine.sign(preprocesses, &batch_message(&self.signable[&id.id])) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),
};
if m == 0 {
signature_machine = Some(machine);
}
let mut share_bytes = [0; 32];
share_bytes.copy_from_slice(&share.serialize());
serialized_shares.push(share_bytes);
shares.push(share);
}
self.signing.insert(id.id, (signature_machine.unwrap(), shares));
// Broadcast our shares
self.events.push_back(SubstrateSignerEvent::ProcessorMessage(
ProcessorMessage::BatchShare { id, share: share_bytes },
ProcessorMessage::BatchShare { id, shares: serialized_shares },
));
}
@@ -290,7 +329,7 @@ impl<D: Db> SubstrateSigner<D> {
return;
}
let machine = match self.signing.remove(&id.id) {
let (machine, our_shares) = match self.signing.remove(&id.id) {
// Rebooted, RPC error, or some invariant
None => {
// If preprocessing has this ID, it means we were never sent the preprocess by the
@@ -305,10 +344,10 @@ impl<D: Db> SubstrateSigner<D> {
);
return;
}
Some(machine) => machine,
Some(signing) => signing,
};
let shares = match shares
let mut shares = match shares
.drain()
.map(|(l, share)| {
let mut share_ref = share.as_ref();
@@ -318,12 +357,16 @@ impl<D: Db> SubstrateSigner<D> {
}
res
})
.collect::<Result<_, _>>()
.collect::<Result<HashMap<_, _>, _>>()
{
Ok(shares) => shares,
Err(e) => todo!("malicious signer: {:?}", e),
};
for (i, our_share) in our_shares.into_iter().enumerate().skip(1) {
assert!(shares.insert(self.keys[i].params().i(), our_share).is_none());
}
let sig = match machine.complete(shares) {
Ok(res) => res,
Err(e) => todo!("malicious signer: {:?}", e),


@@ -41,19 +41,22 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
if let ProcessorMessage::Commitments { id, commitments } = key_gen
if let ProcessorMessage::Commitments { id, mut commitments } = key_gen
.handle(
&mut txn,
CoordinatorMessage::GenerateKey {
id: ID,
params: ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap())
.unwrap(),
shares: 1,
},
)
.await
{
assert_eq!(id, ID);
all_commitments.insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments);
assert_eq!(commitments.len(), 1);
all_commitments
.insert(Participant::new(u16::try_from(i).unwrap()).unwrap(), commitments.swap_remove(0));
} else {
panic!("didn't get commitments back");
}
@@ -75,7 +78,7 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let ProcessorMessage::Shares { id, shares } = key_gen
if let ProcessorMessage::Shares { id, mut shares } = key_gen
.handle(
&mut txn,
CoordinatorMessage::Commitments {
@@ -86,7 +89,8 @@ pub async fn test_key_gen<N: Network>() {
.await
{
assert_eq!(id, ID);
all_shares.insert(i, shares);
assert_eq!(shares.len(), 1);
all_shares.insert(i, shares.swap_remove(0));
} else {
panic!("didn't get shares back");
}
@@ -107,10 +111,10 @@ pub async fn test_key_gen<N: Network>() {
&mut txn,
CoordinatorMessage::Shares {
id: ID,
shares: all_shares
shares: vec![all_shares
.iter()
.filter_map(|(l, shares)| if i == *l { None } else { Some((*l, shares[&i].clone())) })
.collect(),
.collect()],
},
)
.await
@@ -134,11 +138,16 @@ pub async fn test_key_gen<N: Network>() {
for i in 1 ..= 5 {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let KeyConfirmed { substrate_keys, network_keys } = key_gen
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen
.confirm(&mut txn, ID.set, (sr25519::Public(res.0), res.1.clone().try_into().unwrap()))
.await;
txn.commit();
assert_eq!(substrate_keys.len(), 1);
let substrate_keys = substrate_keys.swap_remove(0);
assert_eq!(network_keys.len(), 1);
let network_keys = network_keys.swap_remove(0);
let params =
ThresholdParams::new(3, 5, Participant::new(u16::try_from(i).unwrap()).unwrap()).unwrap();
assert_eq!(substrate_keys.params(), params);


@@ -45,7 +45,7 @@ pub async fn sign<N: Network>(
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.remove(&i).unwrap();
t = keys.params().t();
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), keys));
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), vec![keys]));
dbs.insert(i, MemDb::new());
}
drop(keys);
@@ -74,12 +74,15 @@ pub async fn sign<N: Network>(
let mut preprocesses = HashMap::new();
for i in 1 ..= signers.len() {
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
if let SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess { id, preprocess }) =
signers.get_mut(&i).unwrap().events.pop_front().unwrap()
if let SignerEvent::ProcessorMessage(ProcessorMessage::Preprocess {
id,
preprocesses: mut these_preprocesses,
}) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
assert_eq!(these_preprocesses.len(), 1);
if signing_set.contains(&i) {
preprocesses.insert(i, preprocess);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
} else {
panic!("didn't get preprocess back");
@@ -102,11 +105,12 @@ pub async fn sign<N: Network>(
.await;
txn.commit();
if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, share }) =
if let SignerEvent::ProcessorMessage(ProcessorMessage::Share { id, shares: mut these_shares }) =
signers.get_mut(i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
shares.insert(*i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(*i, these_shares.swap_remove(0));
} else {
panic!("didn't get share back");
}


@@ -56,7 +56,7 @@ async fn test_substrate_signer() {
let keys = keys.get(&i).unwrap().clone();
t = keys.params().t();
let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, keys);
let mut signer = SubstrateSigner::<MemDb>::new(NetworkId::Monero, vec![keys]);
let mut db = MemDb::new();
let mut txn = db.txn();
signer.sign(&mut txn, batch.clone()).await;
@@ -85,7 +85,7 @@ async fn test_substrate_signer() {
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchPreprocess {
id,
block: batch_block,
preprocess,
preprocesses: mut these_preprocesses,
}) = signers.get_mut(&i).unwrap().events.pop_front().unwrap()
{
if actual_id.id == [0; 32] {
@@ -93,8 +93,9 @@ async fn test_substrate_signer() {
}
assert_eq!(id, actual_id);
assert_eq!(batch_block, block);
assert_eq!(these_preprocesses.len(), 1);
if signing_set.contains(&i) {
preprocesses.insert(i, preprocess);
preprocesses.insert(i, these_preprocesses.swap_remove(0));
}
} else {
panic!("didn't get preprocess back");
@@ -117,11 +118,14 @@ async fn test_substrate_signer() {
.await;
txn.commit();
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare { id, share }) =
signers.get_mut(i).unwrap().events.pop_front().unwrap()
if let SubstrateSignerEvent::ProcessorMessage(ProcessorMessage::BatchShare {
id,
shares: mut these_shares,
}) = signers.get_mut(i).unwrap().events.pop_front().unwrap()
{
assert_eq!(id, actual_id);
shares.insert(*i, share);
assert_eq!(these_shares.len(), 1);
shares.insert(*i, these_shares.swap_remove(0));
} else {
panic!("didn't get share back");
}