* Remove NetworkId from processor-messages

Because intent binds to the sender/receiver, the NetworkId isn't needed to disambiguate intent.

The processor knows what the network is.

The coordinator knows which to use because it's sending this message to the
processor for that network.

Also removes the unused zeroize.

* ProcessorMessage::Completed use Session instead of key

* Move SubstrateSignId to Session

* Finish replacing key with session
This commit is contained in:
Luke Parker
2023-11-26 12:14:23 -05:00
committed by GitHub
parent b79cf8abde
commit 571195bfda
31 changed files with 304 additions and 455 deletions

View File

@@ -3,7 +3,6 @@ use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys, FrostError,
@@ -21,6 +20,7 @@ use scale::Encode;
use serai_client::{
primitives::{NetworkId, BlockHash},
in_instructions::primitives::{Batch, SignedBatch, batch_message},
validator_sets::primitives::Session,
};
use messages::coordinator::*;
@@ -48,6 +48,7 @@ pub struct BatchSigner<D: Db> {
db: PhantomData<D>,
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
signable: HashMap<[u8; 5], Batch>,
@@ -71,12 +72,17 @@ impl<D: Db> fmt::Debug for BatchSigner<D> {
}
impl<D: Db> BatchSigner<D> {
pub fn new(network: NetworkId, keys: Vec<ThresholdKeys<Ristretto>>) -> BatchSigner<D> {
pub fn new(
network: NetworkId,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
) -> BatchSigner<D> {
assert!(!keys.is_empty());
BatchSigner {
db: PhantomData,
network,
session,
keys,
signable: HashMap::new(),
@@ -86,11 +92,11 @@ impl<D: Db> BatchSigner<D> {
}
}
fn verify_id(&self, id: &SubstrateSignId) -> Result<([u8; 32], [u8; 5], u32), ()> {
let SubstrateSignId { key, id, attempt } = id;
fn verify_id(&self, id: &SubstrateSignId) -> Result<(Session, [u8; 5], u32), ()> {
let SubstrateSignId { session, id, attempt } = id;
let SubstrateSignableId::Batch(id) = id else { panic!("BatchSigner handed non-Batch") };
assert_eq!(key, &self.keys[0].group_key().to_bytes());
assert_eq!(session, &self.session);
// Check the attempt lines up
match self.attempt.get(id) {
@@ -114,7 +120,7 @@ impl<D: Db> BatchSigner<D> {
}
}
Ok((*key, *id, *attempt))
Ok((*session, *id, *attempt))
}
#[must_use]
@@ -196,11 +202,7 @@ impl<D: Db> BatchSigner<D> {
}
self.preprocessing.insert(id, (machines, preprocesses));
let id = SubstrateSignId {
key: self.keys[0].group_key().to_bytes(),
id: SubstrateSignableId::Batch(id),
attempt,
};
let id = SubstrateSignId { session: self.session, id: SubstrateSignableId::Batch(id), attempt };
// Broadcast our preprocesses
Some(ProcessorMessage::BatchPreprocess { id, block, preprocesses: serialized_preprocesses })
@@ -236,10 +238,10 @@ impl<D: Db> BatchSigner<D> {
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
let (key, id, attempt) = self.verify_id(&id).ok()?;
let (session, id, attempt) = self.verify_id(&id).ok()?;
let substrate_sign_id =
SubstrateSignId { key, id: SubstrateSignableId::Batch(id), attempt };
SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };
let (machines, our_preprocesses) = match self.preprocessing.remove(&id) {
// Either rebooted or RPC error, or some invariant
@@ -328,10 +330,10 @@ impl<D: Db> BatchSigner<D> {
}
CoordinatorMessage::SubstrateShares { id, shares } => {
let (key, id, attempt) = self.verify_id(&id).ok()?;
let (session, id, attempt) = self.verify_id(&id).ok()?;
let substrate_sign_id =
SubstrateSignId { key, id: SubstrateSignableId::Batch(id), attempt };
SubstrateSignId { session, id: SubstrateSignableId::Batch(id), attempt };
let (machine, our_shares) = match self.signing.remove(&id) {
// Rebooted, RPC error, or some invariant

View File

@@ -3,7 +3,6 @@ use std::collections::HashMap;
use rand_core::OsRng;
use ciphersuite::group::GroupEncoding;
use frost::{
curve::Ristretto,
ThresholdKeys, FrostError,
@@ -18,6 +17,7 @@ use frost_schnorrkel::Schnorrkel;
use log::{info, warn};
use scale::Encode;
use serai_client::validator_sets::primitives::Session;
use messages::coordinator::*;
use crate::{Get, DbTxn, create_db};
@@ -35,7 +35,7 @@ type SignatureShare = <AlgorithmSignMachine<Ristretto, Schnorrkel> as SignMachin
>>::SignatureShare;
pub struct Cosigner {
#[allow(dead_code)] // False positive
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
block_number: u64,
@@ -51,6 +51,7 @@ impl fmt::Debug for Cosigner {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt
.debug_struct("Cosigner")
.field("session", &self.session)
.field("block_number", &self.block_number)
.field("id", &self.id)
.field("attempt", &self.attempt)
@@ -63,6 +64,7 @@ impl fmt::Debug for Cosigner {
impl Cosigner {
pub fn new(
txn: &mut impl DbTxn,
session: Session,
keys: Vec<ThresholdKeys<Ristretto>>,
block_number: u64,
id: [u8; 32],
@@ -100,14 +102,11 @@ impl Cosigner {
}
let preprocessing = Some((machines, preprocesses));
let substrate_sign_id = SubstrateSignId {
key: keys[0].group_key().to_bytes(),
id: SubstrateSignableId::CosigningSubstrateBlock(id),
attempt,
};
let substrate_sign_id =
SubstrateSignId { session, id: SubstrateSignableId::CosigningSubstrateBlock(id), attempt };
Some((
Cosigner { keys, block_number, id, attempt, preprocessing, signing: None },
Cosigner { session, keys, block_number, id, attempt, preprocessing, signing: None },
ProcessorMessage::CosignPreprocess {
id: substrate_sign_id,
preprocesses: serialized_preprocesses,
@@ -127,7 +126,7 @@ impl Cosigner {
}
CoordinatorMessage::SubstratePreprocesses { id, preprocesses } => {
assert_eq!(id.key, self.keys[0].group_key().to_bytes());
assert_eq!(id.session, self.session);
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("cosigner passed Batch")
};
@@ -212,7 +211,7 @@ impl Cosigner {
}
CoordinatorMessage::SubstrateShares { id, shares } => {
assert_eq!(id.key, self.keys[0].group_key().to_bytes());
assert_eq!(id.session, self.session);
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("cosigner passed Batch")
};

View File

@@ -1,7 +1,7 @@
use std::io::Read;
use scale::{Encode, Decode};
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::{Session, KeyPair};
pub use serai_db::*;
@@ -17,15 +17,15 @@ create_db!(
impl PendingActivationsDb {
pub fn pending_activation<N: Network>(
getter: &impl Get,
) -> Option<(<N::Block as Block<N>>::Id, ValidatorSet, KeyPair)> {
) -> Option<(<N::Block as Block<N>>::Id, Session, KeyPair)> {
if let Some(bytes) = Self::get(getter) {
if !bytes.is_empty() {
let mut slice = bytes.as_slice();
let (set, key_pair) = <(ValidatorSet, KeyPair)>::decode(&mut slice).unwrap();
let (session, key_pair) = <(Session, KeyPair)>::decode(&mut slice).unwrap();
let mut block_before_queue_block = <N::Block as Block<N>>::Id::default();
slice.read_exact(block_before_queue_block.as_mut()).unwrap();
assert!(slice.is_empty());
return Some((block_before_queue_block, set, key_pair));
return Some((block_before_queue_block, session, key_pair));
}
}
None
@@ -33,10 +33,10 @@ impl PendingActivationsDb {
pub fn set_pending_activation<N: Network>(
txn: &mut impl DbTxn,
block_before_queue_block: <N::Block as Block<N>>::Id,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) {
let mut buf = (set, key_pair).encode();
let mut buf = (session, key_pair).encode();
buf.extend(block_before_queue_block.as_ref());
Self::set(txn, &buf);
}

View File

@@ -17,7 +17,7 @@ use frost::{
use log::info;
use scale::Encode;
use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
use serai_client::validator_sets::primitives::{Session, KeyPair};
use messages::key_gen::*;
use crate::{Get, DbTxn, Db, create_db, networks::Network};
@@ -30,16 +30,17 @@ pub struct KeyConfirmed<C: Ciphersuite> {
create_db!(
KeyGenDb {
ParamsDb: (set: &ValidatorSet) -> (ThresholdParams, u16),
ParamsDb: (session: &Session) -> (ThresholdParams, u16),
// Not scoped to the set since that'd have latter attempts overwrite former
// A former attempt may become the finalized attempt, even if it doesn't in a timely manner
// Overwriting its commitments would be accordingly poor
CommitmentsDb: (key: &KeyGenId) -> HashMap<Participant, Vec<u8>>,
GeneratedKeysDb: (set: &ValidatorSet, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,
GeneratedKeysDb: (session: &Session, substrate_key: &[u8; 32], network_key: &[u8]) -> Vec<u8>,
// These do assume a key is only used once across sets, which holds true so long as a single
// participant is honest in their execution of the protocol
KeysDb: (network_key: &[u8]) -> Vec<u8>,
NetworkKey: (substrate_key: [u8; 32]) -> Vec<u8>,
SessionDb: (network_key: &[u8]) -> Session,
NetworkKeyDb: (session: Session) -> Vec<u8>,
}
);
@@ -76,7 +77,7 @@ impl GeneratedKeysDb {
}
txn.put(
Self::key(
&id.set,
&id.session,
&substrate_keys[0].group_key().to_bytes(),
network_keys[0].group_key().to_bytes().as_ref(),
),
@@ -88,12 +89,12 @@ impl GeneratedKeysDb {
impl KeysDb {
fn confirm_keys<N: Network>(
txn: &mut impl DbTxn,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) -> (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>) {
let (keys_vec, keys) = GeneratedKeysDb::read_keys::<N>(
txn,
&GeneratedKeysDb::key(&set, &key_pair.0 .0, key_pair.1.as_ref()),
&GeneratedKeysDb::key(&session, &key_pair.0 .0, key_pair.1.as_ref()),
)
.unwrap();
assert_eq!(key_pair.0 .0, keys.0[0].group_key().to_bytes());
@@ -104,8 +105,9 @@ impl KeysDb {
},
keys.1[0].group_key().to_bytes().as_ref(),
);
txn.put(KeysDb::key(keys.1[0].group_key().to_bytes().as_ref()), keys_vec);
NetworkKey::set(txn, key_pair.0.into(), &key_pair.1.clone().into_inner());
txn.put(Self::key(key_pair.1.as_ref()), keys_vec);
NetworkKeyDb::set(txn, session, &key_pair.1.clone().into_inner());
SessionDb::set(txn, key_pair.1.as_ref(), &session);
keys
}
@@ -113,21 +115,19 @@ impl KeysDb {
fn keys<N: Network>(
getter: &impl Get,
network_key: &<N::Curve as Ciphersuite>::G,
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
let res =
GeneratedKeysDb::read_keys::<N>(getter, &Self::key(network_key.to_bytes().as_ref()))?.1;
assert_eq!(&res.1[0].group_key(), network_key);
Some(res)
Some((SessionDb::get(getter, network_key.to_bytes().as_ref()).unwrap(), res))
}
pub fn substrate_keys_by_substrate_key<N: Network>(
pub fn substrate_keys_by_session<N: Network>(
getter: &impl Get,
substrate_key: &[u8; 32],
session: Session,
) -> Option<Vec<ThresholdKeys<Ristretto>>> {
let network_key = NetworkKey::get(getter, *substrate_key)?;
let res = GeneratedKeysDb::read_keys::<N>(getter, &Self::key(&network_key))?.1;
assert_eq!(&res.0[0].group_key().to_bytes(), substrate_key);
Some(res.0)
let network_key = NetworkKeyDb::get(getter, session)?;
Some(GeneratedKeysDb::read_keys::<N>(getter, &Self::key(&network_key))?.1 .0)
}
}
@@ -140,9 +140,9 @@ pub struct KeyGen<N: Network, D: Db> {
db: D,
entropy: Zeroizing<[u8; 32]>,
active_commit: HashMap<ValidatorSet, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
active_commit: HashMap<Session, (SecretShareMachines<N>, Vec<Vec<u8>>)>,
#[allow(clippy::type_complexity)]
active_share: HashMap<ValidatorSet, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
active_share: HashMap<Session, (KeyMachines<N>, Vec<HashMap<Participant, Vec<u8>>>)>,
}
impl<N: Network, D: Db> KeyGen<N, D> {
@@ -151,26 +151,26 @@ impl<N: Network, D: Db> KeyGen<N, D> {
KeyGen { db, entropy, active_commit: HashMap::new(), active_share: HashMap::new() }
}
pub fn in_set(&self, set: &ValidatorSet) -> bool {
// We determine if we're in set using if we have the parameters for a set's key generation
ParamsDb::get(&self.db, set).is_some()
pub fn in_set(&self, session: &Session) -> bool {
// We determine if we're in set using if we have the parameters for a session's key generation
ParamsDb::get(&self.db, session).is_some()
}
#[allow(clippy::type_complexity)]
pub fn keys(
&self,
key: &<N::Curve as Ciphersuite>::G,
) -> Option<(Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>)> {
) -> Option<(Session, (Vec<ThresholdKeys<Ristretto>>, Vec<ThresholdKeys<N::Curve>>))> {
// This is safe, despite not having a txn, since it's a static value
// It doesn't change over time/in relation to other operations
KeysDb::keys::<N>(&self.db, key)
}
pub fn substrate_keys_by_substrate_key(
pub fn substrate_keys_by_session(
&self,
substrate_key: &[u8; 32],
session: Session,
) -> Option<Vec<ThresholdKeys<Ristretto>>> {
KeysDb::substrate_keys_by_substrate_key::<N>(&self.db, substrate_key)
KeysDb::substrate_keys_by_session::<N>(&self.db, session)
}
pub async fn handle(
@@ -184,7 +184,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// TODO2: Also embed the chain ID/genesis block
format!(
"Serai Key Gen. Session: {:?}, Network: {:?}, Attempt: {}, Key: {}",
id.set.session, id.set.network, id.attempt, key,
id.session,
N::NETWORK,
id.attempt,
key,
)
};
@@ -313,15 +316,15 @@ impl<N: Network, D: Db> KeyGen<N, D> {
info!("Generating new key. ID: {id:?} Params: {params:?} Shares: {shares}");
// Remove old attempts
if self.active_commit.remove(&id.set).is_none() &&
self.active_share.remove(&id.set).is_none()
if self.active_commit.remove(&id.session).is_none() &&
self.active_share.remove(&id.session).is_none()
{
// If we haven't handled this set before, save the params
ParamsDb::set(txn, &id.set, &(params, shares));
// If we haven't handled this session before, save the params
ParamsDb::set(txn, &id.session, &(params, shares));
}
let (machines, commitments) = key_gen_machines(id, params, shares);
self.active_commit.insert(id.set, (machines, commitments.clone()));
self.active_commit.insert(id.session, (machines, commitments.clone()));
ProcessorMessage::Commitments { id, commitments }
}
@@ -329,14 +332,14 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Commitments { id, mut commitments } => {
info!("Received commitments for {:?}", id);
if self.active_share.contains_key(&id.set) {
if self.active_share.contains_key(&id.session) {
// We should've been told of a new attempt before receiving commitments again
// The coordinator is either missing messages or repeating itself
// Either way, it's faulty
panic!("commitments when already handled commitments");
}
let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap();
let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
// Unwrap the machines, rebuilding them if we didn't have them in our cache
// We won't if the processor rebooted
@@ -345,7 +348,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
// The coordinator is trusted to be proper in this regard
let (prior, our_commitments) = self
.active_commit
.remove(&id.set)
.remove(&id.session)
.unwrap_or_else(|| key_gen_machines(id, params, share_quantity));
for (i, our_commitments) in our_commitments.into_iter().enumerate() {
@@ -361,7 +364,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
match secret_share_machines(id, params, prior, commitments) {
Ok((machines, shares)) => {
self.active_share.insert(id.set, (machines, shares.clone()));
self.active_share.insert(id.session, (machines, shares.clone()));
ProcessorMessage::Shares { id, shares }
}
Err(e) => e,
@@ -371,10 +374,10 @@ impl<N: Network, D: Db> KeyGen<N, D> {
CoordinatorMessage::Shares { id, shares } => {
info!("Received shares for {:?}", id);
let (params, share_quantity) = ParamsDb::get(txn, &id.set).unwrap();
let (params, share_quantity) = ParamsDb::get(txn, &id.session).unwrap();
// Same commentary on inconsistency as above exists
let (machines, our_shares) = self.active_share.remove(&id.set).unwrap_or_else(|| {
let (machines, our_shares) = self.active_share.remove(&id.session).unwrap_or_else(|| {
let prior = key_gen_machines(id, params, share_quantity).0;
let (machines, shares) =
secret_share_machines(id, params, prior, CommitmentsDb::get(txn, &id).unwrap())
@@ -512,7 +515,7 @@ impl<N: Network, D: Db> KeyGen<N, D> {
}
CoordinatorMessage::VerifyBlame { id, accuser, accused, share, blame } => {
let params = ParamsDb::get(txn, &id.set).unwrap().0;
let params = ParamsDb::get(txn, &id.session).unwrap().0;
let mut share_ref = share.as_slice();
let Ok(substrate_share) = EncryptedMessage::<
@@ -580,17 +583,17 @@ impl<N: Network, D: Db> KeyGen<N, D> {
pub async fn confirm(
&mut self,
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
) -> KeyConfirmed<N::Curve> {
info!(
"Confirmed key pair {} {} for set {:?}",
"Confirmed key pair {} {} for {:?}",
hex::encode(key_pair.0),
hex::encode(&key_pair.1),
set,
session,
);
let (substrate_keys, network_keys) = KeysDb::confirm_keys::<N>(txn, set, key_pair);
let (substrate_keys, network_keys) = KeysDb::confirm_keys::<N>(txn, session, key_pair);
KeyConfirmed { substrate_keys, network_keys }
}

View File

@@ -10,7 +10,7 @@ use tokio::time::sleep;
use serai_client::{
primitives::{BlockHash, NetworkId},
validator_sets::primitives::{ValidatorSet, KeyPair},
validator_sets::primitives::{Session, KeyPair},
};
use messages::{
@@ -44,7 +44,7 @@ mod coordinator;
pub use coordinator::*;
mod key_gen;
use key_gen::{KeyConfirmed, KeyGen};
use key_gen::{SessionDb, KeyConfirmed, KeyGen};
mod signer;
use signer::Signer;
@@ -84,7 +84,7 @@ struct TributaryMutable<N: Network, D: Db> {
// invalidating the Tributary's mutable borrow. The signer is coded to allow for attempted usage
// of a dropped task.
key_gen: KeyGen<N, D>,
signers: HashMap<Vec<u8>, Signer<N, D>>,
signers: HashMap<Session, Signer<N, D>>,
// This is also mutably borrowed by the Scanner.
// The Scanner starts new sign tasks.
@@ -187,25 +187,26 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable: &mut SubstrateMutable<N, D>,
tributary_mutable: &mut TributaryMutable<N, D>,
txn: &mut D::Transaction<'_>,
set: ValidatorSet,
session: Session,
key_pair: KeyPair,
activation_number: usize,
) {
info!("activating {set:?}'s keys at {activation_number}");
info!("activating {session:?}'s keys at {activation_number}");
let network_key = <N as Network>::Curve::read_G::<&[u8]>(&mut key_pair.1.as_ref())
.expect("Substrate finalized invalid point as a network's key");
if tributary_mutable.key_gen.in_set(&set) {
if tributary_mutable.key_gen.in_set(&session) {
// See TributaryMutable's struct definition for why this block is safe
let KeyConfirmed { substrate_keys, network_keys } =
tributary_mutable.key_gen.confirm(txn, set, key_pair.clone()).await;
if set.session.0 == 0 {
tributary_mutable.batch_signer = Some(BatchSigner::new(N::NETWORK, substrate_keys));
tributary_mutable.key_gen.confirm(txn, session, key_pair.clone()).await;
if session.0 == 0 {
tributary_mutable.batch_signer =
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
tributary_mutable
.signers
.insert(key_pair.1.into(), Signer::new(network.clone(), network_keys));
.insert(session, Signer::new(network.clone(), session, network_keys));
}
substrate_mutable.add_key(txn, activation_number, network_key).await;
@@ -219,7 +220,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
CoordinatorMessage::Sign(msg) => {
if let Some(msg) = tributary_mutable
.signers
.get_mut(msg.key())
.get_mut(&msg.session())
.expect("coordinator told us to sign with a signer we don't have")
.handle(txn, msg)
.await
@@ -257,11 +258,11 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
let SubstrateSignableId::CosigningSubstrateBlock(block) = id.id else {
panic!("CosignSubstrateBlock id didn't have a CosigningSubstrateBlock")
};
let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_substrate_key(&id.key)
else {
let Some(keys) = tributary_mutable.key_gen.substrate_keys_by_session(id.session) else {
panic!("didn't have key shares for the key we were told to cosign with");
};
if let Some((cosigner, msg)) = Cosigner::new(txn, keys, block_number, block, id.attempt)
if let Some((cosigner, msg)) =
Cosigner::new(txn, id.session, keys, block_number, block, id.attempt)
{
tributary_mutable.cosigner = Some(cosigner);
coordinator.send(msg).await;
@@ -287,7 +288,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
CoordinatorMessage::Substrate(msg) => {
match msg {
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, set, key_pair } => {
messages::substrate::CoordinatorMessage::ConfirmKeyPair { context, session, key_pair } => {
// This is the first key pair for this network so no block has been finalized yet
// TODO: Write documentation for this in docs/
// TODO: Use an Option instead of a magic?
@@ -339,7 +340,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable,
tributary_mutable,
txn,
set,
session,
key_pair,
activation_number,
)
@@ -355,7 +356,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
PendingActivationsDb::set_pending_activation::<N>(
txn,
block_before_queue_block,
set,
session,
key_pair,
);
}
@@ -363,14 +364,13 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context,
network: network_id,
block: substrate_block,
burns,
batches,
} => {
assert_eq!(network_id, N::NETWORK, "coordinator sent us data for another network");
if let Some((block, set, key_pair)) = PendingActivationsDb::pending_activation::<N>(txn) {
if let Some((block, session, key_pair)) =
PendingActivationsDb::pending_activation::<N>(txn)
{
// Only run if this is a Batch belonging to a distinct block
if context.network_latest_finalized_block.as_ref() != block.as_ref() {
let mut queue_block = <N::Block as Block<N>>::Id::default();
@@ -387,7 +387,7 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
substrate_mutable,
tributary_mutable,
txn,
set,
session,
key_pair,
activation_number,
)
@@ -412,13 +412,12 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
if !tributary_mutable.signers.is_empty() {
coordinator
.send(messages::coordinator::ProcessorMessage::SubstrateBlockAck {
network: N::NETWORK,
block: substrate_block,
plans: to_sign
.iter()
.map(|signable| PlanMeta {
key: signable.0.to_bytes().as_ref().to_vec(),
id: signable.1,
.filter_map(|signable| {
SessionDb::get(txn, signable.0.to_bytes().as_ref())
.map(|session| PlanMeta { session, id: signable.1 })
})
.collect(),
})
@@ -428,7 +427,8 @@ async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
// See commentary in TributaryMutable for why this is safe
let signers = &mut tributary_mutable.signers;
for (key, id, tx, eventuality) in to_sign {
if let Some(signer) = signers.get_mut(key.to_bytes().as_ref()) {
if let Some(session) = SessionDb::get(txn, key.to_bytes().as_ref()) {
let signer = signers.get_mut(&session).unwrap();
if let Some(msg) = signer.sign_transaction(txn, id, tx, eventuality).await {
coordinator.send(msg).await;
}
@@ -494,7 +494,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
let mut signers = HashMap::new();
for (i, key) in current_keys.iter().enumerate() {
let Some((substrate_keys, network_keys)) = key_gen.keys(key) else { continue };
let Some((session, (substrate_keys, network_keys))) = key_gen.keys(key) else { continue };
let network_key = network_keys[0].group_key();
// If this is the oldest key, load the BatchSigner for it as the active BatchSigner
@@ -503,7 +503,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
// We don't have to load any state for this since the Scanner will re-fire any events
// necessary, only no longer scanning old blocks once Substrate acks them
if i == 0 {
batch_signer = Some(BatchSigner::new(N::NETWORK, substrate_keys));
batch_signer = Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
// The Scanner re-fires events as needed for batch_signer yet not signer
@@ -514,10 +514,9 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
// 2) Cause re-emission of Batch events, which we'd need to check the safety of
// (TODO: Do anyways?)
// 3) Violate the attempt counter (TODO: Is this already being violated?)
let mut signer = Signer::new(network.clone(), network_keys);
let mut signer = Signer::new(network.clone(), session, network_keys);
// Sign any TXs being actively signed
let key = key.to_bytes();
for (plan, tx, eventuality) in &actively_signing {
if plan.key == network_key {
let mut txn = raw_db.txn();
@@ -531,7 +530,7 @@ async fn boot<N: Network, D: Db, Co: Coordinator>(
}
}
signers.insert(key.as_ref().to_vec(), signer);
signers.insert(session, signer);
}
// Spawn a task to rebroadcast signed TXs yet to be mined into a finalized block
@@ -630,16 +629,20 @@ async fn run<N: Network, D: Db, Co: Coordinator>(mut raw_db: D, network: N, mut
if let Some((retired_key, new_key)) = retired_key_new_key {
// Safe to mutate since all signing operations are done and no more will be added
tributary_mutable.signers.remove(retired_key.to_bytes().as_ref());
if let Some(retired_session) = SessionDb::get(&txn, retired_key.to_bytes().as_ref()) {
tributary_mutable.signers.remove(&retired_session);
}
tributary_mutable.batch_signer.take();
if let Some((substrate_keys, _)) = tributary_mutable.key_gen.keys(&new_key) {
let keys = tributary_mutable.key_gen.keys(&new_key);
if let Some((session, (substrate_keys, _))) = keys {
tributary_mutable.batch_signer =
Some(BatchSigner::new(N::NETWORK, substrate_keys));
Some(BatchSigner::new(N::NETWORK, session, substrate_keys));
}
}
},
MultisigEvent::Completed(key, id, tx) => {
if let Some(signer) = tributary_mutable.signers.get_mut(&key) {
if let Some(session) = SessionDb::get(&txn, &key) {
let signer = tributary_mutable.signers.get_mut(&session).unwrap();
if let Some(msg) = signer.completed(&mut txn, id, tx) {
coordinator.send(msg).await;
}

View File

@@ -11,6 +11,7 @@ use frost::{
use log::{info, debug, warn, error};
use scale::Encode;
use serai_client::validator_sets::primitives::Session;
use messages::sign::*;
pub use serai_db::*;
@@ -131,6 +132,7 @@ pub struct Signer<N: Network, D: Db> {
network: N,
session: Session,
keys: Vec<ThresholdKeys<N::Curve>>,
signable: HashMap<[u8; 32], N::SignableTransaction>,
@@ -172,13 +174,14 @@ impl<N: Network, D: Db> Signer<N, D> {
tokio::time::sleep(core::time::Duration::from_secs(5 * 60)).await;
}
}
pub fn new(network: N, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
pub fn new(network: N, session: Session, keys: Vec<ThresholdKeys<N::Curve>>) -> Signer<N, D> {
assert!(!keys.is_empty());
Signer {
db: PhantomData,
network,
session,
keys,
signable: HashMap::new(),
@@ -250,11 +253,7 @@ impl<N: Network, D: Db> Signer<N, D> {
self.signing.remove(&id);
// Emit the event for it
ProcessorMessage::Completed {
key: self.keys[0].group_key().to_bytes().as_ref().to_vec(),
id,
tx: tx_id.as_ref().to_vec(),
}
ProcessorMessage::Completed { session: self.session, id, tx: tx_id.as_ref().to_vec() }
}
#[must_use]
@@ -371,7 +370,7 @@ impl<N: Network, D: Db> Signer<N, D> {
// Update the attempt number
self.attempt.insert(id, attempt);
let id = SignId { key: self.keys[0].group_key().to_bytes().as_ref().to_vec(), id, attempt };
let id = SignId { session: self.session, id, attempt };
info!("signing for {} #{}", hex::encode(id.id), id.attempt);
@@ -603,7 +602,7 @@ impl<N: Network, D: Db> Signer<N, D> {
CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await,
CoordinatorMessage::Completed { key: _, id, tx: mut tx_vec } => {
CoordinatorMessage::Completed { session: _, id, tx: mut tx_vec } => {
let mut tx = <N::Transaction as Transaction<N>>::Id::default();
if tx.as_ref().len() != tx_vec.len() {
let true_len = tx_vec.len();

View File

@@ -7,6 +7,8 @@ use frost::{Participant, ThresholdKeys};
use tokio::time::timeout;
use serai_client::validator_sets::primitives::Session;
use serai_db::{DbTxn, MemDb};
use crate::{
@@ -50,7 +52,7 @@ async fn spend<N: Network, D: Db>(
),
);
}
sign(network.clone(), keys_txs).await;
sign(network.clone(), Session(0), keys_txs).await;
for _ in 0 .. N::CONFIRMATIONS {
network.mine_block().await;

View File

@@ -14,7 +14,8 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public};
use serai_db::{DbTxn, Db, MemDb};
use scale::Encode;
use serai_client::{primitives::*, in_instructions::primitives::*};
#[rustfmt::skip]
use serai_client::{primitives::*, in_instructions::primitives::*, validator_sets::primitives::Session};
use messages::{
substrate,
@@ -49,7 +50,7 @@ async fn test_batch_signer() {
};
let actual_id = SubstrateSignId {
key: keys.values().next().unwrap().group_key().to_bytes(),
session: Session(0),
id: SubstrateSignableId::Batch((batch.network, batch.id).encode().try_into().unwrap()),
attempt: 0,
};
@@ -73,7 +74,7 @@ async fn test_batch_signer() {
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.get(&i).unwrap().clone();
let mut signer = BatchSigner::<MemDb>::new(NetworkId::Monero, vec![keys]);
let mut signer = BatchSigner::<MemDb>::new(NetworkId::Monero, Session(0), vec![keys]);
let mut db = MemDb::new();
let mut txn = db.txn();

View File

@@ -13,7 +13,7 @@ use sp_application_crypto::{RuntimePublic, sr25519::Public};
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::*;
use serai_client::{primitives::*, validator_sets::primitives::Session};
use messages::coordinator::*;
use crate::cosigner::Cosigner;
@@ -28,7 +28,7 @@ async fn test_cosigner() {
let block = [0xaa; 32];
let actual_id = SubstrateSignId {
key: keys.values().next().unwrap().group_key().to_bytes(),
session: Session(0),
id: SubstrateSignableId::CosigningSubstrateBlock(block),
attempt: (OsRng.next_u64() >> 32).try_into().unwrap(),
};
@@ -55,7 +55,8 @@ async fn test_cosigner() {
let mut db = MemDb::new();
let mut txn = db.txn();
let (signer, preprocess) =
Cosigner::new(&mut txn, vec![keys], block_number, block, actual_id.attempt).unwrap();
Cosigner::new(&mut txn, Session(0), vec![keys], block_number, block, actual_id.attempt)
.unwrap();
match preprocess {
// All participants should emit a preprocess

View File

@@ -10,10 +10,7 @@ use frost::{Participant, ThresholdParams, tests::clone_without};
use serai_db::{DbTxn, Db, MemDb};
use sp_application_crypto::sr25519;
use serai_client::{
primitives::NetworkId,
validator_sets::primitives::{Session, ValidatorSet, KeyPair},
};
use serai_client::validator_sets::primitives::{Session, KeyPair};
use messages::key_gen::*;
use crate::{
@@ -21,8 +18,7 @@ use crate::{
key_gen::{KeyConfirmed, KeyGen},
};
const ID: KeyGenId =
KeyGenId { set: ValidatorSet { session: Session(1), network: NetworkId::Monero }, attempt: 3 };
const ID: KeyGenId = KeyGenId { session: Session(1), attempt: 3 };
pub async fn test_key_gen<N: Network>() {
let mut entropies = HashMap::new();
@@ -139,7 +135,11 @@ pub async fn test_key_gen<N: Network>() {
let key_gen = key_gens.get_mut(&i).unwrap();
let mut txn = dbs.get_mut(&i).unwrap().txn();
let KeyConfirmed { mut substrate_keys, mut network_keys } = key_gen
.confirm(&mut txn, ID.set, KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()))
.confirm(
&mut txn,
ID.session,
KeyPair(sr25519::Public(res.0), res.1.clone().try_into().unwrap()),
)
.await;
txn.commit();

View File

@@ -2,7 +2,6 @@ use std::collections::HashMap;
use rand_core::{RngCore, OsRng};
use ciphersuite::group::GroupEncoding;
use frost::{
Participant, ThresholdKeys,
dkg::tests::{key_gen, clone_without},
@@ -10,7 +9,10 @@ use frost::{
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::{NetworkId, Coin, Amount, Balance};
use serai_client::{
primitives::{NetworkId, Coin, Amount, Balance},
validator_sets::primitives::Session,
};
use messages::sign::*;
use crate::{
@@ -22,22 +24,17 @@ use crate::{
#[allow(clippy::type_complexity)]
pub async fn sign<N: Network>(
network: N,
session: Session,
mut keys_txs: HashMap<
Participant,
(ThresholdKeys<N::Curve>, (N::SignableTransaction, N::Eventuality)),
>,
) -> <N::Transaction as Transaction<N>>::Id {
let actual_id = SignId {
key: keys_txs[&Participant::new(1).unwrap()].0.group_key().to_bytes().as_ref().to_vec(),
id: [0xaa; 32],
attempt: 0,
};
let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 };
let mut group_key = None;
let mut keys = HashMap::new();
let mut txs = HashMap::new();
for (i, (these_keys, this_tx)) in keys_txs.drain() {
group_key = Some(these_keys.group_key());
keys.insert(i, these_keys);
txs.insert(i, this_tx);
}
@@ -49,7 +46,7 @@ pub async fn sign<N: Network>(
let i = Participant::new(u16::try_from(i).unwrap()).unwrap();
let keys = keys.remove(&i).unwrap();
t = keys.params().t();
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), vec![keys]));
signers.insert(i, Signer::<_, MemDb>::new(network.clone(), Session(0), vec![keys]));
dbs.insert(i, MemDb::new());
}
drop(keys);
@@ -130,8 +127,8 @@ pub async fn sign<N: Network>(
.await
.unwrap()
{
ProcessorMessage::Completed { key, id, tx } => {
assert_eq!(&key, group_key.unwrap().to_bytes().as_ref());
ProcessorMessage::Completed { session, id, tx } => {
assert_eq!(session, Session(0));
assert_eq!(id, actual_id.id);
if tx_id.is_none() {
tx_id = Some(tx.clone());
@@ -196,7 +193,7 @@ pub async fn test_signer<N: Network>(network: N) {
// The signer may not publish the TX if it has a connection error
// It doesn't fail in this case
let txid = sign(network.clone(), keys_txs).await;
let txid = sign(network.clone(), Session(0), keys_txs).await;
let tx = network.get_transaction(&txid).await.unwrap();
assert_eq!(tx.id(), txid);
// Mine a block, and scan it, to ensure that the TX actually made it on chain

View File

@@ -8,7 +8,10 @@ use tokio::time::timeout;
use serai_db::{DbTxn, Db, MemDb};
use serai_client::primitives::{NetworkId, Coin, Amount, Balance};
use serai_client::{
primitives::{NetworkId, Coin, Amount, Balance},
validator_sets::primitives::Session,
};
use crate::{
Payment, Plan,
@@ -140,7 +143,7 @@ pub async fn test_wallet<N: Network>(network: N) {
keys_txs.insert(i, (keys, (signable, eventuality)));
}
let txid = sign(network.clone(), keys_txs).await;
let txid = sign(network.clone(), Session(0), keys_txs).await;
let tx = network.get_transaction(&txid).await.unwrap();
network.mine_block().await;
let block_number = network.get_latest_block_number().await.unwrap();