use core::{ops::Deref, future::Future};
use std::collections::HashMap;

use zeroize::Zeroizing;

use rand_core::SeedableRng;
use rand_chacha::ChaCha20Rng;

use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{Ciphersuite, Ristretto};
use frost::{
  FrostError,
  dkg::{Participant, musig::musig},
  sign::*,
};
use frost_schnorrkel::Schnorrkel;

use serai_client::{
  Signature,
  validator_sets::primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message},
  subxt::utils::Encoded,
  Serai,
};

use tokio::sync::mpsc::UnboundedSender;

use tributary::{Signed, Block, TributaryReader};

use processor_messages::{
  key_gen::{self, KeyGenId},
  sign::{self, SignId},
  coordinator, CoordinatorMessage,
};

use serai_db::{Get, DbTxn};

use crate::{
  Db,
  processors::Processors,
  tributary::{TributaryDb, TributarySpec, Transaction},
};
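
// Database labels under which each validator's DKG-confirmation data is stored; data saved under
// these is read back via `read_known_to_exist_data` once present for everyone.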
const DKG_CONFIRMATION_NONCES: &[u8] = b"dkg_confirmation_nonces";
const DKG_CONFIRMATION_SHARES: &[u8] = b"dkg_confirmation_shares";

// Instead of maintaining state, this simply re-creates the machine(s) in-full on every call.
// This simplifies data flow and prevents requiring multiple paths.
// While more expensive, this only runs an O(n) algorithm, which is tolerable to run multiple
// times.
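//
// The flow per attempt, as a sketch (`preprocesses`/`shares` being the maps of everyone else's
// published data, read back off the tributary):
//   let nonces = DkgConfirmer::preprocess(spec, key);
//   // ... publish our nonces, collect everyone's ...
//   let share = DkgConfirmer::share(spec, key, preprocesses, &key_pair)?;
//   // ... publish our share, collect everyone's ...
//   let sig = DkgConfirmer::complete(spec, key, preprocesses, &key_pair, shares)?;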
struct DkgConfirmer;
impl DkgConfirmer {
  fn preprocess_internal(
    spec: &TributarySpec,
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  ) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
    // TODO: Does Substrate already have a validator-uniqueness check?
    let validators = spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();

    let context = musig_context(spec.set());
    let mut chacha = ChaCha20Rng::from_seed({
      let mut entropy_transcript = RecommendedTranscript::new(b"DkgConfirmer Entropy");
      entropy_transcript.append_message(b"spec", spec.serialize());
      entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes()));
      // TODO: This is incredibly insecure unless message-bound (or bound via the attempt)
      Zeroizing::new(entropy_transcript).rng_seed(b"preprocess")
    });
    let (machine, preprocess) = AlgorithmMachine::new(
      Schnorrkel::new(b"substrate"),
      musig(&context, key, &validators)
        .expect("confirming the DKG for a set we aren't in/validator present multiple times")
        .into(),
    )
    .preprocess(&mut chacha);

    (machine, preprocess.serialize().try_into().unwrap())
  }
  // Get the preprocess for this confirmation.
  fn preprocess(spec: &TributarySpec, key: &Zeroizing<<Ristretto as Ciphersuite>::F>) -> [u8; 64] {
    Self::preprocess_internal(spec, key).1
  }
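
  // Get the signature machine and our signature share for this confirmation, if everyone's
  // preprocesses are valid (otherwise, return a faulty participant).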
  fn share_internal(
    spec: &TributarySpec,
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
    let machine = Self::preprocess_internal(spec, key).0;
    let preprocesses = preprocesses
      .into_iter()
      .map(|(p, preprocess)| {
        machine
          .read_preprocess(&mut preprocess.as_slice())
          .map(|preprocess| (p, preprocess))
          .map_err(|_| p)
      })
      .collect::<Result<HashMap<_, _>, _>>()?;
    let (machine, share) = machine
      .sign(preprocesses, &set_keys_message(&spec.set(), key_pair))
      .map_err(|e| match e {
        FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
        FrostError::InvalidParticipant(_, _) |
        FrostError::InvalidSigningSet(_) |
        FrostError::InvalidParticipantQuantity(_, _) |
        FrostError::DuplicatedParticipant(_) |
        FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
        FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
      })?;

    Ok((machine, share.serialize().try_into().unwrap()))
  }
  // Get the share for this confirmation, if the preprocesses are valid.
  fn share(
    spec: &TributarySpec,
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
  ) -> Result<[u8; 32], Participant> {
    Self::share_internal(spec, key, preprocesses, key_pair).map(|(_, share)| share)
  }
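
  // Complete the confirmation signature, given everyone's preprocesses and shares, yielding the
  // 64-byte signature to publish to Serai (or a faulty participant).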
  fn complete(
    spec: &TributarySpec,
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    preprocesses: HashMap<Participant, Vec<u8>>,
    key_pair: &KeyPair,
    shares: HashMap<Participant, Vec<u8>>,
  ) -> Result<[u8; 64], Participant> {
    let machine = Self::share_internal(spec, key, preprocesses, key_pair)
      .expect("trying to complete a machine which failed to preprocess")
      .0;

    let shares = shares
      .into_iter()
      .map(|(p, share)| {
        machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
      })
      .collect::<Result<HashMap<_, _>, _>>()?;
    let signature = machine.complete(shares).map_err(|e| match e {
      FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
      FrostError::InvalidParticipant(_, _) |
      FrostError::InvalidSigningSet(_) |
      FrostError::InvalidParticipantQuantity(_, _) |
      FrostError::DuplicatedParticipant(_) |
      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
    })?;

    Ok(signature.to_bytes())
  }
}
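
// Read back, for every validator, the data stored under `label` for (id, attempt), using `bytes`
// as the entry of whoever produced `signed`. Asserts exactly `needed` entries exist, then
// removes our own entry before returning the map.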
#[allow(clippy::too_many_arguments)] // TODO
fn read_known_to_exist_data<D: Db, G: Get>(
  getter: &G,
  spec: &TributarySpec,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  label: &'static [u8],
  id: [u8; 32],
  needed: u16,
  attempt: u32,
  bytes: Vec<u8>,
  signed: Option<&Signed>,
) -> HashMap<Participant, Vec<u8>> {
  let mut data = HashMap::new();
  for validator in spec.validators().iter().map(|validator| validator.0) {
    data.insert(
      spec.i(validator).unwrap(),
      if Some(&validator) == signed.map(|signed| &signed.signer) {
        bytes.clone()
      } else if let Some(data) =
        TributaryDb::<D>::data(label, getter, spec.genesis(), id, attempt, validator)
      {
        data
      } else {
        continue;
      },
    );
  }
  assert_eq!(data.len(), usize::from(needed));

  // Remove our own piece of data
  assert!(data
    .remove(
      &spec
        .i(Ristretto::generator() * key.deref())
        .expect("handling a message for a Tributary we aren't part of")
    )
    .is_some());

  data
}
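
// Produce our preprocess for the DKG confirmation, to be published as this attempt's
// confirmation nonces.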
pub fn dkg_confirmation_nonces(
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  spec: &TributarySpec,
) -> [u8; 64] {
  DkgConfirmer::preprocess(spec, key)
}
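
// Save the generated key pair as the one currently pending confirmation, then produce our share
// of the confirmation signature from everyone's published nonces.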
#[allow(clippy::needless_pass_by_ref_mut)]
pub fn generated_key_pair<D: Db>(
  txn: &mut D::Transaction<'_>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  spec: &TributarySpec,
  key_pair: &KeyPair,
) -> Result<[u8; 32], Participant> {
  TributaryDb::<D>::save_currently_completing_key_pair(txn, spec.genesis(), key_pair);

  let attempt = 0; // TODO
  let preprocesses = read_known_to_exist_data::<D, _>(
    txn,
    spec,
    key,
    DKG_CONFIRMATION_NONCES,
    [0; 32],
    spec.n(),
    attempt,
    vec![],
    None,
  );
  DkgConfirmer::share(spec, key, preprocesses, key_pair)
}
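
// The type of ID being recognized as signable: an external block (whose batch may now be signed)
// or a plan (made signable by a finalized Substrate block).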
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum RecognizedIdType {
  Block,
  Plan,
}

// Handle a specific Tributary block
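// `publish_serai_tx` is the callback used to publish the resulting Serai transaction (setting
// the validator set's keys) once the DKG confirmation completes.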
#[allow(clippy::needless_pass_by_ref_mut)] // False positive?
async fn handle_block<
  D: Db,
  Pro: Processors,
  F: Future<Output = ()>,
  PST: Fn(ValidatorSet, Encoded) -> F,
>(
  db: &mut TributaryDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  recognized_id: &UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
  processors: &Pro,
  publish_serai_tx: PST,
  spec: &TributarySpec,
  block: Block<Transaction>,
) {
  log::info!("found block for Tributary {:?}", spec.set());

  let genesis = spec.genesis();
  let hash = block.hash();

  let mut event_id = 0;
  #[allow(clippy::explicit_counter_loop)] // event_id isn't TX index. It just currently lines up
  for tx in block.transactions {
    if !TributaryDb::<D>::handled_event(&db.0, hash, event_id) {
      let mut txn = db.0.txn();

      // Used to determine if an ID is acceptable
      #[derive(Clone, Copy, PartialEq, Eq, Debug)]
      enum Zone {
        Dkg,
        Batch,
        Sign,
      }

      impl Zone {
        fn label(&self) -> &'static str {
          match self {
            Zone::Dkg => {
              panic!("getting the label for dkg despite dkg code paths not needing a label")
            }
            Zone::Batch => "batch",
            Zone::Sign => "sign",
          }
        }
      }
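
      // Accumulate one validator's data for this (zone, label, id, attempt), slashing on any
      // inconsistency, and yield the full map of everyone's data once `needed` pieces arrive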
      let handle =
        |txn: &mut _, zone: Zone, label, needed, id, attempt, bytes: Vec<u8>, signed: &Signed| {
          if zone == Zone::Dkg {
            // Since Dkg doesn't have an ID, solely attempts, this should just be [0; 32]
            assert_eq!(id, [0; 32], "DKG, which shouldn't have IDs, had a non-0 ID");
          } else if !TributaryDb::<D>::recognized_id(txn, zone.label(), genesis, id) {
            // TODO: Full slash
            todo!();
          }

          // If they've already published a TX for this attempt, slash
          if let Some(data) =
            TributaryDb::<D>::data(label, txn, genesis, id, attempt, signed.signer)
          {
            if data != bytes {
              // TODO: Full slash
              todo!();
            }

            // TODO: Slash
            return None;
          }

          // If the attempt is less than the blockchain's, slash
          let curr_attempt = TributaryDb::<D>::attempt(txn, genesis, id);
          if attempt < curr_attempt {
            // TODO: Slash for being late
            return None;
          }
          if attempt > curr_attempt {
            // TODO: Full slash
            todo!();
          }

          // TODO: We can also full slash if shares before all commitments, or share before the
          // necessary preprocesses

          // TODO: If this is shares, we need to check they are part of the selected signing set

          // Store this data
          let received =
            TributaryDb::<D>::set_data(label, txn, genesis, id, attempt, signed.signer, &bytes);

          // If we have all the needed commitments/preprocesses/shares, tell the processor
          // TODO: This needs to be coded by weight, not by validator count
          if received == needed {
            return Some(read_known_to_exist_data::<D, _>(
              txn,
              spec,
              key,
              label,
              id,
              needed,
              attempt,
              bytes,
              Some(signed),
            ));
          }
          None
        };

      match tx {
        Transaction::DkgCommitments(attempt, bytes, signed) => {
          if let Some(commitments) = handle(
            &mut txn,
            Zone::Dkg,
            b"dkg_commitments",
            spec.n(),
            [0; 32],
            attempt,
            bytes,
            &signed,
          ) {
            log::info!("got all DkgCommitments for {}", hex::encode(genesis));
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
                  id: KeyGenId { set: spec.set(), attempt },
                  commitments,
                }),
              )
              .await;
          }
        }

        Transaction::DkgShares { attempt, sender_i, mut shares, confirmation_nonces, signed } => {
          if sender_i !=
            spec
              .i(signed.signer)
              .expect("transaction added to tributary by signer who isn't a participant")
          {
            // TODO: Full slash
            todo!();
          }

          if shares.len() != (usize::from(spec.n()) - 1) {
            // TODO: Full slash
            todo!();
          }

          // Only save our share's bytes
          let our_i = spec
            .i(Ristretto::generator() * key.deref())
            .expect("in a tributary we're not a validator for");
          // This unwrap is safe since the length of shares is checked, and the only missing key
          // within the valid range will be the sender's i
          let bytes = if sender_i == our_i { vec![] } else { shares.remove(&our_i).unwrap() };

          let confirmation_nonces = handle(
            &mut txn,
            Zone::Dkg,
            DKG_CONFIRMATION_NONCES,
            spec.n(),
            [0; 32],
            attempt,
            confirmation_nonces.to_vec(),
            &signed,
          );
          if let Some(shares) =
            handle(&mut txn, Zone::Dkg, b"dkg_shares", spec.n(), [0; 32], attempt, bytes, &signed)
          {
            log::info!("got all DkgShares for {}", hex::encode(genesis));
            assert!(confirmation_nonces.is_some());
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
                  id: KeyGenId { set: spec.set(), attempt },
                  shares,
                }),
              )
              .await;
          } else {
            assert!(confirmation_nonces.is_none());
          }
        }

        Transaction::DkgConfirmed(attempt, shares, signed) => {
          if let Some(shares) = handle(
            &mut txn,
            Zone::Dkg,
            DKG_CONFIRMATION_SHARES,
            spec.n(),
            [0; 32],
            attempt,
            shares.to_vec(),
            &signed,
          ) {
            log::info!("got all DkgConfirmed for {}", hex::encode(genesis));

            let preprocesses = read_known_to_exist_data::<D, _>(
              &txn,
              spec,
              key,
              DKG_CONFIRMATION_NONCES,
              [0; 32],
              spec.n(),
              attempt,
              vec![],
              None,
            );

            let key_pair = TributaryDb::<D>::currently_completing_key_pair(&txn, genesis)
              .unwrap_or_else(|| {
                panic!(
                  "in DkgConfirmed handling, which happens after everyone {}",
                  "(including us) fires DkgConfirmed, yet no confirming key pair"
                )
              });
            let Ok(sig) = DkgConfirmer::complete(spec, key, preprocesses, &key_pair, shares) else {
              // TODO: Full slash
              todo!();
            };

            publish_serai_tx(
              spec.set(),
              Serai::set_validator_set_keys(spec.set().network, key_pair, Signature(sig)),
            )
            .await;
          }
        }

        Transaction::ExternalBlock(block) => {
          // Because this external block has been finalized, its batch ID should be authorized
          TributaryDb::<D>::recognize_id(&mut txn, Zone::Batch.label(), genesis, block);
          recognized_id
            .send((genesis, RecognizedIdType::Block, block))
            .expect("recognized_id_recv was dropped. are we shutting down?");
        }

        Transaction::SubstrateBlock(block) => {
          let plan_ids = TributaryDb::<D>::plan_ids(&txn, genesis, block).expect(
            "synced a tributary block finalizing a substrate block in a provided transaction \
            despite us not providing that transaction",
          );

          for id in plan_ids {
            TributaryDb::<D>::recognize_id(&mut txn, Zone::Sign.label(), genesis, id);
            recognized_id
              .send((genesis, RecognizedIdType::Plan, id))
              .expect("recognized_id_recv was dropped. are we shutting down?");
          }
        }

        Transaction::BatchPreprocess(data) => {
          if let Some(preprocesses) = handle(
            &mut txn,
            Zone::Batch,
            b"batch_preprocess",
            spec.t(),
            data.plan,
            data.attempt,
            data.data,
            &data.signed,
          ) {
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::Coordinator(
                  coordinator::CoordinatorMessage::BatchPreprocesses {
                    id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
                    preprocesses,
                  },
                ),
              )
              .await;
          }
        }
        Transaction::BatchShare(data) => {
          if let Some(shares) = handle(
            &mut txn,
            Zone::Batch,
            b"batch_share",
            spec.t(),
            data.plan,
            data.attempt,
            data.data,
            &data.signed,
          ) {
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::Coordinator(coordinator::CoordinatorMessage::BatchShares {
                  id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
                  shares: shares
                    .drain()
                    .map(|(validator, share)| (validator, share.try_into().unwrap()))
                    .collect(),
                }),
              )
              .await;
          }
        }

        Transaction::SignPreprocess(data) => {
          if let Some(preprocesses) = handle(
            &mut txn,
            Zone::Sign,
            b"sign_preprocess",
            spec.t(),
            data.plan,
            data.attempt,
            data.data,
            &data.signed,
          ) {
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::Sign(sign::CoordinatorMessage::Preprocesses {
                  id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
                  preprocesses,
                }),
              )
              .await;
          }
        }
        Transaction::SignShare(data) => {
          if let Some(shares) = handle(
            &mut txn,
            Zone::Sign,
            b"sign_share",
            spec.t(),
            data.plan,
            data.attempt,
            data.data,
            &data.signed,
          ) {
            processors
              .send(
                spec.set().network,
                CoordinatorMessage::Sign(sign::CoordinatorMessage::Shares {
                  id: SignId { key: todo!(), id: data.plan, attempt: data.attempt },
                  shares,
                }),
              )
              .await;
          }
        }
        Transaction::SignCompleted(_, _, _) => todo!(),
      }

      TributaryDb::<D>::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  // TODO: Trigger any necessary re-attempts
}
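
// Sync and handle, in order, any blocks of this Tributary not yet processed, advancing the
// last-handled block in the database as we go.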
pub async fn handle_new_blocks<
  D: Db,
  Pro: Processors,
  F: Future<Output = ()>,
  PST: Clone + Fn(ValidatorSet, Encoded) -> F,
>(
  db: &mut TributaryDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  recognized_id: &UnboundedSender<([u8; 32], RecognizedIdType, [u8; 32])>,
  processors: &Pro,
  publish_serai_tx: PST,
  spec: &TributarySpec,
  tributary: &TributaryReader<D, Transaction>,
) {
  let genesis = tributary.genesis();
  let mut last_block = db.last_block(genesis);
  while let Some(next) = tributary.block_after(&last_block) {
    let block = tributary.block(&next).unwrap();
    handle_block(db, key, recognized_id, processors, publish_serai_tx.clone(), spec, block).await;
    last_block = next;
    db.set_last_block(genesis, next);
  }
}