DKG Removals (#467)
* Update ValidatorSets with a remove_participant call

* Add DkgRemoval, a sign machine for producing the relevant MuSig signatures

* Don't use position-dependent u8s, yet Public, when removing validators from the DKG

* Add DkgRemovalPreprocess, DkgRemovalShares

  Implemented via a new publish_tributary_tx lambda. This code is a copy-pasted mess which will need to be cleaned up.

* Only allow non-removed validators to vote for removals

  Otherwise, the remaining validators risk falling below 67% of the original set.

* Correct publish_serai_tx, which was prior publish_set_keys in practice
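The heart of the change: a fatally slashed validator can now be removed from an in-progress DKG on-chain. The remaining validators produce a MuSig signature over remove_participant_message, signed by the first `t` participants to respond rather than all `n`. MuSig wants its signers indexed 1..=t in a canonical order, so the new DkgRemoval::from_threshold_i_to_musig_i (in the diff below) sorts the responders' keys and assigns indexes by position. A minimal standalone sketch of that remapping follows; the helper name to_musig_indexes and the bare u16 index are illustrative stand-ins for the real frost::dkg::Participant type:

use std::collections::HashMap;

// Sort the responders' 32-byte keys and assign MuSig indexes 1..=t by
// position, mirroring from_threshold_i_to_musig_i in the new file below.
fn to_musig_indexes(mut by_key: HashMap<[u8; 32], Vec<u8>>) -> HashMap<u16, Vec<u8>> {
  let mut keys = by_key.keys().cloned().collect::<Vec<_>>();
  keys.sort();
  let mut by_index = HashMap::new();
  for (i, key) in keys.into_iter().enumerate() {
    by_index.insert(u16::try_from(i + 1).unwrap(), by_key.remove(&key).unwrap());
  }
  by_index
}

fn main() {
  let mut preprocesses = HashMap::new();
  preprocesses.insert([2; 32], b"validator B's preprocess".to_vec());
  preprocesses.insert([1; 32], b"validator A's preprocess".to_vec());
  let indexed = to_musig_indexes(preprocesses);
  // [1; 32] sorts first, so its owner becomes MuSig participant 1.
  assert_eq!(indexed[&1], b"validator A's preprocess".to_vec());
  assert_eq!(indexed[&2], b"validator B's preprocess".to_vec());
}

Because every honest signer sorts the same key set, all of them derive the same MuSig indexing without further coordination.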
@@ -199,7 +199,6 @@ async fn handle_processor_message<D: Db, P: P2p>(
       key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } => Some(id.session),
       key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
     },
-    // TODO: Review replacing key with Session in messages?
     ProcessorMessage::Sign(inner_msg) => match inner_msg {
       // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
       sign::ProcessorMessage::InvalidParticipant { id, .. } => Some(id.session),
@@ -24,7 +24,10 @@ use processor_messages::{
 use tributary::{TransactionTrait, Tributary};
 
 use crate::{
-  tributary::{Transaction, TributarySpec, scanner::handle_new_blocks},
+  tributary::{
+    Transaction, TributarySpec,
+    scanner::{PstTxType, handle_new_blocks},
+  },
   tests::{
     MemProcessors, LocalP2p,
     tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},

@@ -85,14 +88,19 @@ async fn dkg_test() {
   ) -> (MemDb, MemProcessors) {
     let mut scanner_db = MemDb::new();
     let processors = MemProcessors::new();
-    handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
+    handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
       &mut scanner_db,
       key,
       |_, _, _, _| async {
         panic!("provided TX caused recognized_id to be called in new_processors")
       },
       &processors,
-      |_, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
+      |_, _, _| async { panic!("test tried to publish a new Serai TX in new_processors") },
+      &|_| async {
+        panic!(
+          "test tried to publish a new Tributary TX from handle_application_tx in new_processors"
+        )
+      },
       spec,
       &tributary.reader(),
     )

@@ -111,14 +119,19 @@ async fn dkg_test() {
   sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
 
   // Verify the scanner emits a KeyGen::Commitments message
-  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
     |_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after Commitments")
     },
     &processors,
-    |_, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
+    |_, _, _| async { panic!("test tried to publish a new Serai TX after Commitments") },
+    &|_| async {
+      panic!(
+        "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
+      )
+    },
     &spec,
     &tributaries[0].1.reader(),
   )

@@ -190,14 +203,19 @@ async fn dkg_test() {
   }
 
   // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
     |_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after some shares")
     },
     &processors,
-    |_, _| async { panic!("test tried to publish a new Serai TX after some shares") },
+    |_, _, _| async { panic!("test tried to publish a new Serai TX after some shares") },
+    &|_| async {
+      panic!(
+        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
+      )
+    },
     &spec,
     &tributaries[0].1.reader(),
   )

@@ -238,12 +256,13 @@ async fn dkg_test() {
   };
 
   // Any scanner which has handled the prior blocks should only emit the new event
-  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
     |_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
     &processors,
-    |_, _| async { panic!("test tried to publish a new Serai TX") },
+    |_, _, _| async { panic!("test tried to publish a new Serai TX") },
+    &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
     &spec,
     &tributaries[0].1.reader(),
   )

@@ -308,14 +327,16 @@ async fn dkg_test() {
   }
 
   // The scanner should successfully try to publish a transaction with a validly signed signature
-  handle_new_blocks::<_, _, _, _, _, _, LocalP2p>(
+  handle_new_blocks::<_, _, _, _, _, _, _, _, LocalP2p>(
     &mut scanner_db,
     &keys[0],
     |_, _, _, _| async {
       panic!("provided TX caused recognized_id to be called after DKG confirmation")
     },
     &processors,
-    |set, tx| {
+    |set, tx_type, tx| {
+      assert_eq!(tx_type, PstTxType::SetKeys);
+
       let spec = spec.clone();
       let key_pair = key_pair.clone();
       async move {

@@ -368,6 +389,7 @@ async fn dkg_test() {
         }
       }
     },
+    &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
     &spec,
     &tributaries[0].1.reader(),
   )
@@ -202,6 +202,17 @@ fn serialize_transaction() {
     random_signed_with_nonce(&mut OsRng, 2),
   ));
 
+  {
+    let mut key = [0; 32];
+    OsRng.fill_bytes(&mut key);
+    test_read_write(Transaction::DkgRemovalPreprocess(random_sign_data(&mut OsRng, key, 0)));
+  }
+  {
+    let mut key = [0; 32];
+    OsRng.fill_bytes(&mut key);
+    test_read_write(Transaction::DkgRemovalShare(random_sign_data(&mut OsRng, key, 1)));
+  }
+
   {
     let mut block = [0; 32];
     OsRng.fill_bytes(&mut block);
@@ -18,6 +18,7 @@ use crate::tributary::TributarySpec;
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
 pub enum Topic {
   Dkg,
+  DkgRemoval([u8; 32]),
   SubstrateSign(SubstrateSignableId),
   Sign([u8; 32]),
 }

@@ -49,7 +50,10 @@ create_db!(
     DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
     PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
     ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
+    RemovalNonces:
+      (genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
     CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair,
+    DkgCompleted: (genesis: [u8; 32]) -> (),
     AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
     DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
     DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
coordinator/src/tributary/dkg_removal.rs (new file, 241 lines)
@@ -0,0 +1,241 @@
+use core::ops::Deref;
+use std::collections::HashMap;
+
+use zeroize::Zeroizing;
+
+use rand_core::SeedableRng;
+use rand_chacha::ChaCha20Rng;
+
+use transcript::{Transcript, RecommendedTranscript};
+use ciphersuite::{
+  group::{Group, GroupEncoding},
+  Ciphersuite, Ristretto,
+};
+use frost::{
+  FrostError,
+  dkg::{Participant, musig::musig},
+  sign::*,
+};
+use frost_schnorrkel::Schnorrkel;
+
+use serai_client::{
+  Public,
+  validator_sets::primitives::{musig_context, remove_participant_message},
+};
+
+use crate::tributary::TributarySpec;
+
+/*
+  The following is a clone of DkgConfirmer modified for DKG removals.
+
+  The notable difference is this uses a MuSig key of the first `t` participants to respond with
+  preprocesses, not all `n` participants.
+
+  TODO: Exact same commentary on seeded RNGs. The following can drop its seeded RNG if cached
+  preprocesses are used to carry the preprocess between machines
+*/
+pub(crate) struct DkgRemoval;
+impl DkgRemoval {
+  // Convert the passed in HashMap, which uses the validators' start index for their `s` threshold
+  // shares, to the indexes needed for MuSig
+  fn from_threshold_i_to_musig_i(
+    mut old_map: HashMap<[u8; 32], Vec<u8>>,
+  ) -> HashMap<Participant, Vec<u8>> {
+    let mut new_map = HashMap::new();
+    let mut participating = old_map.keys().cloned().collect::<Vec<_>>();
+    participating.sort();
+    for (i, participating) in participating.into_iter().enumerate() {
+      new_map.insert(
+        Participant::new(u16::try_from(i + 1).unwrap()).unwrap(),
+        old_map.remove(&participating).unwrap(),
+      );
+    }
+    new_map
+  }
+
+  fn preprocess_rng(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+  ) -> ChaCha20Rng {
+    ChaCha20Rng::from_seed({
+      let mut entropy_transcript = RecommendedTranscript::new(b"DkgRemoval Entropy");
+      entropy_transcript.append_message(b"spec", spec.serialize());
+      entropy_transcript.append_message(b"key", Zeroizing::new(key.to_bytes()));
+      entropy_transcript.append_message(b"attempt", attempt.to_le_bytes());
+      Zeroizing::new(entropy_transcript).rng_seed(b"preprocess")
+    })
+  }
+
+  fn preprocess_internal(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+    participants: Option<&[<Ristretto as Ciphersuite>::G]>,
+  ) -> (Option<AlgorithmSignMachine<Ristretto, Schnorrkel>>, [u8; 64]) {
+    // TODO: Diversify this among DkgConfirmer/DkgRemoval?
+    let context = musig_context(spec.set());
+
+    let (_, preprocess) = AlgorithmMachine::new(
+      Schnorrkel::new(b"substrate"),
+      // Preprocess with our key alone as we don't know the signing set yet
+      musig(&context, key, &[<Ristretto as Ciphersuite>::G::generator() * key.deref()])
+        .expect("couldn't get the MuSig key of our key alone")
+        .into(),
+    )
+    .preprocess(&mut Self::preprocess_rng(spec, key, attempt));
+
+    let machine = if let Some(participants) = participants {
+      let (machine, actual_preprocess) = AlgorithmMachine::new(
+        Schnorrkel::new(b"substrate"),
+        // Preprocess with our key alone as we don't know the signing set yet
+        musig(&context, key, participants)
+          .expect(
+            "couldn't create a MuSig key for the DKG removal we're supposedly participating in",
+          )
+          .into(),
+      )
+      .preprocess(&mut Self::preprocess_rng(spec, key, attempt));
+      // Doesn't use assert_eq due to lack of Debug
+      assert!(preprocess == actual_preprocess);
+      Some(machine)
+    } else {
+      None
+    };
+
+    (machine, preprocess.serialize().try_into().unwrap())
+  }
+  // Get the preprocess for this confirmation.
+  pub(crate) fn preprocess(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+  ) -> [u8; 64] {
+    Self::preprocess_internal(spec, key, attempt, None).1
+  }
+
+  fn share_internal(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+    mut preprocesses: HashMap<Participant, Vec<u8>>,
+    removed: [u8; 32],
+  ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
+    // TODO: Remove this ugly blob
+    let preprocesses = {
+      let mut preprocesses_participants = preprocesses.keys().cloned().collect::<Vec<_>>();
+      preprocesses_participants.sort();
+      let mut actual_keys = vec![];
+      let spec_validators = spec.validators();
+      for participant in &preprocesses_participants {
+        for (validator, _) in &spec_validators {
+          if participant == &spec.i(*validator).unwrap().start {
+            actual_keys.push(*validator);
+          }
+        }
+      }
+
+      let mut new_preprocesses = HashMap::new();
+      for (participant, actual_key) in
+        preprocesses_participants.into_iter().zip(actual_keys.into_iter())
+      {
+        new_preprocesses.insert(actual_key, preprocesses.remove(&participant).unwrap());
+      }
+      new_preprocesses
+    };
+
+    let participants = preprocesses.keys().cloned().collect::<Vec<_>>();
+    let preprocesses = Self::from_threshold_i_to_musig_i(
+      preprocesses.into_iter().map(|(key, preprocess)| (key.to_bytes(), preprocess)).collect(),
+    );
+    let machine = Self::preprocess_internal(spec, key, attempt, Some(&participants)).0.unwrap();
+    let preprocesses = preprocesses
+      .into_iter()
+      .map(|(p, preprocess)| {
+        machine
+          .read_preprocess(&mut preprocess.as_slice())
+          .map(|preprocess| (p, preprocess))
+          .map_err(|_| p)
+      })
+      .collect::<Result<HashMap<_, _>, _>>()?;
+    let (machine, share) = machine
+      .sign(preprocesses, &remove_participant_message(&spec.set(), Public(removed)))
+      .map_err(|e| match e {
+        FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
+        FrostError::InvalidParticipant(_, _) |
+        FrostError::InvalidSigningSet(_) |
+        FrostError::InvalidParticipantQuantity(_, _) |
+        FrostError::DuplicatedParticipant(_) |
+        FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+        FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
+      })?;
+
+    Ok((machine, share.serialize().try_into().unwrap()))
+  }
+  // Get the share for this confirmation, if the preprocesses are valid.
+  pub(crate) fn share(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+    preprocesses: HashMap<Participant, Vec<u8>>,
+    removed: [u8; 32],
+  ) -> Result<[u8; 32], Participant> {
+    Self::share_internal(spec, key, attempt, preprocesses, removed).map(|(_, share)| share)
+  }
+
+  pub(crate) fn complete(
+    spec: &TributarySpec,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+    attempt: u32,
+    preprocesses: HashMap<Participant, Vec<u8>>,
+    removed: [u8; 32],
+    mut shares: HashMap<Participant, Vec<u8>>,
+  ) -> Result<(Vec<Public>, [u8; 64]), Participant> {
+    // TODO: Remove this ugly blob
+    let shares = {
+      let mut shares_participants = shares.keys().cloned().collect::<Vec<_>>();
+      shares_participants.sort();
+      let mut actual_keys = vec![];
+      let spec_validators = spec.validators();
+      for participant in &shares_participants {
+        for (validator, _) in &spec_validators {
+          if participant == &spec.i(*validator).unwrap().start {
+            actual_keys.push(*validator);
+          }
+        }
+      }
+
+      let mut new_shares = HashMap::new();
+      for (participant, actual_key) in shares_participants.into_iter().zip(actual_keys.into_iter())
+      {
+        new_shares.insert(actual_key.to_bytes(), shares.remove(&participant).unwrap());
+      }
+      new_shares
+    };
+
+    let mut signers = shares.keys().cloned().map(Public).collect::<Vec<_>>();
+    signers.sort();
+
+    let machine = Self::share_internal(spec, key, attempt, preprocesses, removed)
+      .expect("trying to complete a machine which failed to preprocess")
+      .0;
+
+    let shares = Self::from_threshold_i_to_musig_i(shares)
+      .into_iter()
+      .map(|(p, share)| {
+        machine.read_share(&mut share.as_slice()).map(|share| (p, share)).map_err(|_| p)
+      })
+      .collect::<Result<HashMap<_, _>, _>>()?;
+    let signature = machine.complete(shares).map_err(|e| match e {
+      FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
+      FrostError::InvalidParticipant(_, _) |
+      FrostError::InvalidSigningSet(_) |
+      FrostError::InvalidParticipantQuantity(_, _) |
+      FrostError::DuplicatedParticipant(_) |
+      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+      FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
+    })?;
+
+    Ok((signers, signature.to_bytes()))
+  }
+}
@@ -1,6 +1,8 @@
 use core::{ops::Deref, future::Future};
 use std::collections::HashMap;
 
+use rand_core::OsRng;
+
 use zeroize::{Zeroize, Zeroizing};
 
 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

@@ -26,10 +28,13 @@ use serai_db::{Get, Db};
 use crate::{
   processors::Processors,
   tributary::{
-    Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet, Accumulation,
+    SignData, Transaction, TributarySpec, SeraiBlockNumber, Topic, DataSpecification, DataSet,
+    Accumulation,
     dkg_confirmer::DkgConfirmer,
-    scanner::{RecognizedIdType, RIDTrait},
-    FatallySlashed, DkgShare, PlanIds, ConfirmationNonces, AttemptDb, DataDb,
+    dkg_removal::DkgRemoval,
+    scanner::{RecognizedIdType, RIDTrait, PstTxType},
+    FatallySlashed, DkgShare, DkgCompleted, PlanIds, ConfirmationNonces, RemovalNonces, AttemptDb,
+    DataDb,
   },
 };
 

@@ -40,8 +45,11 @@ const DKG_SHARES: &str = "shares";
 const DKG_CONFIRMATION_NONCES: &str = "confirmation_nonces";
 const DKG_CONFIRMATION_SHARES: &str = "confirmation_shares";
 
-// These s/b prefixes between Batch and Sign should be unnecessary, as Batch/Share entries
-// themselves should already be domain separated
+// These d/s/b prefixes between DKG Removal, Batch, and Sign should be unnecessary, as Batch/Share
+// entries themselves should already be domain separated
+const DKG_REMOVAL_PREPROCESS: &str = "d_preprocess";
+const DKG_REMOVAL_SHARE: &str = "d_share";
+
 const BATCH_PREPROCESS: &str = "b_preprocess";
 const BATCH_SHARE: &str = "b_share";
 

@@ -94,26 +102,54 @@ pub fn generated_key_pair<D: Db>(
   DkgConfirmer::share(spec, key, attempt, preprocesses, key_pair)
 }
 
-pub(super) fn fatal_slash<D: Db>(
+pub(super) async fn fatal_slash<
+  D: Db,
+  FPtt: Future<Output = ()>,
+  PTT: Clone + Fn(Transaction) -> FPtt,
+>(
   txn: &mut D::Transaction<'_>,
-  genesis: [u8; 32],
-  account: [u8; 32],
+  spec: &TributarySpec,
+  publish_tributary_tx: &PTT,
+  our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+  slashing: [u8; 32],
   reason: &str,
 ) {
-  log::warn!("fatally slashing {}. reason: {}", hex::encode(account), reason);
-  FatallySlashed::set_fatally_slashed(txn, genesis, account);
+  let genesis = spec.genesis();
+
+  log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
+  FatallySlashed::set_fatally_slashed(txn, genesis, slashing);
   // TODO: disconnect the node from network/ban from further participation in all Tributaries
 
   // TODO: If during DKG, trigger a re-attempt
+  // Despite triggering a re-attempt, this DKG may still complete and may become in-use
+
+  // If during a DKG, remove the participant
+  if DkgCompleted::get(txn, genesis).is_none() {
+    let preprocess = DkgRemoval::preprocess(spec, our_key, 0);
+    let mut tx = Transaction::DkgRemovalPreprocess(SignData {
+      plan: slashing,
+      attempt: 0,
+      data: vec![preprocess.to_vec()],
+      signed: Transaction::empty_signed(),
+    });
+    tx.sign(&mut OsRng, genesis, our_key);
+    publish_tributary_tx(tx).await;
+  }
 }
 
 // TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
 // Tributary post-DKG
 // https://github.com/serai-dex/serai/issues/426
 
-fn fatal_slash_with_participant_index<D: Db>(
-  spec: &TributarySpec,
+async fn fatal_slash_with_participant_index<
+  D: Db,
+  FPtt: Future<Output = ()>,
+  PTT: Clone + Fn(Transaction) -> FPtt,
+>(
   txn: &mut <D as Db>::Transaction<'_>,
+  spec: &TributarySpec,
+  publish_tributary_tx: &PTT,
+  our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   i: Participant,
   reason: &str,
 ) {
@@ -129,14 +165,18 @@ fn fatal_slash_with_participant_index<D: Db>(
   }
   let validator = validator.unwrap();
 
-  fatal_slash::<D>(txn, spec.genesis(), validator.to_bytes(), reason);
+  fatal_slash::<D, _, _>(txn, spec, publish_tributary_tx, our_key, validator.to_bytes(), reason)
+    .await;
 }
 
+#[allow(clippy::too_many_arguments)]
 pub(crate) async fn handle_application_tx<
   D: Db,
   Pro: Processors,
   FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Vec<u8>) -> FPst,
+  PST: Clone + Fn(ValidatorSet, PstTxType, Vec<u8>) -> FPst,
+  FPtt: Future<Output = ()>,
+  PTT: Clone + Fn(Transaction) -> FPtt,
   FRid: Future<Output = ()>,
   RID: RIDTrait<FRid>,
 >(

@@ -144,6 +184,7 @@ pub(crate) async fn handle_application_tx<
   spec: &TributarySpec,
   processors: &Pro,
   publish_serai_tx: PST,
+  publish_tributary_tx: &PTT,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   recognized_id: RID,
   txn: &mut <D as Db>::Transaction<'_>,

@@ -159,18 +200,28 @@ pub(crate) async fn handle_application_tx<
     }
   }
 
-  let handle = |txn: &mut <D as Db>::Transaction<'_>,
+  async fn handle<D: Db, FPtt: Future<Output = ()>, PTT: Clone + Fn(Transaction) -> FPtt>(
+    txn: &mut <D as Db>::Transaction<'_>,
+    spec: &TributarySpec,
+    publish_tributary_tx: &PTT,
+    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
     data_spec: &DataSpecification,
     bytes: Vec<u8>,
-    signed: &Signed| {
+    signed: &Signed,
+  ) -> Accumulation {
+    let genesis = spec.genesis();
+
     let Some(curr_attempt) = AttemptDb::attempt(txn, genesis, data_spec.topic) else {
       // Premature publication of a valid ID/publication of an invalid ID
-      fatal_slash::<D>(
+      fatal_slash::<D, _, _>(
         txn,
-        genesis,
+        spec,
+        publish_tributary_tx,
+        key,
         signed.signer.to_bytes(),
         "published data for ID without an attempt",
-      );
+      )
+      .await;
       return Accumulation::NotReady;
     };
 

@@ -178,7 +229,15 @@ pub(crate) async fn handle_application_tx<
     // This shouldn't be reachable since nonces were made inserted by the coordinator, yet it's a
    // cheap check to leave in for safety
     if DataDb::get(txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {
-      fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "published data multiple times");
+      fatal_slash::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        signed.signer.to_bytes(),
+        "published data multiple times",
+      )
+      .await;
       return Accumulation::NotReady;
     }
 

@@ -189,12 +248,15 @@ pub(crate) async fn handle_application_tx<
     }
     // If the attempt is greater, this is a premature publication, full slash
     if data_spec.attempt > curr_attempt {
-      fatal_slash::<D>(
+      fatal_slash::<D, _, _>(
         txn,
-        genesis,
+        spec,
+        publish_tributary_tx,
+        key,
         signed.signer.to_bytes(),
         "published data with an attempt which hasn't started",
-      );
+      )
+      .await;
       return Accumulation::NotReady;
     }
 

@@ -205,22 +267,31 @@ pub(crate) async fn handle_application_tx<
 
     // Accumulate this data
     DataDb::accumulate(txn, key, spec, data_spec, signed.signer, &bytes)
-  };
+  }
 
-  fn check_sign_data_len<D: Db>(
+  async fn check_sign_data_len<
+    D: Db,
+    FPtt: Future<Output = ()>,
+    PTT: Clone + Fn(Transaction) -> FPtt,
+  >(
     txn: &mut D::Transaction<'_>,
     spec: &TributarySpec,
+    publish_tributary_tx: &PTT,
+    our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
     signer: <Ristretto as Ciphersuite>::G,
     len: usize,
   ) -> Result<(), ()> {
     let signer_i = spec.i(signer).unwrap();
     if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
-      fatal_slash::<D>(
+      fatal_slash::<D, _, _>(
         txn,
-        spec.genesis(),
+        spec,
+        publish_tributary_tx,
+        our_key,
         signer.to_bytes(),
         "signer published a distinct amount of sign data than they had shares",
-      );
+      )
+      .await;
       Err(())?;
     }
     Ok(())
@@ -242,18 +313,40 @@ pub(crate) async fn handle_application_tx<
 
   match tx {
     Transaction::RemoveParticipant(i) => {
-      fatal_slash_with_participant_index::<D>(spec, txn, i, "RemoveParticipant Provided TX")
+      fatal_slash_with_participant_index::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        i,
+        "RemoveParticipant Provided TX",
+      )
+      .await
     }
     Transaction::DkgCommitments(attempt, commitments, signed) => {
-      let Ok(_) = check_sign_data_len::<D>(txn, spec, signed.signer, commitments.len()) else {
+      let Ok(_) = check_sign_data_len::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        signed.signer,
+        commitments.len(),
+      )
+      .await
+      else {
         return;
       };
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification { topic: Topic::Dkg, label: DKG_COMMITMENTS, attempt },
         commitments.encode(),
         &signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(mut commitments)) => {
           log::info!("got all DkgCommitments for {}", hex::encode(genesis));
           unflatten(spec, &mut commitments);

@@ -281,17 +374,28 @@ pub(crate) async fn handle_application_tx<
       let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
 
       if shares.len() != usize::from(sender_is_len) {
-        fatal_slash::<D>(
+        fatal_slash::<D, _, _>(
           txn,
-          genesis,
+          spec,
+          publish_tributary_tx,
+          key,
           signed.signer.to_bytes(),
           "invalid amount of DKG shares by key shares",
-        );
+        )
+        .await;
         return;
       }
       for shares in &shares {
         if shares.len() != (usize::from(spec.n() - sender_is_len)) {
-          fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "invalid amount of DKG shares");
+          fatal_slash::<D, _, _>(
+            txn,
+            spec,
+            publish_tributary_tx,
+            key,
+            signed.signer.to_bytes(),
+            "invalid amount of DKG shares",
+          )
+          .await;
           return;
         }
       }

@@ -348,18 +452,27 @@ pub(crate) async fn handle_application_tx<
       // Drop shares as it's been mutated into invalidity
       drop(shares);
 
-      let confirmation_nonces = handle(
+      let confirmation_nonces = handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_NONCES, attempt },
         confirmation_nonces.to_vec(),
         &signed,
-      );
-      match handle(
+      )
+      .await;
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification { topic: Topic::Dkg, label: DKG_SHARES, attempt },
         our_shares.encode(),
         &signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(shares)) => {
           log::info!("got all DkgShares for {}", hex::encode(genesis));
 

@@ -417,24 +530,30 @@ pub(crate) async fn handle_application_tx<
         if (u16::from(accuser) < u16::from(range.start)) ||
           (u16::from(range.end) <= u16::from(accuser))
         {
-          fatal_slash::<D>(
+          fatal_slash::<D, _, _>(
            txn,
-            genesis,
+            spec,
+            publish_tributary_tx,
+            key,
             signed.signer.to_bytes(),
             "accused with a Participant index which wasn't theirs",
-          );
+          )
+          .await;
           return;
         }
 
         if !((u16::from(range.start) <= u16::from(faulty)) &&
           (u16::from(faulty) < u16::from(range.end)))
         {
-          fatal_slash::<D>(
+          fatal_slash::<D, _, _>(
             txn,
-            genesis,
+            spec,
+            publish_tributary_tx,
+            key,
             signed.signer.to_bytes(),
             "accused self of having an InvalidDkgShare",
-          );
+          )
+          .await;
           return;
         }
 

@@ -454,12 +573,17 @@ pub(crate) async fn handle_application_tx<
     }
 
     Transaction::DkgConfirmed(attempt, shares, signed) => {
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification { topic: Topic::Dkg, label: DKG_CONFIRMATION_SHARES, attempt },
         shares.to_vec(),
         &signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(shares)) => {
           log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
 

@@ -474,13 +598,24 @@ pub(crate) async fn handle_application_tx<
           match DkgConfirmer::complete(spec, key, attempt, preprocesses, &key_pair, shares) {
             Ok(sig) => sig,
             Err(p) => {
-              fatal_slash_with_participant_index::<D>(spec, txn, p, "invalid DkgConfirmer share");
+              fatal_slash_with_participant_index::<D, _, _>(
+                txn,
+                spec,
+                publish_tributary_tx,
+                key,
+                p,
+                "invalid DkgConfirmer share",
+              )
+              .await;
               return;
             }
           };
 
+          DkgCompleted::set(txn, genesis, &());
+
           publish_serai_tx(
             spec.set(),
+            PstTxType::SetKeys,
             SeraiValidatorSets::set_keys(spec.set().network, key_pair, Signature(sig)),
           )
           .await;
@@ -492,6 +627,124 @@ pub(crate) async fn handle_application_tx<
       }
     }
 
+    Transaction::DkgRemovalPreprocess(data) => {
+      let signer = data.signed.signer;
+      // TODO: Only handle this if we're not actively removing this validator
+      if (data.data.len() != 1) || (data.data[0].len() != 64) {
+        fatal_slash::<D, _, _>(
+          txn,
+          spec,
+          publish_tributary_tx,
+          key,
+          signer.to_bytes(),
+          "non-64-byte DKG removal preprocess",
+        )
+        .await;
+        return;
+      }
+      match handle::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        &DataSpecification {
+          topic: Topic::DkgRemoval(data.plan),
+          label: DKG_REMOVAL_PREPROCESS,
+          attempt: data.attempt,
+        },
+        data.data.encode(),
+        &data.signed,
+      )
+      .await
+      {
+        Accumulation::Ready(DataSet::Participating(preprocesses)) => {
+          RemovalNonces::set(txn, genesis, data.plan, data.attempt, &preprocesses);
+
+          let Ok(share) = DkgRemoval::share(spec, key, data.attempt, preprocesses, data.plan)
+          else {
+            // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
+            // slash) and censor transactions (yet don't explicitly ban)
+            return;
+          };
+
+          let mut tx = Transaction::DkgRemovalPreprocess(SignData {
+            plan: data.plan,
+            attempt: data.attempt,
+            data: vec![share.to_vec()],
+            signed: Transaction::empty_signed(),
+          });
+          tx.sign(&mut OsRng, genesis, key);
+          publish_tributary_tx(tx).await;
+        }
+        Accumulation::Ready(DataSet::NotParticipating) => {}
+        Accumulation::NotReady => {}
+      }
+    }
+    Transaction::DkgRemovalShare(data) => {
+      let signer = data.signed.signer;
+      if (data.data.len() != 1) || (data.data[0].len() != 32) {
+        fatal_slash::<D, _, _>(
+          txn,
+          spec,
+          publish_tributary_tx,
+          key,
+          signer.to_bytes(),
+          "non-32-byte DKG removal share",
+        )
+        .await;
+        return;
+      }
+      match handle::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        &DataSpecification {
+          topic: Topic::DkgRemoval(data.plan),
+          label: DKG_REMOVAL_SHARE,
+          attempt: data.attempt,
+        },
+        data.data.encode(),
+        &data.signed,
+      )
+      .await
+      {
+        Accumulation::Ready(DataSet::Participating(shares)) => {
+          let preprocesses = RemovalNonces::get(txn, genesis, data.plan, data.attempt).unwrap();
+
+          let Ok((signers, signature)) =
+            DkgRemoval::complete(spec, key, data.attempt, preprocesses, data.plan, shares)
+          else {
+            // TODO: Locally increase slash points to maximum (distinct from an explicitly fatal
+            // slash) and censor transactions (yet don't explicitly ban)
+            return;
+          };
+
+          // TODO: Only handle this if we're not actively removing any of the signers
+          // The created Substrate call will fail if a removed validator was one of the signers
+          // Since:
+          // 1) publish_serai_tx will block this task until the TX is published
+          // 2) We won't scan any more TXs/blocks until we handle this TX
+          // The TX *must* be successfully published *before* we start removing any more signers
+          // Accordingly, if the signers aren't currently being removed, they won't be removed
+          // by the time this transaction is successfully published *unless* a malicious 34%
+          // participates with the non-participating 33% to continue operation and produce a
+          // distinct removal (since the non-participating won't block in this block)
+          // This breaks BFT and is accordingly within bounds
+
+          let tx = serai_client::SeraiValidatorSets::remove_participant(
+            spec.set().network,
+            Public(data.plan),
+            signers,
+            Signature(signature),
+          );
+          publish_serai_tx(spec.set(), PstTxType::RemoveParticipant(data.plan), tx).await;
+        }
+        Accumulation::Ready(DataSet::NotParticipating) => {}
+        Accumulation::NotReady => {}
+      }
+    }
+
     Transaction::CosignSubstrateBlock(hash) => {
       AttemptDb::recognize_topic(
         txn,
@@ -540,17 +793,37 @@ pub(crate) async fn handle_application_tx<
 
     Transaction::SubstratePreprocess(data) => {
       let signer = data.signed.signer;
-      let Ok(_) = check_sign_data_len::<D>(txn, spec, signer, data.data.len()) else {
+      let Ok(_) = check_sign_data_len::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        signer,
+        data.data.len(),
+      )
+      .await
+      else {
         return;
       };
       for data in &data.data {
         if data.len() != 64 {
-          fatal_slash::<D>(txn, genesis, signer.to_bytes(), "non-64-byte Substrate preprocess");
+          fatal_slash::<D, _, _>(
+            txn,
+            spec,
+            publish_tributary_tx,
+            key,
+            signer.to_bytes(),
+            "non-64-byte Substrate preprocess",
+          )
+          .await;
           return;
         }
       }
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification {
           topic: Topic::SubstrateSign(data.plan),
           label: BATCH_PREPROCESS,

@@ -558,7 +831,9 @@ pub(crate) async fn handle_application_tx<
         },
         data.data.encode(),
         &data.signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
           unflatten(spec, &mut preprocesses);
           processors

@@ -583,11 +858,23 @@ pub(crate) async fn handle_application_tx<
       }
     }
     Transaction::SubstrateShare(data) => {
-      let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
+      let Ok(_) = check_sign_data_len::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        data.signed.signer,
+        data.data.len(),
+      )
+      .await
+      else {
         return;
       };
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification {
           topic: Topic::SubstrateSign(data.plan),
           label: BATCH_SHARE,

@@ -595,7 +882,9 @@ pub(crate) async fn handle_application_tx<
         },
         data.data.encode(),
         &data.signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(mut shares)) => {
           unflatten(spec, &mut shares);
           processors

@@ -621,11 +910,23 @@ pub(crate) async fn handle_application_tx<
     }
 
     Transaction::SignPreprocess(data) => {
-      let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
+      let Ok(_) = check_sign_data_len::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        data.signed.signer,
+        data.data.len(),
+      )
+      .await
+      else {
         return;
       };
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification {
           topic: Topic::Sign(data.plan),
           label: SIGN_PREPROCESS,

@@ -633,7 +934,9 @@ pub(crate) async fn handle_application_tx<
         },
         data.data.encode(),
         &data.signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(mut preprocesses)) => {
           unflatten(spec, &mut preprocesses);
           processors

@@ -651,11 +954,23 @@ pub(crate) async fn handle_application_tx<
      }
    }
     Transaction::SignShare(data) => {
-      let Ok(_) = check_sign_data_len::<D>(txn, spec, data.signed.signer, data.data.len()) else {
+      let Ok(_) = check_sign_data_len::<D, _, _>(
+        txn,
+        spec,
+        publish_tributary_tx,
+        key,
+        data.signed.signer,
+        data.data.len(),
+      )
+      .await
+      else {
         return;
       };
-      match handle(
+      match handle::<D, _, _>(
         txn,
+        spec,
+        publish_tributary_tx,
+        key,
         &DataSpecification {
           topic: Topic::Sign(data.plan),
           label: SIGN_SHARE,

@@ -663,7 +978,9 @@ pub(crate) async fn handle_application_tx<
         },
         data.data.encode(),
         &data.signed,
-      ) {
+      )
+      .await
+      {
         Accumulation::Ready(DataSet::Participating(mut shares)) => {
           unflatten(spec, &mut shares);
           processors

@@ -688,12 +1005,15 @@ pub(crate) async fn handle_application_tx<
       );
 
       if AttemptDb::attempt(txn, genesis, Topic::Sign(plan)).is_none() {
-        fatal_slash::<D>(
+        fatal_slash::<D, _, _>(
           txn,
-          genesis,
+          spec,
+          publish_tributary_tx,
+          key,
           first_signer.to_bytes(),
           "claimed an unrecognized plan was completed",
-        );
+        )
+        .await;
         return;
       };
 
@@ -36,6 +36,7 @@ mod db;
|
|||||||
pub use db::*;
|
pub use db::*;
|
||||||
|
|
||||||
mod dkg_confirmer;
|
mod dkg_confirmer;
|
||||||
|
mod dkg_removal;
|
||||||
|
|
||||||
mod handle;
|
mod handle;
|
||||||
pub use handle::*;
|
pub use handle::*;
|
||||||
@@ -226,7 +227,7 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
|
|||||||
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
|
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
|
||||||
for data in &self.data {
|
for data in &self.data {
|
||||||
if data.len() > u16::MAX.into() {
|
if data.len() > u16::MAX.into() {
|
||||||
// Currently, the largest individual preproces is a Monero transaction
|
// Currently, the largest individual preprocess is a Monero transaction
|
||||||
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
|
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
|
||||||
// key image and proof (96 bytes)
|
// key image and proof (96 bytes)
|
||||||
// Even with all of that, we could support 227 inputs in a single TX
|
// Even with all of that, we could support 227 inputs in a single TX
|
||||||
@@ -265,6 +266,9 @@ pub enum Transaction {
|
|||||||
},
|
},
|
||||||
DkgConfirmed(u32, [u8; 32], Signed),
|
DkgConfirmed(u32, [u8; 32], Signed),
|
||||||
|
|
||||||
|
DkgRemovalPreprocess(SignData<[u8; 32]>),
|
||||||
|
DkgRemovalShare(SignData<[u8; 32]>),
|
||||||
|
|
||||||
// Co-sign a Substrate block.
|
// Co-sign a Substrate block.
|
||||||
CosignSubstrateBlock([u8; 32]),
|
CosignSubstrateBlock([u8; 32]),
|
||||||
|
|
||||||
@@ -325,6 +329,12 @@ impl Debug for Transaction {
|
|||||||
.field("attempt", attempt)
|
.field("attempt", attempt)
|
||||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
.finish_non_exhaustive(),
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::DkgRemovalPreprocess(sign_data) => {
|
||||||
|
fmt.debug_struct("Transaction::DkgRemovalPreprocess").field("sign_data", sign_data).finish()
|
||||||
|
}
|
||||||
|
Transaction::DkgRemovalShare(sign_data) => {
|
||||||
|
fmt.debug_struct("Transaction::DkgRemovalShare").field("sign_data", sign_data).finish()
|
||||||
|
}
|
||||||
Transaction::CosignSubstrateBlock(block) => fmt
|
Transaction::CosignSubstrateBlock(block) => fmt
|
||||||
.debug_struct("Transaction::CosignSubstrateBlock")
|
.debug_struct("Transaction::CosignSubstrateBlock")
|
||||||
.field("block", &hex::encode(block))
|
.field("block", &hex::encode(block))
|
||||||
@@ -487,13 +497,16 @@ impl ReadWrite for Transaction {
|
|||||||
Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
|
Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
|
||||||
}
|
}
|
||||||
|
|
||||||
5 => {
|
5 => SignData::read(reader, 0).map(Transaction::DkgRemovalPreprocess),
|
||||||
|
6 => SignData::read(reader, 1).map(Transaction::DkgRemovalShare),
|
||||||
|
|
||||||
|
7 => {
|
||||||
let mut block = [0; 32];
|
let mut block = [0; 32];
|
||||||
reader.read_exact(&mut block)?;
|
reader.read_exact(&mut block)?;
|
||||||
Ok(Transaction::CosignSubstrateBlock(block))
|
Ok(Transaction::CosignSubstrateBlock(block))
|
||||||
}
|
}
|
||||||
|
|
||||||
6 => {
|
8 => {
|
||||||
let mut block = [0; 32];
|
let mut block = [0; 32];
|
||||||
reader.read_exact(&mut block)?;
|
reader.read_exact(&mut block)?;
|
||||||
let mut batch = [0; 5];
|
let mut batch = [0; 5];
|
||||||
@@ -501,19 +514,19 @@ impl ReadWrite for Transaction {
         Ok(Transaction::Batch(block, batch))
       }
 
-      7 => {
+      9 => {
         let mut block = [0; 8];
         reader.read_exact(&mut block)?;
         Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
       }
 
-      8 => SignData::read(reader, 0).map(Transaction::SubstratePreprocess),
-      9 => SignData::read(reader, 1).map(Transaction::SubstrateShare),
+      10 => SignData::read(reader, 0).map(Transaction::SubstratePreprocess),
+      11 => SignData::read(reader, 1).map(Transaction::SubstrateShare),
 
-      10 => SignData::read(reader, 0).map(Transaction::SignPreprocess),
-      11 => SignData::read(reader, 1).map(Transaction::SignShare),
+      12 => SignData::read(reader, 0).map(Transaction::SignPreprocess),
+      13 => SignData::read(reader, 1).map(Transaction::SignShare),
 
-      12 => {
+      14 => {
         let mut plan = [0; 32];
         reader.read_exact(&mut plan)?;
 
@@ -610,41 +623,50 @@ impl ReadWrite for Transaction {
         signed.write_without_nonce(writer)
       }
 
-      Transaction::CosignSubstrateBlock(block) => {
+      Transaction::DkgRemovalPreprocess(data) => {
         writer.write_all(&[5])?;
+        data.write(writer)
+      }
+      Transaction::DkgRemovalShare(data) => {
+        writer.write_all(&[6])?;
+        data.write(writer)
+      }
+
+      Transaction::CosignSubstrateBlock(block) => {
+        writer.write_all(&[7])?;
         writer.write_all(block)
       }
 
       Transaction::Batch(block, batch) => {
-        writer.write_all(&[6])?;
+        writer.write_all(&[8])?;
         writer.write_all(block)?;
         writer.write_all(batch)
       }
 
       Transaction::SubstrateBlock(block) => {
-        writer.write_all(&[7])?;
+        writer.write_all(&[9])?;
         writer.write_all(&block.to_le_bytes())
       }
 
       Transaction::SubstratePreprocess(data) => {
-        writer.write_all(&[8])?;
+        writer.write_all(&[10])?;
         data.write(writer)
       }
       Transaction::SubstrateShare(data) => {
-        writer.write_all(&[9])?;
+        writer.write_all(&[11])?;
         data.write(writer)
       }
 
       Transaction::SignPreprocess(data) => {
-        writer.write_all(&[10])?;
+        writer.write_all(&[12])?;
         data.write(writer)
       }
       Transaction::SignShare(data) => {
-        writer.write_all(&[11])?;
+        writer.write_all(&[13])?;
         data.write(writer)
       }
       Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
-        writer.write_all(&[12])?;
+        writer.write_all(&[14])?;
         writer.write_all(plan)?;
         writer
           .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
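With the two DkgRemoval variants claiming type bytes 5 and 6, every later transaction kind shifts up by two, and the read and write hunks above stay in lockstep. A compact, self-contained reference of the resulting assignments (the constant names are ours, purely illustrative; the implementation matches on the literals shown above):

// Illustrative constants only, derived from the hunks above.
const DKG_REMOVAL_PREPROCESS: u8 = 5; // new
const DKG_REMOVAL_SHARE: u8 = 6; // new
const COSIGN_SUBSTRATE_BLOCK: u8 = 7; // was 5
const BATCH: u8 = 8; // was 6
const SUBSTRATE_BLOCK: u8 = 9; // was 7
const SUBSTRATE_PREPROCESS: u8 = 10; // was 8
const SUBSTRATE_SHARE: u8 = 11; // was 9
const SIGN_PREPROCESS: u8 = 12; // was 10
const SIGN_SHARE: u8 = 13; // was 11
const SIGN_COMPLETED: u8 = 14; // was 12

fn main() {
  // Sanity check: the new variants slot in contiguously before the shifted kinds.
  assert_eq!(COSIGN_SUBSTRATE_BLOCK, DKG_REMOVAL_SHARE + 1);
}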
@@ -674,6 +696,13 @@ impl TransactionTrait for Transaction {
         TransactionKind::Signed((b"dkg", attempt).encode(), signed)
       }
 
+      Transaction::DkgRemovalPreprocess(data) => {
+        TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
+      }
+      Transaction::DkgRemovalShare(data) => {
+        TransactionKind::Signed((b"dkg_removal", data.plan, data.attempt).encode(), &data.signed)
+      }
+
       Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
 
       Transaction::Batch(_, _) => TransactionKind::Provided("batch"),
@@ -753,6 +782,9 @@ impl Transaction {
       Transaction::InvalidDkgShare { .. } => 2,
       Transaction::DkgConfirmed(_, _, _) => 2,
 
+      Transaction::DkgRemovalPreprocess(_) => 0,
+      Transaction::DkgRemovalShare(_) => 1,
+
       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
 
       Transaction::Batch(_, _) => panic!("signing Batch"),
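Both removal transactions sign under the same (b"dkg_removal", plan, attempt) topic, and the nonces above order them within it: a signer's preprocess (nonce 0) must land before its share (nonce 1). A minimal stand-in sketch of that pairing (these types are ours; the real transactions carry SignData<[u8; 32]>):

// Stand-in types mirroring the topic/nonce split shown in the hunks above.
enum DkgRemoval {
  Preprocess { plan: [u8; 32], attempt: u32 },
  Share { plan: [u8; 32], attempt: u32 },
}

impl DkgRemoval {
  // Mirrors the nonce assignments above: preprocess first, then share.
  fn nonce(&self) -> u32 {
    match self {
      DkgRemoval::Preprocess { .. } => 0,
      DkgRemoval::Share { .. } => 1,
    }
  }

  // Mirrors the TransactionKind topic: one ordered lane per (plan, attempt).
  fn topic(&self) -> (&'static [u8], [u8; 32], u32) {
    match self {
      DkgRemoval::Preprocess { plan, attempt } | DkgRemoval::Share { plan, attempt } => {
        (b"dkg_removal".as_slice(), *plan, *attempt)
      }
    }
  }
}

fn main() {
  let plan = [0; 32];
  let preprocess = DkgRemoval::Preprocess { plan, attempt: 0 };
  let share = DkgRemoval::Share { plan, attempt: 0 };
  // Same topic, strictly ordered nonces: the share can only follow the preprocess.
  assert_eq!(preprocess.topic(), share.topic());
  assert!(preprocess.nonce() < share.nonce());
}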
@@ -776,6 +808,9 @@ impl Transaction {
       Transaction::InvalidDkgShare { ref mut signed, .. } => signed,
       Transaction::DkgConfirmed(_, _, ref mut signed) => signed,
 
+      Transaction::DkgRemovalPreprocess(ref mut data) => &mut data.signed,
+      Transaction::DkgRemovalShare(ref mut data) => &mut data.signed,
+
       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
 
       Transaction::Batch(_, _) => panic!("signing Batch"),
@@ -13,7 +13,7 @@ use serai_client::{validator_sets::primitives::ValidatorSet, Serai};
 use serai_db::DbTxn;
 
 use tributary::{
-  TransactionKind, Transaction as TributaryTransaction, Block, TributaryReader,
+  TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader,
   tendermint::{
     tx::{TendermintTx, Evidence, decode_signed_message},
     TendermintNetwork,
@@ -43,12 +43,21 @@ impl<FRid, F: Clone + Fn(ValidatorSet, [u8; 32], RecognizedIdType, Vec<u8>) -> F
 {
 }
 
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+pub enum PstTxType {
+  SetKeys,
+  RemoveParticipant([u8; 32]),
+}
+
 // Handle a specific Tributary block
+#[allow(clippy::too_many_arguments)]
 async fn handle_block<
   D: Db,
   Pro: Processors,
   FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Vec<u8>) -> FPst,
+  PST: Clone + Fn(ValidatorSet, PstTxType, Vec<u8>) -> FPst,
+  FPtt: Future<Output = ()>,
+  PTT: Clone + Fn(Transaction) -> FPtt,
   FRid: Future<Output = ()>,
   RID: RIDTrait<FRid>,
   P: P2p,
@@ -58,12 +67,12 @@ async fn handle_block<
   recognized_id: RID,
   processors: &Pro,
   publish_serai_tx: PST,
+  publish_tributary_tx: &PTT,
   spec: &TributarySpec,
   block: Block<Transaction>,
 ) {
   log::info!("found block for Tributary {:?}", spec.set());
 
-  let genesis = spec.genesis();
   let hash = block.hash();
 
   let mut event_id = 0;
@@ -100,19 +109,23 @@ async fn handle_block<
 
         // Since anything with evidence is fundamentally faulty behavior, not just temporal errors,
         // mark the node as fatally slashed
-        fatal_slash::<D>(
+        fatal_slash::<D, _, _>(
           &mut txn,
-          genesis,
+          spec,
+          publish_tributary_tx,
+          key,
           msgs.0.msg.sender,
           &format!("invalid tendermint messages: {:?}", msgs),
-        );
+        )
+        .await;
       }
       TributaryTransaction::Application(tx) => {
-        handle_application_tx::<D, _, _, _, _, _>(
+        handle_application_tx::<D, _, _, _, _, _, _, _>(
           tx,
           spec,
           processors,
           publish_serai_tx.clone(),
+          publish_tributary_tx,
           key,
           recognized_id.clone(),
           &mut txn,
@@ -130,11 +143,14 @@ async fn handle_block<
   // TODO: Trigger any necessary re-attempts
 }
 
+#[allow(clippy::too_many_arguments)]
 pub(crate) async fn handle_new_blocks<
   D: Db,
   Pro: Processors,
   FPst: Future<Output = ()>,
-  PST: Clone + Fn(ValidatorSet, Vec<u8>) -> FPst,
+  PST: Clone + Fn(ValidatorSet, PstTxType, Vec<u8>) -> FPst,
+  FPtt: Future<Output = ()>,
+  PTT: Clone + Fn(Transaction) -> FPtt,
   FRid: Future<Output = ()>,
   RID: RIDTrait<FRid>,
   P: P2p,
@@ -144,6 +160,7 @@ pub(crate) async fn handle_new_blocks<
   recognized_id: RID,
   processors: &Pro,
   publish_serai_tx: PST,
+  publish_tributary_tx: &PTT,
   spec: &TributarySpec,
   tributary: &TributaryReader<D, Transaction>,
 ) {
@@ -165,12 +182,13 @@ pub(crate) async fn handle_new_blocks<
     }
   }
 
-  handle_block::<_, _, _, _, _, _, P>(
+  handle_block::<_, _, _, _, _, _, _, _, P>(
     db,
     key,
     recognized_id.clone(),
     processors,
     publish_serai_tx.clone(),
+    publish_tributary_tx,
     spec,
     block,
   )
@@ -222,12 +240,12 @@ pub(crate) async fn scan_tributaries_task<
         // the next block occurs
         let next_block_notification = tributary.next_block_notification().await;
 
-        handle_new_blocks::<_, _, _, _, _, _, P>(
+        handle_new_blocks::<_, _, _, _, _, _, _, _, P>(
          &mut tributary_db,
          &key,
          recognized_id.clone(),
          &processors,
-         |set, tx| {
+         |set, tx_type, tx| {
            let serai = serai.clone();
            async move {
              loop {
@@ -242,33 +260,52 @@ pub(crate) async fn scan_tributaries_task<
                 Err(e) => {
                   if let Ok(serai) = serai.as_of_latest_finalized_block().await {
                     let serai = serai.validator_sets();
-                    // Check if this failed because the keys were already set by someone
-                    // else
-                    if matches!(serai.keys(spec.set()).await, Ok(Some(_))) {
-                      log::info!("another coordinator set key pair for {:?}", set);
-                      break;
-                    }
 
-                    // The above block may return false if the keys have been pruned from
-                    // the state
-                    // Check if this session is no longer the latest session, meaning it at
-                    // some point did set keys, and we're just operating off very
-                    // historical data
+                    // The following block is irrelevant, and can/likely will fail, if
+                    // we're publishing a TX for an old session
+                    // If we're on a newer session, move on
                     if let Ok(Some(current_session)) =
                       serai.session(spec.set().network).await
                     {
                       if current_session.0 > spec.set().session.0 {
                         log::warn!(
-                          "trying to set keys for a set which isn't the latest {:?}",
+                          "trying to publish a TX relevant to a set {} {:?}",
+                          "which isn't the latest",
                           set
                         );
                         break;
                       }
                     }
 
+                    // Check if someone else published the TX in question
+                    match tx_type {
+                      PstTxType::SetKeys => {
+                        if matches!(serai.keys(spec.set()).await, Ok(Some(_))) {
+                          log::info!("another coordinator set key pair for {:?}", set);
+                          break;
+                        }
+                      }
+                      PstTxType::RemoveParticipant(removed) => {
+                        if let Ok(Some(participants)) =
+                          serai.participants(spec.set().network).await
+                        {
+                          if !participants
+                            .iter()
+                            .any(|(participant, _)| participant.0 == removed)
+                          {
+                            log::info!(
+                              "another coordinator published removal for {:?}",
+                              hex::encode(removed)
+                            );
+                            break;
+                          }
+                        }
+                      }
+                    }
                   }
 
                   log::error!(
-                    "couldn't connect to Serai node to publish set_keys TX: {:?}",
+                    "couldn't connect to Serai node to publish {tx_type:?} TX: {:?}",
                     e
                   );
                   tokio::time::sleep(core::time::Duration::from_secs(10)).await;
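The new exit conditions in this retry loop amount to an idempotency check keyed on PstTxType: a SetKeys TX is moot once keys exist for the set, and a RemoveParticipant TX is moot once the target no longer appears in Participants. A dependency-free distillation (the function and its flattened arguments are ours, for illustration only):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum PstTxType {
  SetKeys,
  RemoveParticipant([u8; 32]),
}

// Returns true if another coordinator already achieved this TX's effect,
// mirroring the match added in the retry loop above.
fn already_published(
  tx_type: PstTxType,
  keys_set: bool,
  participants: &[([u8; 32], u64)],
) -> bool {
  match tx_type {
    PstTxType::SetKeys => keys_set,
    PstTxType::RemoveParticipant(removed) => {
      !participants.iter().any(|(participant, _)| *participant == removed)
    }
  }
}

fn main() {
  let participants = [([1; 32], 2), ([2; 32], 1)];
  // A validator absent from Participants was already removed: stop retrying.
  assert!(already_published(PstTxType::RemoveParticipant([9; 32]), false, &participants));
  // A still-present validator means our removal TX is still needed.
  assert!(!already_published(PstTxType::RemoveParticipant([1; 32]), false, &participants));
}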
@@ -277,6 +314,21 @@ pub(crate) async fn scan_tributaries_task<
               }
             }
           },
+          &|tx| {
+            let tributary = tributary.clone();
+            async move {
+              match tributary.add_transaction(tx.clone()).await {
+                Ok(_) => {}
+                // Can happen as this occurs on a distinct DB TXN
+                Err(TransactionError::InvalidNonce) => {
+                  log::warn!(
+                    "publishing TX {tx:?} returned InvalidNonce. was it already added?"
+                  )
+                }
+                Err(e) => panic!("created an invalid transaction: {e:?}"),
+              }
+            }
+          },
          spec,
          &reader,
        )
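The closure above tolerates InvalidNonce because the scanner's DB transaction and the Tributary mempool advance independently: a re-scan or concurrent task may have already consumed the nonce, which means the TX is effectively published. Any other error implies we built a malformed TX. A stand-in sketch of that triage (stub error type, not tributary's actual TransactionError):

#[allow(dead_code)]
#[derive(Debug)]
enum TxError {
  InvalidNonce,
  Other(&'static str),
}

fn triage(result: Result<(), TxError>) {
  match result {
    Ok(()) => {}
    // A consumed nonce means another path already published this TX: benign.
    Err(TxError::InvalidNonce) => {
      eprintln!("publishing TX returned InvalidNonce. was it already added?");
    }
    // Anything else means we built a malformed TX, which is a bug.
    Err(error) => panic!("created an invalid transaction: {error:?}"),
  }
}

fn main() {
  triage(Ok(()));
  triage(Err(TxError::InvalidNonce));
}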
@@ -112,4 +112,20 @@ impl<'a> SeraiValidatorSets<'a> {
       validator_sets::Call::<Runtime>::set_keys { network, key_pair, signature },
     ))
   }
+
+  pub fn remove_participant(
+    network: NetworkId,
+    to_remove: Public,
+    signers: Vec<Public>,
+    signature: Signature,
+  ) -> Vec<u8> {
+    Serai::unsigned(&serai_runtime::RuntimeCall::ValidatorSets(
+      validator_sets::Call::<Runtime>::remove_participant {
+        network,
+        to_remove,
+        signers,
+        signature,
+      },
+    ))
+  }
 }
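A hedged usage sketch of the new helper, with stub types standing in for NetworkId, Public, and Signature (hypothetical values throughout): the coordinator gathers a sorted signer list and a MuSig signature over remove_participant_message(set, to_remove), then broadcasts the returned bytes as an unsigned extrinsic.

// Stub argument bundle; the real values come from serai-client's primitives.
#[derive(Debug)]
struct RemoveParticipantArgs {
  network: u8,            // stand-in for NetworkId
  to_remove: [u8; 32],    // stand-in for Public
  signers: Vec<[u8; 32]>, // must be sorted; the pallet rejects unsorted lists
  signature: Vec<u8>,     // stand-in for the MuSig Signature
}

fn main() {
  let args = RemoveParticipantArgs {
    network: 1,
    to_remove: [3; 32],
    signers: vec![[1; 32], [2; 32]],
    signature: vec![0; 64],
  };
  // In the real flow these feed SeraiValidatorSets::remove_participant(..),
  // whose Vec<u8> result is then published to a Serai node.
  println!("{args:?}");
}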
@@ -85,22 +85,49 @@ pub mod pallet {
   #[pallet::getter(fn allocation_per_key_share)]
   pub type AllocationPerKeyShare<T: Config> =
     StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
-  /// The validators selected to be in-set.
+  /// The validators selected to be in-set who haven't been removed.
   #[pallet::storage]
-  #[pallet::getter(fn participants)]
-  pub type Participants<T: Config> = StorageMap<
+  pub(crate) type Participants<T: Config> = StorageMap<
     _,
     Identity,
     NetworkId,
     BoundedVec<(Public, u64), ConstU32<{ MAX_KEY_SHARES_PER_SET }>>,
-    ValueQuery,
+    OptionQuery,
   >;
-  /// The validators selected to be in-set, yet with the ability to perform a check for presence.
+  /// The validators selected to be in-set, regardless of if removed, with the ability to perform a
+  /// check for presence.
   // Uses Identity so we can call clear_prefix over network, manually inserting a Blake2 hash
   // before the spammable key.
   #[pallet::storage]
-  pub type InSet<T: Config> =
-    StorageMap<_, Identity, (NetworkId, [u8; 16], Public), (), OptionQuery>;
+  pub(crate) type InSet<T: Config> =
+    StorageMap<_, Identity, (NetworkId, [u8; 16], Public), u64, OptionQuery>;
+
+  // TODO: Merge this with SortedAllocationsIter
+  struct InSetIter<T: Config> {
+    _t: PhantomData<T>,
+    prefix: Vec<u8>,
+    last: Vec<u8>,
+  }
+  impl<T: Config> InSetIter<T> {
+    fn new(network: NetworkId) -> Self {
+      let mut prefix = InSet::<T>::final_prefix().to_vec();
+      prefix.extend(&network.encode());
+      Self { _t: PhantomData, prefix: prefix.clone(), last: prefix }
+    }
+  }
+  impl<T: Config> Iterator for InSetIter<T> {
+    type Item = u64;
+    fn next(&mut self) -> Option<Self::Item> {
+      let next = sp_io::storage::next_key(&self.last)?;
+      if !next.starts_with(&self.prefix) {
+        return None;
+      }
+      let res = u64::decode(&mut sp_io::storage::get(&next).unwrap().as_ref()).unwrap();
+      self.last = next;
+      Some(res)
+    }
+  }
 
   impl<T: Config> Pallet<T> {
     fn in_set_key(
       network: NetworkId,
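InSetIter walks the raw storage key space: starting at the prefix for a network, it repeatedly asks for the next key and stops once keys leave the prefix, decoding each value as that validator's u64 key-share count. A self-contained simulation, with a BTreeMap standing in for the runtime's storage trie (an assumption made purely for illustration):

use std::collections::BTreeMap;

// Sum every value stored under `prefix`, mirroring how InSetIter totals key shares.
fn sum_shares_under_prefix(storage: &BTreeMap<Vec<u8>, u64>, prefix: &[u8]) -> u64 {
  storage
    .range(prefix.to_vec()..)
    .take_while(|(key, _)| key.starts_with(prefix))
    .map(|(_, shares)| *shares)
    .sum()
}

fn main() {
  let mut storage = BTreeMap::new();
  storage.insert(b"net-a/validator-1".to_vec(), 2u64);
  storage.insert(b"net-a/validator-2".to_vec(), 1u64);
  storage.insert(b"net-b/validator-1".to_vec(), 5u64);
  // Only net-a's shares are counted: 2 + 1.
  assert_eq!(sum_shares_under_prefix(&storage, b"net-a/"), 3);
}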
@@ -118,6 +145,8 @@ pub mod pallet {
     }
 
     /// Returns true if the account is included in an active set.
+    ///
+    /// This will still include participants which were removed from the DKG.
     pub fn in_active_set(network: NetworkId, account: Public) -> bool {
       if network == NetworkId::Serai {
         Self::in_active_serai_set(account)
@@ -127,6 +156,8 @@ pub mod pallet {
     }
 
     /// Returns true if the account has been definitively included in an active or upcoming set.
+    ///
+    /// This will still include participants which were removed from the DKG.
     pub fn in_set(network: NetworkId, account: Public) -> bool {
       if InSet::<T>::contains_key(Self::in_set_key(network, account)) {
         return true;
@@ -258,7 +289,6 @@ pub mod pallet {
 
   /// The MuSig key for a validator set.
   #[pallet::storage]
-  #[pallet::getter(fn musig_key)]
   pub type MuSigKeys<T: Config> = StorageMap<_, Twox64Concat, ValidatorSet, Public, OptionQuery>;
 
   /// The generated key pair for a given validator set instance.
@@ -272,6 +302,10 @@ pub mod pallet {
     NewSet {
       set: ValidatorSet,
     },
+    ParticipantRemoved {
+      set: ValidatorSet,
+      removed: T::AccountId,
+    },
     KeyGen {
       set: ValidatorSet,
       key_pair: KeyPair,
@@ -328,7 +362,7 @@ pub mod pallet {
         let Some((key, amount)) = iter.next() else { break };
 
         let these_key_shares = amount.0 / allocation_per_key_share;
-        InSet::<T>::set(Self::in_set_key(network, key), Some(()));
+        InSet::<T>::set(Self::in_set_key(network, key), Some(these_key_shares));
         participants.push((key, these_key_shares));
 
         // This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET
@@ -348,7 +382,7 @@ pub mod pallet {
           Some(musig_key(set, &participants.iter().map(|(id, _)| *id).collect::<Vec<_>>())),
         );
       }
-      Participants::<T>::set(network, participants.try_into().unwrap());
+      Participants::<T>::set(network, Some(participants.try_into().unwrap()));
     }
   }
 
@@ -672,7 +706,8 @@ pub mod pallet {
     }
 
     fn rotate_session() {
-      let prior_serai_participants = Self::participants(NetworkId::Serai);
+      let prior_serai_participants = Participants::<T>::get(NetworkId::Serai)
+        .expect("no Serai participants upon rotate_session");
       let prior_serai_session = Self::session(NetworkId::Serai).unwrap();
 
       // TODO: T::SessionHandler::on_before_session_ending() was here.
@@ -685,7 +720,8 @@ pub mod pallet {
       // Update Babe and Grandpa
       let session = prior_serai_session.0 + 1;
       let validators = prior_serai_participants;
-      let next_validators = Self::participants(NetworkId::Serai);
+      let next_validators =
+        Participants::<T>::get(NetworkId::Serai).expect("no Serai participants after new_session");
       Babe::<T>::enact_epoch_change(
         WeakBoundedVec::force_from(
           validators.iter().copied().map(|(id, w)| (BabeAuthorityId::from(id), w)).collect(),
@@ -733,6 +769,26 @@ pub mod pallet {
 
     #[pallet::call_index(1)]
     #[pallet::weight(0)] // TODO
+    pub fn remove_participant(
+      origin: OriginFor<T>,
+      network: NetworkId,
+      to_remove: Public,
+      signers: Vec<Public>,
+      signature: Signature,
+    ) -> DispatchResult {
+      ensure_none(origin)?;
+
+      // Nothing occurs here as validate_unsigned does everything
+      let _ = network;
+      let _ = to_remove;
+      let _ = signers;
+      let _ = signature;
+
+      Ok(())
+    }
+
+    #[pallet::call_index(2)]
+    #[pallet::weight(0)] // TODO
     pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
       let validator = ensure_signed(origin)?;
       Coins::<T>::transfer_internal(
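A hedged sketch of the division of labor above: for this unsigned call, validation and the state transition both live in validate_unsigned (which pre_dispatch re-runs), leaving the dispatched body as a deliberate no-op. Stub types keep this self-contained; none of these names are the pallet's.

struct RemoveParticipant {
  to_remove: [u8; 32],
}

fn validate_unsigned(
  participants: &mut Vec<[u8; 32]>,
  call: &RemoveParticipant,
) -> Result<(), &'static str> {
  let index = participants
    .iter()
    .position(|participant| *participant == call.to_remove)
    .ok_or("not an active participant")?;
  // In this design, the removal itself happens during validation
  participants.remove(index);
  Ok(())
}

fn remove_participant(_call: &RemoveParticipant) -> Result<(), &'static str> {
  // Nothing occurs here, as validate_unsigned already did everything
  Ok(())
}

fn main() {
  let mut participants = vec![[1; 32], [2; 32]];
  let call = RemoveParticipant { to_remove: [1; 32] };
  validate_unsigned(&mut participants, &call).unwrap();
  remove_participant(&call).unwrap();
  assert_eq!(participants, vec![[2; 32]]);
}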
@@ -743,7 +799,7 @@ pub mod pallet {
       Self::increase_allocation(network, validator, amount)
     }
 
-    #[pallet::call_index(2)]
+    #[pallet::call_index(3)]
     #[pallet::weight(0)] // TODO
     pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
       let account = ensure_signed(origin)?;
@@ -760,7 +816,7 @@ pub mod pallet {
       Ok(())
     }
 
-    #[pallet::call_index(3)]
+    #[pallet::call_index(4)]
     #[pallet::weight((0, DispatchClass::Operational))] // TODO
     pub fn claim_deallocation(
       origin: OriginFor<T>,
@@ -787,17 +843,11 @@ pub mod pallet {
 
     fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity {
       // Match to be exhaustive
-      let (network, key_pair, signature) = match call {
-        Call::set_keys { network, ref key_pair, ref signature } => (network, key_pair, signature),
-        Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => {
-          Err(InvalidTransaction::Call)?
-        }
-        Call::__Ignore(_, _) => unreachable!(),
-      };
-
+      match call {
+        Call::set_keys { network, ref key_pair, ref signature } => {
           // Don't allow the Serai set to set_keys, as they have no reason to do so
-          // This should already be covered by the lack of key in MuSigKeys, yet it doesn't hurt to be
-          // explicit
+          // This should already be covered by the lack of key in MuSigKeys, yet it doesn't hurt to
+          // be explicit
           if network == &NetworkId::Serai {
             Err(InvalidTransaction::Custom(0))?;
           }
@@ -821,13 +871,110 @@ pub mod pallet {
             Ok(()) => (),
           }
 
-          ValidTransaction::with_tag_prefix("validator-sets")
-            .and_provides(set)
-            // Set a 10 block longevity, though this should be included in the next block
-            .longevity(10)
+          ValidTransaction::with_tag_prefix("ValidatorSets")
+            .and_provides((0, set))
+            .longevity(u64::MAX)
             .propagate(true)
             .build()
         }
+        Call::remove_participant { network, to_remove, signers, signature } => {
+          if network == &NetworkId::Serai {
+            Err(InvalidTransaction::Custom(0))?;
+          }
+
+          // Confirm this set has a session
+          let Some(current_session) = Self::session(*network) else {
+            Err(InvalidTransaction::Custom(1))?
+          };
+          // This is needed as we modify storage variables of the latest decided session
+          assert_eq!(Pallet::<T>::latest_decided_session(*network), Some(current_session));
+          let set = ValidatorSet { network: *network, session: current_session };
+          // Confirm it has yet to set keys
+          if Keys::<T>::get(set).is_some() {
+            Err(InvalidTransaction::Custom(2))?;
+          }
+
+          let mut participants =
+            Participants::<T>::get(network).expect("session existed without participants");
+
+          // Require signers be sorted to ensure no duplicates are present
+          let mut last_signer = None;
+          let mut signing_key_shares = 0;
+          for signer in signers {
+            if let Some(last_signer) = last_signer {
+              if last_signer >= signer {
+                Err(InvalidTransaction::Custom(3))?;
+              }
+            }
+            last_signer = Some(signer);
+
+            // Doesn't use InSet as InSet *includes* removed validators
+            // Only non-removed validators should be considered as contributing
+            let Some(shares) = participants
+              .iter()
+              .find(|(participant, _)| participant == to_remove)
+              .map(|(_, shares)| shares)
+            else {
+              Err(InvalidTransaction::Custom(4))?
+            };
+            signing_key_shares += shares;
+          }
+
+          // Check 67% are participating in this removal
+          // This is done by iterating over InSet, which isn't mutated on removal, and reading the
+          // shares from that
+          let mut all_key_shares = 0;
+          for shares in InSetIter::<T>::new(*network) {
+            all_key_shares += shares;
+          }
+          // 2f + 1
+          if signing_key_shares < ((2 * (all_key_shares - signing_key_shares)) + 1) {
+            Err(InvalidTransaction::Custom(5))?;
+          }
+
+          // Perform the removal
+          let Some(removal_index) =
+            participants.iter().position(|participant| &participant.0 == to_remove)
+          else {
+            Err(InvalidTransaction::Custom(6))?
+          };
+          participants.remove(removal_index);
+
+          // Verify the signature with the MuSig key of the signers
+          if !musig_key(set, signers)
+            .verify(&remove_participant_message(&set, *to_remove), signature)
+          {
+            Err(InvalidTransaction::BadProof)?;
+          }
+
+          // Set the new MuSig key
+          MuSigKeys::<T>::set(
+            set,
+            Some(musig_key(set, &participants.iter().map(|(id, _)| *id).collect::<Vec<_>>())),
+          );
+          Participants::<T>::set(network, Some(participants));
+
+          // This does not remove from TotalAllocatedStake or InSet in order to:
+          // 1) Not decrease the stake present in this set. This means removed participants are
+          //    still liable for the economic security of the external network. This prevents
+          //    a decided set, which is economically secure, from falling below the threshold.
+          // 2) Not allow parties removed to immediately deallocate, per commentary on deallocation
+          //    scheduling (https://github.com/serai-dex/serai/issues/394).
+
+          Pallet::<T>::deposit_event(Event::ParticipantRemoved { set, removed: *to_remove });
+
+          ValidTransaction::with_tag_prefix("ValidatorSets")
+            .and_provides((1, set, to_remove))
+            .longevity(u64::MAX)
+            .propagate(true)
+            .build()
+        }
+        Call::allocate { .. } | Call::deallocate { .. } | Call::claim_deallocation { .. } => {
+          Err(InvalidTransaction::Call)?
+        }
+        Call::__Ignore(_, _) => unreachable!(),
+      }
+    }
 
     // Explicitly provide a pre-dispatch which calls validate_unsigned
     fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> {
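The 2f + 1 check above distills to: with `all` total key shares (read via InSetIter, so removed validators still count) and `signing` shares backing the removal, the non-signers are f = all - signing, and the removal stands only if signing >= 2f + 1, roughly the 67% the commit message names. A dependency-free restatement (function name is ours):

// Mirrors `signing_key_shares < ((2 * (all_key_shares - signing_key_shares)) + 1)`,
// inverted to express when the removal is accepted.
fn removal_meets_threshold(all_key_shares: u64, signing_key_shares: u64) -> bool {
  let f = all_key_shares - signing_key_shares;
  signing_key_shares >= (2 * f) + 1
}

fn main() {
  // With 4 shares total, 3 signers leave f = 1 and meet 2f + 1 = 3.
  assert!(removal_meets_threshold(4, 3));
  // 2 signers leave f = 2 and fall short of 2f + 1 = 5.
  assert!(!removal_meets_threshold(4, 2));
}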
@@ -98,9 +98,14 @@ pub fn musig_key(set: ValidatorSet, set_keys: &[Public]) -> Public {
   Public(dkg::musig::musig_key::<Ristretto>(&musig_context(set), &keys).unwrap().to_bytes())
 }
 
+/// The message for the remove_participant signature.
+pub fn remove_participant_message(set: &ValidatorSet, removed: Public) -> Vec<u8> {
+  (b"ValidatorSets-remove_participant", set, removed).encode()
+}
+
 /// The message for the set_keys signature.
 pub fn set_keys_message(set: &ValidatorSet, key_pair: &KeyPair) -> Vec<u8> {
-  [b"ValidatorSets-key_pair".as_ref(), &(set, key_pair).encode()].concat()
+  (b"ValidatorSets-set_keys", set, key_pair).encode()
 }
 
 /// For a set of validators whose key shares may exceed the maximum, reduce until they equal the
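Both message builders now share one shape: a SCALE-encoded tuple whose first element is a distinct context string, so a signature over a removal can never be replayed as a set_keys (and vice versa). A dependency-free sketch of the tagging idea (manual concatenation stands in for SCALE encoding, which is an assumption of this sketch):

// Prefix each message with a distinct context tag before the encoded payload.
fn tagged_message(tag: &[u8], payload: &[u8]) -> Vec<u8> {
  let mut message = Vec::with_capacity(tag.len() + payload.len());
  message.extend_from_slice(tag);
  message.extend_from_slice(payload);
  message
}

fn main() {
  let removal = tagged_message(b"ValidatorSets-remove_participant", b"payload");
  let set_keys = tagged_message(b"ValidatorSets-set_keys", b"payload");
  // Distinct tags yield distinct messages, preventing cross-context replay.
  assert_ne!(removal, set_keys);
}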