use core::marker::PhantomData;

use blake2::{
  digest::{consts::U32, Digest},
  Blake2b,
};

use scale::{Encode, Decode};
use serai_client::{
  primitives::NetworkId,
  validator_sets::primitives::{Session, ValidatorSet},
  in_instructions::primitives::{Batch, SignedBatch},
};

pub use serai_db::*;

use ::tributary::ReadWrite;
use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
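
/// The coordinator's main database, exposing typed accessors over the generic `Db` backend.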
#[derive(Debug)]
pub struct MainDb<D: Db>(PhantomData<D>);
impl<D: Db> MainDb<D> {
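  // Namespace all of this database's keys under `coordinator_main`, with a per-item
  // domain-separation tag.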
  fn main_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    D::key(b"coordinator_main", dst, key)
  }
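
  // Messages which have already been handled, tracked per network by message ID.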
  fn handled_message_key(network: NetworkId, id: u64) -> Vec<u8> {
    Self::main_key(b"handled_message", (network, id).encode())
  }
  pub fn save_handled_message(txn: &mut D::Transaction<'_>, network: NetworkId, id: u64) {
    txn.put(Self::handled_message_key(network, id), []);
  }
  pub fn handled_message<G: Get>(getter: &G, network: NetworkId, id: u64) -> bool {
    getter.get(Self::handled_message_key(network, id)).is_some()
  }
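
  // Tributary tracking: the validator sets this node is participating in, the actively tracked
  // Tributaries, and the Tributaries which have since been retired.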
  fn in_tributary_key(set: ValidatorSet) -> Vec<u8> {
    Self::main_key(b"in_tributary", set.encode())
  }
  fn active_tributaries_key() -> Vec<u8> {
    Self::main_key(b"active_tributaries", [])
  }
  fn retired_tributary_key(set: ValidatorSet) -> Vec<u8> {
    Self::main_key(b"retired_tributary", set.encode())
  }
  pub fn in_tributary<G: Get>(getter: &G, set: ValidatorSet) -> bool {
    getter.get(Self::in_tributary_key(set)).is_some()
  }
  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
    let bytes = getter.get(Self::active_tributaries_key()).unwrap_or(vec![]);
    let mut bytes_ref: &[u8] = bytes.as_ref();

    let mut tributaries = vec![];
    while !bytes_ref.is_empty() {
      tributaries.push(TributarySpec::read(&mut bytes_ref).unwrap());
    }

    (bytes, tributaries)
  }
  pub fn add_participating_in_tributary(txn: &mut D::Transaction<'_>, spec: &TributarySpec) {
    txn.put(Self::in_tributary_key(spec.set()), []);

    let key = Self::active_tributaries_key();
    let (mut existing_bytes, existing) = Self::active_tributaries(txn);
    for tributary in &existing {
      if tributary == spec {
        return;
      }
    }

    spec.write(&mut existing_bytes).unwrap();
    txn.put(key, existing_bytes);
  }
  pub fn retire_tributary(txn: &mut D::Transaction<'_>, set: ValidatorSet) {
    let mut active = Self::active_tributaries(txn).1;
    for i in 0 .. active.len() {
      if active[i].set() == set {
        active.remove(i);
        break;
      }
    }

    let mut bytes = vec![];
    for active in active {
      active.write(&mut bytes).unwrap();
    }
    txn.put(Self::active_tributaries_key(), bytes);
    txn.put(Self::retired_tributary_key(set), []);
  }
  pub fn is_tributary_retired<G: Get>(getter: &G, set: ValidatorSet) -> bool {
    getter.get(Self::retired_tributary_key(set)).is_some()
  }
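
  // Signed transactions, stored by nonce. `take_signed_transaction` deletes the entry as it
  // reads it.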
  fn signed_transaction_key(nonce: u32) -> Vec<u8> {
    Self::main_key(b"signed_transaction", nonce.to_le_bytes())
  }
  pub fn save_signed_transaction(txn: &mut D::Transaction<'_>, nonce: u32, tx: Transaction) {
    txn.put(Self::signed_transaction_key(nonce), tx.serialize());
  }
  pub fn take_signed_transaction(txn: &mut D::Transaction<'_>, nonce: u32) -> Option<Transaction> {
    let key = Self::signed_transaction_key(nonce);
    let res = txn.get(&key).map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());
    if res.is_some() {
      txn.del(&key);
    }
    res
  }
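
  // The first preprocess saved for a recognized ID. Saving again is a no-op if the preprocess is
  // identical, and will panic if it's distinct.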
  fn first_preprocess_key(network: NetworkId, id_type: RecognizedIdType, id: [u8; 32]) -> Vec<u8> {
    Self::main_key(b"first_preprocess", (network, id_type, id).encode())
  }
  pub fn save_first_preprocess(
    txn: &mut D::Transaction<'_>,
    network: NetworkId,
    id_type: RecognizedIdType,
    id: [u8; 32],
    preprocess: Vec<Vec<u8>>,
  ) {
    let preprocess = preprocess.encode();
    let key = Self::first_preprocess_key(network, id_type, id);
    if let Some(existing) = txn.get(&key) {
      assert_eq!(existing, preprocess, "saved a distinct first preprocess");
      return;
    }
    txn.put(key, preprocess);
  }
  pub fn first_preprocess<G: Get>(
    getter: &G,
    network: NetworkId,
    id_type: RecognizedIdType,
    id: [u8; 32],
  ) -> Option<Vec<Vec<u8>>> {
    getter
      .get(Self::first_preprocess_key(network, id_type, id))
      .map(|bytes| Vec::<_>::decode(&mut bytes.as_slice()).unwrap())
  }
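
  // The ID of the last batch received per network, and the Blake2b-256 hash of each expected
  // batch's instructions.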
  fn last_received_batch_key(network: NetworkId) -> Vec<u8> {
    Self::main_key(b"last_received_batch", network.encode())
  }
  fn expected_batch_key(network: NetworkId, id: u32) -> Vec<u8> {
    Self::main_key(b"expected_batch", (network, id).encode())
  }
  pub fn save_expected_batch(txn: &mut D::Transaction<'_>, batch: &Batch) {
    txn.put(Self::last_received_batch_key(batch.network), batch.id.to_le_bytes());
    txn.put(
      Self::expected_batch_key(batch.network, batch.id),
      Blake2b::<U32>::digest(batch.instructions.encode()),
    );
  }
  pub fn last_received_batch<G: Get>(getter: &G, network: NetworkId) -> Option<u32> {
    getter
      .get(Self::last_received_batch_key(network))
      .map(|id| u32::from_le_bytes(id.try_into().unwrap()))
  }
  pub fn expected_batch<G: Get>(getter: &G, network: NetworkId, id: u32) -> Option<[u8; 32]> {
    getter.get(Self::expected_batch_key(network, id)).map(|batch| batch.try_into().unwrap())
  }
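
  // SignedBatches, stored by network and batch ID.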
  fn batch_key(network: NetworkId, id: u32) -> Vec<u8> {
    Self::main_key(b"batch", (network, id).encode())
  }
  pub fn save_batch(txn: &mut D::Transaction<'_>, batch: SignedBatch) {
    txn.put(Self::batch_key(batch.batch.network, batch.batch.id), batch.encode());
  }
  pub fn batch<G: Get>(getter: &G, network: NetworkId, id: u32) -> Option<SignedBatch> {
    getter
      .get(Self::batch_key(network, id))
      .map(|batch| SignedBatch::decode(&mut batch.as_ref()).unwrap())
  }
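
  // The ID of the last verified batch for each network.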
  fn last_verified_batch_key(network: NetworkId) -> Vec<u8> {
    Self::main_key(b"last_verified_batch", network.encode())
  }
  pub fn save_last_verified_batch(txn: &mut D::Transaction<'_>, network: NetworkId, id: u32) {
    txn.put(Self::last_verified_batch_key(network), id.to_le_bytes());
  }
  pub fn last_verified_batch<G: Get>(getter: &G, network: NetworkId) -> Option<u32> {
    getter
      .get(Self::last_verified_batch_key(network))
      .map(|id| u32::from_le_bytes(id.try_into().unwrap()))
  }
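
  // The handover batch for a validator set, with a reverse lookup from (network, batch ID) to
  // the corresponding session.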
  fn handover_batch_key(set: ValidatorSet) -> Vec<u8> {
    Self::main_key(b"handover_batch", set.encode())
  }
  fn lookup_handover_batch_key(network: NetworkId, batch: u32) -> Vec<u8> {
    Self::main_key(b"lookup_handover_batch", (network, batch).encode())
  }
  pub fn set_handover_batch(txn: &mut D::Transaction<'_>, set: ValidatorSet, batch: u32) {
    txn.put(Self::handover_batch_key(set), batch.to_le_bytes());
    txn.put(Self::lookup_handover_batch_key(set.network, batch), set.session.0.to_le_bytes());
  }
  pub fn handover_batch<G: Get>(getter: &G, set: ValidatorSet) -> Option<u32> {
    getter.get(Self::handover_batch_key(set)).map(|id| u32::from_le_bytes(id.try_into().unwrap()))
  }
  pub fn is_handover_batch<G: Get>(
    getter: &G,
    network: NetworkId,
    batch: u32,
  ) -> Option<ValidatorSet> {
    getter.get(Self::lookup_handover_batch_key(network, batch)).map(|session| ValidatorSet {
      network,
      session: Session(u32::from_le_bytes(session.try_into().unwrap())),
    })
  }
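
  // Batch transactions queued for a validator set, appended into a single value and drained in
  // full by `take_queued_batches`.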
  fn queued_batches_key(set: ValidatorSet) -> Vec<u8> {
    Self::main_key(b"queued_batches", set.encode())
  }
  pub fn queue_batch(txn: &mut D::Transaction<'_>, set: ValidatorSet, batch: Transaction) {
    let key = Self::queued_batches_key(set);
    let mut batches = txn.get(&key).unwrap_or(vec![]);
    batches.extend(batch.serialize());
    txn.put(&key, batches);
  }
  pub fn take_queued_batches(txn: &mut D::Transaction<'_>, set: ValidatorSet) -> Vec<Transaction> {
    let key = Self::queued_batches_key(set);
    let batches_vec = txn.get(&key).unwrap_or(vec![]);
    txn.del(&key);
    let mut batches: &[u8] = &batches_vec;

    let mut res = vec![];
    while !batches.is_empty() {
      res.push(Transaction::read(&mut batches).unwrap());
    }
    res
  }
}