Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-08 12:19:24 +00:00
Remove duplicated genesis presence in Tributary Scanner DB keys
This wasted 32 bytes on every single entry in the DB (ignoring de-duplication possible by the DB layer).
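For context on where the duplication came from: the create_db! declarations below already take the genesis as a key argument, while the old Topic::as_key(genesis) helper (removed in this diff) prepended the same 32 bytes again, so every AttemptDb/DataReceived/DataDb key carried the genesis twice. The sketch below only illustrates that difference in key layout; the function names and the exact prefix create_db! derives from its arguments are assumptions for illustration, not serai_db's real output.

// Illustrative sketch only: helper names are made up and the byte layout is assumed.
fn old_attempt_key(genesis: [u8; 32]) -> Vec<u8> {
  // The declared `genesis` key argument already scopes the entry...
  let mut key = genesis.to_vec();
  // ...but the old Topic::as_key(genesis) also began with the same 32 bytes,
  // followed by a length-prefixed label and id (Topic::Dkg shown here).
  key.extend(genesis);
  key.extend(b"\x03dkg\x00");
  key
}

fn new_attempt_key(genesis: [u8; 32]) -> Vec<u8> {
  // The key argument is now just the SCALE-encoded Topic (Dkg = variant 0),
  // so the genesis appears once, via the DB's own scoping.
  let mut key = genesis.to_vec();
  key.extend([0u8]);
  key
}

fn main() {
  let genesis: [u8; 32] = core::array::from_fn(|i| i as u8);
  let count_genesis = |key: &[u8]| key.windows(32).filter(|w| *w == genesis).count();
  assert_eq!(count_genesis(&old_attempt_key(genesis)), 2); // the duplicated 32 bytes
  assert_eq!(count_genesis(&new_attempt_key(genesis)), 1); // stored once per entry
}

With Topic and DataSpecification deriving Encode, the schema can take &Topic and &DataSpecification directly and let the DB layer build the key, which is what the hunks below do.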
@@ -9,59 +9,27 @@ use serai_client::validator_sets::primitives::{ValidatorSet, KeyPair};
 
 use processor_messages::coordinator::SubstrateSignableId;
 
-use scale::Encode;
+use scale::{Encode, Decode};
 
 pub use serai_db::*;
 
 use crate::tributary::TributarySpec;
 
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
 pub enum Topic {
   Dkg,
   SubstrateSign(SubstrateSignableId),
   Sign([u8; 32]),
 }
 
-impl Topic {
-  fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
-    let mut res = genesis.to_vec();
-    #[allow(unused_assignments)] // False positive
-    let mut id_buf = vec![];
-    let (label, id) = match self {
-      Topic::Dkg => (b"dkg".as_slice(), [].as_slice()),
-      Topic::SubstrateSign(id) => {
-        id_buf = id.encode();
-        (b"substrate_sign".as_slice(), id_buf.as_slice())
-      }
-      Topic::Sign(id) => (b"sign".as_slice(), id.as_slice()),
-    };
-    res.push(u8::try_from(label.len()).unwrap());
-    res.extend(label);
-    res.push(u8::try_from(id.len()).unwrap());
-    res.extend(id);
-    res
-  }
-}
-
 // A struct to refer to a piece of data all validators will presumably provide a value for.
-#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
 pub struct DataSpecification {
   pub topic: Topic,
   pub label: &'static str,
   pub attempt: u32,
 }
 
-impl DataSpecification {
-  pub fn as_key(&self, genesis: [u8; 32]) -> Vec<u8> {
-    let mut res = self.topic.as_key(genesis);
-    let label_bytes = self.label.bytes();
-    res.push(u8::try_from(label_bytes.len()).unwrap());
-    res.extend(label_bytes);
-    res.extend(self.attempt.to_le_bytes());
-    res
-  }
-}
-
 pub enum DataSet {
   Participating(HashMap<Participant, Vec<u8>>),
   NotParticipating,
@@ -76,16 +44,16 @@ create_db!(
   NewTributary {
     SeraiBlockNumber: (hash: [u8; 32]) -> u64,
     LastBlock: (genesis: [u8; 32]) -> [u8; 32],
-    FatalSlashes: (genesis: [u8; 32]) -> Vec<u8>,
+    FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
     FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
-    ShareBlame: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
+    DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
     PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
     ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
     CurrentlyCompletingKeyPair: (genesis: [u8; 32]) -> KeyPair,
     KeyPairDb: (set: ValidatorSet) -> KeyPair,
-    AttemptDb: (genesis: [u8; 32], topic_key: &Vec<u8>) -> u32,
-    DataReceived: (genesis: [u8; 32], data_spec_key: &Vec<u8>) -> u16,
-    DataDb: (genesis: [u8; 32], data_spec_key: &Vec<u8>, signer_bytes: &[u8; 32]) -> Vec<u8>,
+    AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
+    DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
+    DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,
     EventDb: (id: [u8; 32], index: u32) -> (),
   }
 );
@@ -96,23 +64,24 @@ impl FatallySlashed {
     let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();
 
     // Don't append if we already have it
-    if existing.chunks(32).any(|existing| existing == account) {
+    if existing.iter().any(|existing| existing == &account) {
       return;
     }
 
-    existing.extend(account);
+    existing.push(account);
     FatalSlashes::set(txn, genesis, &existing);
   }
 }
 
 impl AttemptDb {
   pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) {
-    Self::set(txn, genesis, &topic.as_key(genesis), &0u32);
+    Self::set(txn, genesis, &topic, &0u32);
   }
 
   pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
-    let attempt = Self::get(getter, genesis, &topic.as_key(genesis));
-    if attempt.is_none() && topic == Topic::Dkg {
+    let attempt = Self::get(getter, genesis, &topic);
+    // Don't require explicit recognition of the Dkg topic as it starts when the chain does
+    if attempt.is_none() && (topic == Topic::Dkg) {
       return Some(0);
     }
     attempt
@@ -120,22 +89,6 @@ impl AttemptDb {
 }
 
 impl DataDb {
-  pub fn set_data(
-    txn: &mut impl DbTxn,
-    genesis: [u8; 32],
-    data_spec: &DataSpecification,
-    signer: <Ristretto as Ciphersuite>::G,
-    signer_shares: u16,
-    data: &Vec<u8>,
-  ) -> (u16, u16) {
-    let data_spec = data_spec.as_key(genesis);
-    let prior_received = DataReceived::get(txn, genesis, &data_spec).unwrap_or_default();
-    let received = prior_received + signer_shares;
-    DataReceived::set(txn, genesis, &data_spec, &received);
-    DataDb::set(txn, genesis, &data_spec, &signer.to_bytes(), data);
-    (prior_received, received)
-  }
-
   pub fn accumulate(
     txn: &mut impl DbTxn,
     our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
@@ -145,8 +98,7 @@ impl DataDb {
     data: &Vec<u8>,
   ) -> Accumulation {
     let genesis = spec.genesis();
-    let data_spec_key = data_spec.as_key(genesis);
-    if Self::get(txn, genesis, &data_spec_key, &signer.to_bytes()).is_some() {
+    if Self::get(txn, genesis, data_spec, &signer.to_bytes()).is_some() {
       panic!("accumulating data for a participant multiple times");
     }
     let signer_shares = {
@@ -154,8 +106,11 @@ impl DataDb {
         spec.i(signer).expect("transaction signed by a non-validator for this tributary");
       u16::from(signer_i.end) - u16::from(signer_i.start)
     };
-    let (prior_received, now_received) =
-      Self::set_data(txn, spec.genesis(), data_spec, signer, signer_shares, data);
+
+    let prior_received = DataReceived::get(txn, genesis, data_spec).unwrap_or_default();
+    let now_received = prior_received + signer_shares;
+    DataReceived::set(txn, genesis, data_spec, &now_received);
+    DataDb::set(txn, genesis, data_spec, &signer.to_bytes(), data);
 
     // If we have all the needed commitments/preprocesses/shares, tell the processor
     let needed = if data_spec.topic == Topic::Dkg { spec.n() } else { spec.t() };
@@ -165,7 +120,7 @@ impl DataDb {
       for validator in spec.validators().iter().map(|validator| validator.0) {
         data.insert(
           spec.i(validator).unwrap().start,
-          if let Some(data) = Self::get(txn, genesis, &data_spec_key, &validator.to_bytes()) {
+          if let Some(data) = Self::get(txn, genesis, data_spec, &validator.to_bytes()) {
             data
           } else {
             continue;
@@ -31,7 +31,7 @@ use crate::{
     nonce_decider::NonceDecider,
     dkg_confirmer::DkgConfirmer,
     scanner::{RecognizedIdType, RIDTrait},
-    FatallySlashed, ShareBlame, PlanIds, ConfirmationNonces, KeyPairDb, AttemptDb, DataDb,
+    FatallySlashed, DkgShare, PlanIds, ConfirmationNonces, KeyPairDb, AttemptDb, DataDb,
   },
 };
 
@@ -177,7 +177,7 @@ pub(crate) async fn handle_application_tx<
     };
 
     // If they've already published a TX for this attempt, slash
-    if DataDb::get(txn, genesis, &data_spec.as_key(genesis), &signed.signer.to_bytes()).is_some() {
+    if DataDb::get(txn, genesis, data_spec, &signed.signer.to_bytes()).is_some() {
       fatal_slash::<D>(txn, genesis, signed.signer.to_bytes(), "published data multiple times");
       return Accumulation::NotReady;
     }
@@ -313,7 +313,7 @@ pub(crate) async fn handle_application_tx<
           }
           let to = Participant::new(to).unwrap();
 
-          ShareBlame::set(txn, genesis, from.into(), to.into(), &share);
+          DkgShare::set(txn, genesis, from.into(), to.into(), &share);
         }
       }
     }
@@ -439,7 +439,7 @@ pub(crate) async fn handle_application_tx<
         return;
       }
 
-      let share = ShareBlame::get(txn, genesis, accuser.into(), faulty.into()).unwrap();
+      let share = DkgShare::get(txn, genesis, accuser.into(), faulty.into()).unwrap();
       processors
         .send(
           spec.set().network,
@@ -10,6 +10,8 @@ use tokio::sync::broadcast;
 use scale::{Encode, Decode};
 use serai_client::{validator_sets::primitives::ValidatorSet, subxt::utils::Encoded, Serai};
 
+use serai_db::DbTxn;
+
 use tributary::{
   TransactionKind, Transaction as TributaryTransaction, Block, TributaryReader,
   tendermint::{
@@ -22,14 +24,10 @@ use crate::{
   Db,
   tributary::handle::{fatal_slash, handle_application_tx},
   processors::Processors,
-  tributary::{TributarySpec, Transaction, EventDb},
+  tributary::{TributarySpec, Transaction, LastBlock, EventDb},
   P2p,
 };
 
-use super::LastBlock;
-
-use serai_db::DbTxn;
-
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
 pub enum RecognizedIdType {
   Batch,