Make MainDB into SubstrateDB

Luke Parker
2023-04-20 05:04:08 -04:00
parent ee65e4df8f
commit 9e1f3fc85c
3 changed files with 90 additions and 104 deletions
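The substance of the change: MainDb (deleted below) wrapped the database and had every write open and commit its own transaction, while the new SubstrateDb keeps the last-block helpers but turns event tracking into associated functions over an explicit Get or DbTxn, so the caller owns the transaction. As a result, handle_new_set, handle_key_gen, and handle_batch_and_burns lose their db argument, in_set returns a plain Option<bool>, and the Tributary construction parameters move behind TributarySpec. A minimal caller-side sketch of the before/after shape (not code from this commit; it assumes db: D with D: Db, plus hash: [u8; 32] and event_id: u32 in scope):

// Before: MainDb owned the commit, so every event marker was its own
// transaction and couldn't be batched with other writes.
let mut main_db = MainDb::new(db.clone());
if !main_db.handled_event(hash, event_id) {
  // ... act on the event ...
  main_db.handle_event(hash, event_id);
}

// After: SubstrateDb reads through any Get and writes through a
// caller-owned transaction, committed when the caller decides.
if !SubstrateDb::<D>::handled_event(&db, hash, event_id) {
  // ... act on the event ...
  let mut txn = db.txn();
  SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
  txn.commit();
}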

View File

@@ -1,39 +0,0 @@
pub use serai_db::*;

#[derive(Debug)]
pub struct MainDb<D: Db>(pub D);
impl<D: Db> MainDb<D> {
  pub fn new(db: D) -> Self {
    Self(db)
  }

  fn main_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    D::key(b"MAIN", dst, key)
  }

  fn substrate_block_key() -> Vec<u8> {
    Self::main_key(b"substrate_block", [])
  }
  pub fn set_last_substrate_block(&mut self, block: u64) {
    let mut txn = self.0.txn();
    txn.put(Self::substrate_block_key(), block.to_le_bytes());
    txn.commit();
  }
  pub fn last_substrate_block(&self) -> u64 {
    u64::from_le_bytes(
      self.0.get(Self::substrate_block_key()).unwrap_or(vec![0; 8]).try_into().unwrap(),
    )
  }

  fn event_key(id: &[u8], index: u32) -> Vec<u8> {
    Self::main_key(b"event", [id, index.to_le_bytes().as_ref()].concat())
  }
  pub fn handle_event(&mut self, id: [u8; 32], index: u32) {
    let mut txn = self.0.txn();
    txn.put(Self::event_key(&id, index), []);
    txn.commit();
  }
  pub fn handled_event(&self, id: [u8; 32], index: u32) -> bool {
    self.0.get(Self::event_key(&id, index)).is_some()
  }
}

View File

@@ -0,0 +1,36 @@
pub use serai_db::*;

#[derive(Debug)]
pub struct SubstrateDb<D: Db>(pub D);
impl<D: Db> SubstrateDb<D> {
  pub fn new(db: D) -> Self {
    Self(db)
  }

  fn substrate_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    D::key(b"SUBSTRATE", dst, key)
  }

  fn block_key() -> Vec<u8> {
    Self::substrate_key(b"block", [])
  }
  pub fn set_last_block(&mut self, block: u64) {
    let mut txn = self.0.txn();
    txn.put(Self::block_key(), block.to_le_bytes());
    txn.commit();
  }
  pub fn last_block(&self) -> u64 {
    u64::from_le_bytes(self.0.get(Self::block_key()).unwrap_or(vec![0; 8]).try_into().unwrap())
  }

  fn event_key(id: &[u8], index: u32) -> Vec<u8> {
    Self::substrate_key(b"event", [id, index.to_le_bytes().as_ref()].concat())
  }
  pub fn handled_event<G: Get>(getter: &G, id: [u8; 32], index: u32) -> bool {
    getter.get(Self::event_key(&id, index)).is_some()
  }
  pub fn handle_event(txn: &mut D::Transaction<'_>, id: [u8; 32], index: u32) {
    assert!(!Self::handled_event(txn, id, index));
    txn.put(Self::event_key(&id, index), []);
  }
}
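Two details versus the deleted MainDb: the key namespace changes from b"MAIN" to b"SUBSTRATE" (and substrate_block becomes just block), and handled_event is generic over Get, which is what lets handle_event assert against double handling using the very transaction it is about to write to. A hedged sketch of that path, assuming the transaction's get sees its own uncommitted puts (an assumption about the serai_db backend, not something this commit shows):

let mut txn = db.0.txn();
SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
// Repeating the same (hash, event_id) before the commit would trip the
// assert!, since the open txn itself is passed as the Get.
txn.commit();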

View File

@@ -4,7 +4,7 @@ use std::collections::{HashSet, HashMap};
 use zeroize::Zeroizing;
 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
-use frost::{Participant, ThresholdParams};
+use frost::ThresholdParams;
 use serai_client::{
   SeraiError, Block, Serai,
@@ -17,36 +17,31 @@ use serai_client::{
   tokens::{primitives::OutInstructionWithBalance, TokensEvent},
 };
+use serai_db::DbTxn;
 use tributary::Tributary;
 use processor_messages::{SubstrateContext, key_gen::KeyGenId, CoordinatorMessage};
-use crate::{Db, MainDb, P2p, processor::Processor};
+use crate::{Db, P2p, processor::Processor, tributary::TributarySpec};
-async fn get_coin_key(serai: &Serai, set: ValidatorSet) -> Result<Option<Vec<u8>>, SeraiError> {
-  Ok(serai.get_keys(set).await?.map(|keys| keys.1.into_inner()))
-}
+mod db;
+pub use db::*;
 async fn in_set(
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   serai: &Serai,
   set: ValidatorSet,
-) -> Result<Option<Option<Participant>>, SeraiError> {
+) -> Result<Option<bool>, SeraiError> {
   let Some(data) = serai.get_validator_set(set).await? else {
     return Ok(None);
   };
   let key = (Ristretto::generator() * key.deref()).to_bytes();
-  Ok(Some(
-    data
-      .participants
-      .iter()
-      .position(|(participant, _)| participant.0 == key)
-      .map(|index| Participant::new((index + 1).try_into().unwrap()).unwrap()),
-  ))
+  Ok(Some(data.participants.iter().any(|(participant, _)| participant.0 == key)))
 }
 async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: D,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
@@ -54,29 +49,18 @@ async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
   block: &Block,
   set: ValidatorSet,
 ) -> Result<(), SeraiError> {
-  if let Some(i) = in_set(key, serai, set).await?.expect("NewSet for set which doesn't exist") {
+  if in_set(key, serai, set).await?.expect("NewSet for set which doesn't exist") {
     let set_data = serai.get_validator_set(set).await?.expect("NewSet for set which doesn't exist");
-    let n = u16::try_from(set_data.participants.len()).unwrap();
-    let t = (2 * (n / 3)) + 1;
-    let mut validators = HashMap::new();
-    for (l, (participant, amount)) in set_data.participants.iter().enumerate() {
-      // TODO: Ban invalid keys from being validators on the Serai side
-      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
-        .expect("invalid key registered as participant");
-      // Give one weight on Tributary per bond instance
-      validators.insert(participant, amount.0 / set_data.bond.0);
-    }
+    let spec = TributarySpec::new(block.hash(), block.time().unwrap(), set, set_data);
     // TODO: Do something with this
     let tributary = Tributary::<_, crate::tributary::Transaction, _>::new(
-      // TODO2: Use a DB on a dedicated volume
-      db.0.clone(),
-      crate::tributary::genesis(block.hash(), set),
-      block.time().expect("Serai block didn't have a timestamp set"),
+      db,
+      spec.genesis(),
+      spec.start_time(),
       key.clone(),
-      validators,
+      spec.validators(),
       p2p.clone(),
     )
     .await
@@ -91,7 +75,14 @@ async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
       .send(CoordinatorMessage::KeyGen(
         processor_messages::key_gen::CoordinatorMessage::GenerateKey {
           id: KeyGenId { set, attempt: 0 },
-          params: ThresholdParams::new(t, n, i).unwrap(),
+          params: ThresholdParams::new(
+            spec.t(),
+            spec.n(),
+            spec
+              .i(Ristretto::generator() * key.deref())
+              .expect("In set for a set we aren't in set for"),
+          )
+          .unwrap(),
         },
       ))
       .await;
@@ -100,8 +91,7 @@ async fn handle_new_set<D: Db, Pro: Processor, P: P2p>(
   Ok(())
 }
-async fn handle_key_gen<D: Db, Pro: Processor>(
-  db: &mut MainDb<D>,
+async fn handle_key_gen<Pro: Processor>(
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   processor: &mut Pro,
   serai: &Serai,
@@ -109,11 +99,7 @@ async fn handle_key_gen<D: Db, Pro: Processor>(
   set: ValidatorSet,
   key_pair: KeyPair,
 ) -> Result<(), SeraiError> {
-  if in_set(key, serai, set)
-    .await?
-    .expect("KeyGen occurred for a set which doesn't exist")
-    .is_some()
-  {
+  if in_set(key, serai, set).await?.expect("KeyGen occurred for a set which doesn't exist") {
     // TODO: Check how the processor handles this being fired multiple times
     processor
       .send(CoordinatorMessage::Substrate(
@@ -137,9 +123,7 @@ async fn handle_key_gen<D: Db, Pro: Processor>(
   Ok(())
 }
-async fn handle_batch_and_burns<D: Db, Pro: Processor>(
-  db: &mut MainDb<D>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
+async fn handle_batch_and_burns<Pro: Processor>(
   processor: &mut Pro,
   serai: &Serai,
   block: &Block,
@@ -213,12 +197,10 @@ async fn handle_batch_and_burns<D: Db, Pro: Processor>(
                 serai_time: block.time().unwrap(),
                 coin_latest_finalized_block,
               },
-              key: get_coin_key(
-                serai,
-                // TODO2
-                ValidatorSet { network, session: Session(0) },
-              )
-              .await?
-              .expect("batch/burn for network which never set keys"),
+              key: serai
+                .get_keys(ValidatorSet { network, session: Session(0) }) // TODO2
+                .await?
+                .map(|keys| keys.1.into_inner())
+                .expect("batch/burn for network which never set keys"),
               burns: burns.remove(&network).unwrap(),
             },
@@ -232,7 +214,7 @@ async fn handle_batch_and_burns<D: Db, Pro: Processor>(
 // Handle a specific Substrate block, returning an error when it fails to get data
 // (not blocking / holding)
 async fn handle_block<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: &mut SubstrateDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
@@ -250,26 +232,31 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
     // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
     // events will successfully be incrementally handled (though the Serai connection should be
     // stable)
-    if !db.handled_event(hash, event_id) {
+    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
       if let ValidatorSetsEvent::NewSet { set } = new_set {
-        handle_new_set(db, key, p2p, processor, serai, &block, set).await?;
+        // TODO2: Use a DB on a dedicated volume
+        handle_new_set(db.0.clone(), key, p2p, processor, serai, &block, set).await?;
       } else {
         panic!("NewSet event wasn't NewSet: {new_set:?}");
       }
-      db.handle_event(hash, event_id);
+      let mut txn = db.0.txn();
+      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+      txn.commit();
     }
     event_id += 1;
   }
   // If a key pair was confirmed, inform the processor
   for key_gen in serai.get_key_gen_events(hash).await? {
-    if !db.handled_event(hash, event_id) {
+    if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
       if let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen {
-        handle_key_gen(db, key, processor, serai, &block, set, key_pair).await?;
+        handle_key_gen(key, processor, serai, &block, set, key_pair).await?;
       } else {
         panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
       }
-      db.handle_event(hash, event_id);
+      let mut txn = db.0.txn();
+      SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+      txn.commit();
     }
     event_id += 1;
   }
@@ -279,31 +266,33 @@ async fn handle_block<D: Db, Pro: Processor, P: P2p>(
   // following events share data collection
   // This does break the uniqueness of (hash, event_id) -> one event, yet
   // (network, (hash, event_id)) remains valid as a unique ID for an event
-  if !db.handled_event(hash, event_id) {
-    handle_batch_and_burns(db, key, processor, serai, &block).await?;
+  if !SubstrateDb::<D>::handled_event(&db.0, hash, event_id) {
+    handle_batch_and_burns(processor, serai, &block).await?;
   }
-  db.handle_event(hash, event_id);
+  let mut txn = db.0.txn();
+  SubstrateDb::<D>::handle_event(&mut txn, hash, event_id);
+  txn.commit();
   Ok(())
 }
 pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
-  db: &mut MainDb<D>,
+  db: &mut SubstrateDb<D>,
   key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   p2p: &P,
   processor: &mut Pro,
   serai: &Serai,
-  last_substrate_block: &mut u64,
+  last_block: &mut u64,
 ) -> Result<(), SeraiError> {
   // Check if there's been a new Substrate block
   let latest = serai.get_latest_block().await?;
   let latest_number = latest.number();
-  if latest_number == *last_substrate_block {
+  if latest_number == *last_block {
     return Ok(());
   }
   let mut latest = Some(latest);
-  for b in (*last_substrate_block + 1) ..= latest_number {
+  for b in (*last_block + 1) ..= latest_number {
     handle_block(
       db,
       key,
@@ -320,8 +309,8 @@ pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
       },
     )
     .await?;
-    *last_substrate_block += 1;
-    db.set_last_substrate_block(*last_substrate_block);
+    *last_block += 1;
+    db.set_last_block(*last_block);
   }
   Ok(())
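This commit doesn't touch the caller, but the intended driver loop follows from the API above: seed last_block from SubstrateDb::last_block (which defaults to 0), then poll handle_new_blocks; progress is persisted through set_last_block on every handled block. A hypothetical sketch of such a loop, not taken from this commit (the polling interval and the surrounding setup are assumptions):

// Assumed to run inside an async fn returning Result<(), SeraiError>,
// with raw_db, key, p2p, processor, and serai already constructed.
let mut db = SubstrateDb::new(raw_db);
let mut last_block = db.last_block();
loop {
  // Walks (last_block + 1) ..= latest and persists progress via
  // set_last_block, so a restart resumes from the last handled block.
  handle_new_blocks(&mut db, &key, &p2p, &mut processor, &serai, &mut last_block).await?;
  tokio::time::sleep(core::time::Duration::from_secs(3)).await;
}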