Initial Tributary handling

Luke Parker
2023-04-20 05:05:17 -04:00
parent 9e1f3fc85c
commit 8041a0d845
9 changed files with 413 additions and 42 deletions

@@ -0,0 +1,92 @@
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

pub use serai_db::*;

#[derive(Debug)]
pub struct TributaryDb<D: Db>(pub D);
impl<D: Db> TributaryDb<D> {
  pub fn new(db: D) -> Self {
    Self(db)
  }

  fn tributary_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    D::key(b"TRIBUTARY", dst, key)
  }

  fn block_key(genesis: [u8; 32]) -> Vec<u8> {
    Self::tributary_key(b"block", genesis)
  }
  pub fn set_last_block(&mut self, genesis: [u8; 32], block: [u8; 32]) {
    let mut txn = self.0.txn();
    txn.put(Self::block_key(genesis), block);
    txn.commit();
  }
  pub fn last_block(&self, genesis: [u8; 32]) -> [u8; 32] {
    self.0.get(Self::block_key(genesis)).unwrap_or(genesis.to_vec()).try_into().unwrap()
  }

  fn dkg_attempt_key(genesis: [u8; 32]) -> Vec<u8> {
    Self::tributary_key(b"dkg_attempt", genesis)
  }
  pub fn dkg_attempt<G: Get>(getter: &G, genesis: [u8; 32]) -> u32 {
    u32::from_le_bytes(
      getter.get(Self::dkg_attempt_key(genesis)).unwrap_or(vec![0; 4]).try_into().unwrap(),
    )
  }

  fn dkg_data_received_key(label: &'static [u8], genesis: &[u8], attempt: u32) -> Vec<u8> {
    Self::tributary_key(
      b"dkg_data_received",
      [label, genesis, attempt.to_le_bytes().as_ref()].concat(),
    )
  }
  fn dkg_data_key(
    label: &'static [u8],
    genesis: &[u8],
    signer: &<Ristretto as Ciphersuite>::G,
    attempt: u32,
  ) -> Vec<u8> {
    Self::tributary_key(
      b"dkg_data",
      [label, genesis, signer.to_bytes().as_ref(), attempt.to_le_bytes().as_ref()].concat(),
    )
  }
  pub fn dkg_data<G: Get>(
    label: &'static [u8],
    getter: &G,
    genesis: [u8; 32],
    signer: &<Ristretto as Ciphersuite>::G,
    attempt: u32,
  ) -> Option<Vec<u8>> {
    getter.get(Self::dkg_data_key(label, &genesis, signer, attempt))
  }
  pub fn set_dkg_data(
    label: &'static [u8],
    txn: &mut D::Transaction<'_>,
    genesis: [u8; 32],
    signer: &<Ristretto as Ciphersuite>::G,
    attempt: u32,
    data: &[u8],
  ) -> u16 {
    let received_key = Self::dkg_data_received_key(label, &genesis, attempt);
    let mut received =
      u16::from_le_bytes(txn.get(&received_key).unwrap_or(vec![0; 2]).try_into().unwrap());
    received += 1;

    txn.put(received_key, received.to_le_bytes());
    txn.put(Self::dkg_data_key(label, &genesis, signer, attempt), data);

    received
  }

  fn event_key(id: &[u8], index: u32) -> Vec<u8> {
    Self::tributary_key(b"event", [id, index.to_le_bytes().as_ref()].concat())
  }
  pub fn handled_event<G: Get>(getter: &G, id: [u8; 32], index: u32) -> bool {
    getter.get(Self::event_key(&id, index)).is_some()
  }
  pub fn handle_event(txn: &mut D::Transaction<'_>, id: [u8; 32], index: u32) {
    assert!(!Self::handled_event(txn, id, index));
    txn.put(Self::event_key(&id, index), []);
  }
}
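The database layer above prefixes every key with b"TRIBUTARY" and a per-entry domain, keying most entries by the Tributary's genesis, so one database can back multiple Tributaries. The following is a minimal usage sketch, not part of this commit, assuming it sits alongside the TributaryDb definition above; record_commitments, THRESHOLD_N, and the fixed attempt number 0 are illustrative placeholders.

// Hedged usage sketch (not part of this commit): record one participant's DKG commitments and
// report whether every expected participant has now submitted for attempt 0.
use ciphersuite::{Ciphersuite, Ristretto};
use serai_db::*;

fn record_commitments<D: Db>(
  db: &mut TributaryDb<D>,
  genesis: [u8; 32],
  signer: &<Ristretto as Ciphersuite>::G,
  commitments: &[u8],
) -> bool {
  // Hypothetical validator count; in practice this would come from the TributarySpec
  const THRESHOLD_N: u16 = 5;

  let mut txn = db.0.txn();
  // set_dkg_data returns how many distinct participants have submitted data for this attempt
  let received =
    TributaryDb::<D>::set_dkg_data(b"commitments", &mut txn, genesis, signer, 0, commitments);
  txn.commit();

  received == THRESHOLD_N
}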

@@ -0,0 +1,332 @@
use std::{io, collections::HashMap};

use blake2::{Digest, Blake2s256};

use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{Ciphersuite, Ristretto};
use frost::Participant;

use scale::Encode;

use serai_client::validator_sets::primitives::{ValidatorSet, ValidatorSetData};

#[rustfmt::skip]
use tributary::{
  ReadWrite, Signed, TransactionError, TransactionKind, Transaction as TransactionTrait,
};

mod db;
pub use db::*;

pub mod scanner;

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TributarySpec {
  serai_block: [u8; 32],
  start_time: u64,
  set: ValidatorSet,
  validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
}

impl TributarySpec {
  pub fn new(
    serai_block: [u8; 32],
    start_time: u64,
    set: ValidatorSet,
    set_data: ValidatorSetData,
  ) -> TributarySpec {
    let mut validators = vec![];
    for (participant, amount) in set_data.participants {
      // TODO: Ban invalid keys from being validators on the Serai side
      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
        .expect("invalid key registered as participant");
      // Give one weight on Tributary per bond instance
      validators.push((participant, amount.0 / set_data.bond.0));
    }

    Self { serai_block, start_time, set, validators }
  }

  pub fn set(&self) -> ValidatorSet {
    self.set
  }

  pub fn genesis(&self) -> [u8; 32] {
    // Calculate the genesis for this Tributary
    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
    // This locks it to a specific Serai chain
    genesis.append_message(b"serai_block", self.serai_block);
    genesis.append_message(b"session", self.set.session.0.to_le_bytes());
    genesis.append_message(b"network", self.set.network.encode());
    let genesis = genesis.challenge(b"genesis");
    let genesis_ref: &[u8] = genesis.as_ref();
    genesis_ref[.. 32].try_into().unwrap()
  }

  pub fn start_time(&self) -> u64 {
    self.start_time
  }

  pub fn n(&self) -> u16 {
    // TODO: Support multiple key shares
    // self.validators.iter().map(|(_, weight)| u16::try_from(weight).unwrap()).sum()
    self.validators().len().try_into().unwrap()
  }

  pub fn t(&self) -> u16 {
    (2 * (self.n() / 3)) + 1
  }

  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Participant> {
    let mut i = 1;
    // TODO: Support multiple key shares
    for (validator, _weight) in &self.validators {
      if validator == &key {
        // return (i .. (i + weight)).to_vec();
        return Some(Participant::new(i).unwrap());
      }
      // i += weight;
      i += 1;
    }
    None
  }

  pub fn validators(&self) -> HashMap<<Ristretto as Ciphersuite>::G, u64> {
    let mut res = HashMap::new();
    for (key, amount) in self.validators.clone() {
      res.insert(key, amount);
    }
    res
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignData {
  pub plan: [u8; 32],
  pub attempt: u32,

  pub data: Vec<u8>,

  pub signed: Signed,
}

impl ReadWrite for SignData {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut plan = [0; 32];
    reader.read_exact(&mut plan)?;

    let mut attempt = [0; 4];
    reader.read_exact(&mut attempt)?;
    let attempt = u32::from_le_bytes(attempt);

    let data = {
      let mut data_len = [0; 2];
      reader.read_exact(&mut data_len)?;
      let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
      reader.read_exact(&mut data)?;
      data
    };

    let signed = Signed::read(reader)?;

    Ok(SignData { plan, attempt, data, signed })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.plan)?;
    writer.write_all(&self.attempt.to_le_bytes())?;

    if self.data.len() > u16::MAX.into() {
      // Currently, the largest sign item would be a Monero transaction
      // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
      // key image and proof (96 bytes)
      // Even with all of that, we could support 227 inputs in a single TX
      // Monero is limited to 120 inputs per TX
      Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
    }
    writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.data)?;

    self.signed.write(writer)
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction {
  // Once this completes successfully, no more instances should be created.
  DkgCommitments(u32, Vec<u8>, Signed),
  DkgShares(u32, HashMap<Participant, Vec<u8>>, Signed),

  SignPreprocess(SignData),
  SignShare(SignData),

  FinalizedBlock(u64),

  BatchPreprocess(SignData),
  BatchShare(SignData),
}

impl ReadWrite for Transaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let commitments = {
          let mut commitments_len = [0; 2];
          reader.read_exact(&mut commitments_len)?;
          let mut commitments = vec![0; usize::from(u16::from_le_bytes(commitments_len))];
          reader.read_exact(&mut commitments)?;
          commitments
        };

        let signed = Signed::read(reader)?;

        Ok(Transaction::DkgCommitments(attempt, commitments, signed))
      }

      1 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let shares = {
          let mut share_quantity = [0; 2];
          reader.read_exact(&mut share_quantity)?;

          let mut share_len = [0; 2];
          reader.read_exact(&mut share_len)?;
          let share_len = usize::from(u16::from_le_bytes(share_len));

          let mut shares = HashMap::new();
          for i in 0 .. u16::from_le_bytes(share_quantity) {
            let participant = Participant::new(i + 1).unwrap();
            let mut share = vec![0; share_len];
            reader.read_exact(&mut share)?;
            shares.insert(participant, share);
          }
          shares
        };

        let signed = Signed::read(reader)?;

        Ok(Transaction::DkgShares(attempt, shares, signed))
      }

      2 => SignData::read(reader).map(Transaction::SignPreprocess),
      3 => SignData::read(reader).map(Transaction::SignShare),

      4 => {
        let mut block = [0; 8];
        reader.read_exact(&mut block)?;
        Ok(Transaction::FinalizedBlock(u64::from_le_bytes(block)))
      }

      5 => SignData::read(reader).map(Transaction::BatchPreprocess),
      6 => SignData::read(reader).map(Transaction::BatchShare),

      _ => Err(io::Error::new(io::ErrorKind::Other, "invalid transaction type")),
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Transaction::DkgCommitments(attempt, commitments, signed) => {
        writer.write_all(&[0])?;
        writer.write_all(&attempt.to_le_bytes())?;
        if commitments.len() > u16::MAX.into() {
          // t commitments and an encryption key mean a u16 is fine until a threshold > 2000 occurs
          Err(io::Error::new(io::ErrorKind::Other, "dkg commitments exceeded 65535 bytes"))?;
        }
        writer.write_all(&u16::try_from(commitments.len()).unwrap().to_le_bytes())?;
        writer.write_all(commitments)?;
        signed.write(writer)
      }

      Transaction::DkgShares(attempt, shares, signed) => {
        writer.write_all(&[1])?;
        writer.write_all(&attempt.to_le_bytes())?;

        // Shares are indexed by non-zero u16s (Participants), so this can't fail
        writer.write_all(&u16::try_from(shares.len()).unwrap().to_le_bytes())?;
        let mut share_len = None;
        for participant in 0 .. shares.len() {
          let share = &shares[&Participant::new(u16::try_from(participant + 1).unwrap()).unwrap()];
          if let Some(share_len) = share_len {
            if share.len() != share_len {
              panic!("variable length shares");
            }
          } else {
            // For BLS12-381 G2, this would be:
            // - A 32-byte share
            // - A 96-byte ephemeral key
            // - A 128-byte signature
            // Hence why this has to be u16
            writer.write_all(&u16::try_from(share.len()).unwrap().to_le_bytes())?;
            share_len = Some(share.len());
          }

          writer.write_all(share)?;
        }

        signed.write(writer)
      }

      Transaction::SignPreprocess(data) => {
        writer.write_all(&[2])?;
        data.write(writer)
      }
      Transaction::SignShare(data) => {
        writer.write_all(&[3])?;
        data.write(writer)
      }

      Transaction::FinalizedBlock(block) => {
        writer.write_all(&[4])?;
        writer.write_all(&block.to_le_bytes())
      }

      Transaction::BatchPreprocess(data) => {
        writer.write_all(&[5])?;
        data.write(writer)
      }
      Transaction::BatchShare(data) => {
        writer.write_all(&[6])?;
        data.write(writer)
      }
    }
  }
}

impl TransactionTrait for Transaction {
  fn kind(&self) -> TransactionKind<'_> {
    match self {
      Transaction::DkgCommitments(_, _, signed) => TransactionKind::Signed(signed),
      Transaction::DkgShares(_, _, signed) => TransactionKind::Signed(signed),

      Transaction::SignPreprocess(data) => TransactionKind::Signed(&data.signed),
      Transaction::SignShare(data) => TransactionKind::Signed(&data.signed),

      Transaction::FinalizedBlock(_) => TransactionKind::Provided,

      Transaction::BatchPreprocess(data) => TransactionKind::Signed(&data.signed),
      Transaction::BatchShare(data) => TransactionKind::Signed(&data.signed),
    }
  }

  fn hash(&self) -> [u8; 32] {
    let mut tx = self.serialize();
    if let TransactionKind::Signed(signed) = self.kind() {
      // Make sure the part we're cutting off is the signature
      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
    }
    Blake2s256::digest(tx).into()
  }

  fn verify(&self) -> Result<(), TransactionError> {
    // TODO: Augment with checks that the Vecs can be deser'd and are for recognized IDs
    Ok(())
  }
}
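The serialization above is hand-rolled, so a round-trip test is a cheap guard against the reader and writer drifting apart. Below is a minimal sketch of such a test, not part of this commit; it uses FinalizedBlock since that is the only variant which can be built without constructing a Signed.

#[cfg(test)]
mod tests {
  use super::*;

  // Hedged sketch: serialize a Provided transaction and confirm it reads back identically.
  #[test]
  fn finalized_block_round_trip() {
    let tx = Transaction::FinalizedBlock(5);

    let mut bytes = vec![];
    tx.write(&mut bytes).unwrap();

    let mut reader = bytes.as_slice();
    assert_eq!(Transaction::read(&mut reader).unwrap(), tx);
  }
}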

@@ -0,0 +1,173 @@
use core::ops::Deref;
use std::collections::HashMap;

use zeroize::Zeroizing;

use ciphersuite::{Ciphersuite, Ristretto};

use tributary::{Signed, Block, P2p, Tributary};

use processor_messages::{
  key_gen::{self, KeyGenId},
  CoordinatorMessage,
};

use serai_db::DbTxn;

use crate::{
  Db,
  processor::Processor,
  tributary::{TributaryDb, TributarySpec, Transaction},
};

// Handle a specific Tributary block
async fn handle_block<D: Db, Pro: Processor, P: P2p>(
  db: &mut TributaryDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  processor: &mut Pro,
  spec: &TributarySpec,
  tributary: &Tributary<D, Transaction, P>,
  block: Block<Transaction>,
) {
  let hash = block.hash();

  let mut event_id = 0;
  for tx in block.transactions {
    if !TributaryDb::<D>::handled_event(&db.0, hash, event_id) {
      let mut txn = db.0.txn();

      let mut handle_dkg = |label, attempt, mut bytes: Vec<u8>, signed: Signed| {
        // If they've already published a TX for this attempt, slash
        if let Some(data) =
          TributaryDb::<D>::dkg_data(label, &txn, tributary.genesis(), &signed.signer, attempt)
        {
          if data != bytes {
            // TODO: Full slash
            todo!();
          }

          // TODO: Slash
          return None;
        }

        // If the attempt is lesser than the blockchain's, slash
        let curr_attempt = TributaryDb::<D>::dkg_attempt(&txn, tributary.genesis());
        if attempt < curr_attempt {
          // TODO: Slash for being late
          return None;
        }
        if attempt > curr_attempt {
          // TODO: Full slash
          todo!();
        }

        // Store this data
        let received = TributaryDb::<D>::set_dkg_data(
          label,
          &mut txn,
          tributary.genesis(),
          &signed.signer,
          attempt,
          &bytes,
        );

        // If we have all commitments/shares, tell the processor
        if received == spec.n() {
          let mut data = HashMap::new();
          for validator in spec.validators().keys() {
            data.insert(
              spec.i(*validator).unwrap(),
              if validator == &signed.signer {
                bytes.split_off(0)
              } else {
                TributaryDb::<D>::dkg_data(label, &txn, tributary.genesis(), validator, attempt)
                  .unwrap_or_else(|| {
                    panic!(
                      "received all DKG data yet couldn't load {} for a validator",
                      std::str::from_utf8(label).unwrap(),
                    )
                  })
              },
            );
          }

          return Some((KeyGenId { set: spec.set(), attempt }, data));
        }

        None
      };

      match tx {
        Transaction::DkgCommitments(attempt, bytes, signed) => {
          if let Some((id, commitments)) = handle_dkg(b"commitments", attempt, bytes, signed) {
            processor
              .send(CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
                id,
                commitments,
              }))
              .await;
          }
        }

        Transaction::DkgShares(attempt, mut shares, signed) => {
          if shares.len() != usize::from(spec.n()) {
            // TODO: Full slash
            continue;
          }

          let bytes = shares
            .remove(
              &spec
                .i(Ristretto::generator() * key.deref())
                .expect("in a tributary we're not a validator for"),
            )
            .unwrap();

          if let Some((id, shares)) = handle_dkg(b"shares", attempt, bytes, signed) {
            processor
              .send(CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares { id, shares }))
              .await;
          }
        }

        Transaction::SignPreprocess(..) => todo!(),
        Transaction::SignShare(..) => todo!(),
        Transaction::FinalizedBlock(..) => todo!(),
        Transaction::BatchPreprocess(..) => todo!(),
        Transaction::BatchShare(..) => todo!(),
      }

      TributaryDb::<D>::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }
}

pub async fn handle_new_blocks<D: Db, Pro: Processor, P: P2p>(
  db: &mut TributaryDb<D>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  processor: &mut Pro,
  spec: &TributarySpec,
  tributary: &Tributary<D, Transaction, P>,
  last_block: &mut [u8; 32],
) {
  // Check if there's been a new Tributary block
  let latest = tributary.tip();
  if latest == *last_block {
    return;
  }

  let mut blocks = vec![tributary.block(&latest).unwrap()];
  while blocks.last().unwrap().parent() != *last_block {
    blocks.push(tributary.block(&blocks.last().unwrap().parent()).unwrap());
  }

  while let Some(block) = blocks.pop() {
    let hash = block.hash();
    handle_block(db, key, processor, spec, tributary, block).await;
    *last_block = hash;
    db.set_last_block(tributary.genesis(), *last_block);
  }
}
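handle_new_blocks only walks from *last_block up to the current tip, so a caller has to invoke it repeatedly as the Tributary grows. The following driver loop is a hedged sketch, not part of this commit, assuming it lives in the same module as handle_new_blocks above and runs under a tokio runtime; the name scan_tributary and the 100ms poll interval are illustrative choices.

// Hedged sketch of a caller-side loop (not part of this commit).
pub async fn scan_tributary<D: Db, Pro: Processor, P: P2p>(
  mut db: TributaryDb<D>,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  mut processor: Pro,
  spec: TributarySpec,
  tributary: Tributary<D, Transaction, P>,
) {
  // Resume from the last block recorded in the DB (last_block defaults to the genesis hash)
  let mut last_block = db.last_block(tributary.genesis());
  loop {
    handle_new_blocks(&mut db, &key, &mut processor, &spec, &tributary, &mut last_block).await;
    // Arbitrary placeholder poll interval
    tokio::time::sleep(core::time::Duration::from_millis(100)).await;
  }
}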