use core::ops::Deref;
use std::io::{self, Read, Write};

use zeroize::Zeroizing;

use rand_core::{RngCore, CryptoRng};

use blake2::{Digest, Blake2s256};

use transcript::{Transcript, RecommendedTranscript};

use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;
use frost::Participant;

use scale::{Encode, Decode};

use serai_client::{
  primitives::{NetworkId, PublicKey},
  validator_sets::primitives::{Session, ValidatorSet},
};

#[rustfmt::skip]
use tributary::{
  ReadWrite,
  transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}
};

mod db;
pub use db::*;

mod nonce_decider;
pub use nonce_decider::*;

mod handle;
pub use handle::*;

pub mod scanner;
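
// Specification of a Tributary, as inferred from the fields below: the Serai block it's derived
// from, its start time, the validator set it serves, and that set's validators with their weights.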
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TributarySpec {
  serai_block: [u8; 32],
  start_time: u64,
  set: ValidatorSet,
  validators: Vec<(<Ristretto as Ciphersuite>::G, u64)>,
}

impl TributarySpec {
  pub fn new(
    serai_block: [u8; 32],
    start_time: u64,
    set: ValidatorSet,
    set_participants: Vec<PublicKey>,
  ) -> TributarySpec {
    let mut validators = vec![];
    for participant in set_participants {
      // TODO: Ban invalid keys from being validators on the Serai side
      // (make coordinator key a session key?)
      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
        .expect("invalid key registered as participant");
      // TODO: Give one weight on Tributary per bond instance
      validators.push((participant, 1));
    }

    Self { serai_block, start_time, set, validators }
  }

  pub fn set(&self) -> ValidatorSet {
    self.set
  }

  pub fn genesis(&self) -> [u8; 32] {
    // Calculate the genesis for this Tributary
    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
    // This locks it to a specific Serai chain
    genesis.append_message(b"serai_block", self.serai_block);
    genesis.append_message(b"session", self.set.session.0.to_le_bytes());
    genesis.append_message(b"network", self.set.network.encode());
    let genesis = genesis.challenge(b"genesis");
    let genesis_ref: &[u8] = genesis.as_ref();
    genesis_ref[.. 32].try_into().unwrap()
  }

  pub fn start_time(&self) -> u64 {
    self.start_time
  }

  pub fn n(&self) -> u16 {
    // TODO: Support multiple key shares
    // self.validators.iter().map(|(_, weight)| u16::try_from(weight).unwrap()).sum()
    self.validators().len().try_into().unwrap()
  }
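
  // The threshold: the least t with t > 2n/3, i.e. a BFT supermajority.
  // e.g. n = 4 gives t = ((2 * 4) / 3) + 1 = 3.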
  pub fn t(&self) -> u16 {
    ((2 * self.n()) / 3) + 1
  }

  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Participant> {
    let mut i = 1;
    // TODO: Support multiple key shares
    for (validator, _weight) in &self.validators {
      if validator == &key {
        // return (i .. (i + weight)).to_vec();
        return Some(Participant::new(i).unwrap());
      }
      // i += weight;
      i += 1;
    }
    None
  }

  pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
    self.validators.clone()
  }
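
  // Serialized layout, per `write`/`read` below: 32-byte serai_block, u64 LE start_time,
  // u32 LE session, 1-byte SCALE-encoded network, u32 LE validator count, then per validator a
  // 32-byte compressed Ristretto point and a u64 LE weight.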
  pub fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.serai_block)?;
    writer.write_all(&self.start_time.to_le_bytes())?;
    writer.write_all(&self.set.session.0.to_le_bytes())?;
    let network_encoded = self.set.network.encode();
    assert_eq!(network_encoded.len(), 1);
    writer.write_all(&network_encoded)?;
    writer.write_all(&u32::try_from(self.validators.len()).unwrap().to_le_bytes())?;
    for validator in &self.validators {
      writer.write_all(&validator.0.to_bytes())?;
      writer.write_all(&validator.1.to_le_bytes())?;
    }
    Ok(())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }

  pub fn read<R: Read>(reader: &mut R) -> io::Result<Self> {
    let mut serai_block = [0; 32];
    reader.read_exact(&mut serai_block)?;

    let mut start_time = [0; 8];
    reader.read_exact(&mut start_time)?;
    let start_time = u64::from_le_bytes(start_time);

    let mut session = [0; 4];
    reader.read_exact(&mut session)?;
    let session = Session(u32::from_le_bytes(session));

    let mut network = [0; 1];
    reader.read_exact(&mut network)?;
    let network = NetworkId::decode(&mut &network[..])
      .map_err(|_| io::Error::new(io::ErrorKind::Other, "invalid network"))?;

    let mut validators_len = [0; 4];
    reader.read_exact(&mut validators_len)?;
    let validators_len = usize::try_from(u32::from_le_bytes(validators_len)).unwrap();

    let mut validators = Vec::with_capacity(validators_len);
    for _ in 0 .. validators_len {
      let key = Ristretto::read_G(reader)?;
      let mut bond = [0; 8];
      reader.read_exact(&mut bond)?;
      validators.push((key, u64::from_le_bytes(bond)));
    }

    Ok(Self { serai_block, start_time, set: ValidatorSet { session, network }, validators })
  }
}
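
// A hypothetical round-trip sketch for the serialization above (not part of the original file):
//   let bytes = spec.serialize();
//   assert_eq!(TributarySpec::read(&mut bytes.as_slice()).unwrap(), spec);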

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignData {
  pub plan: [u8; 32],
  pub attempt: u32,

  pub data: Vec<u8>,

  pub signed: Signed,
}
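
// Wire layout, per the `ReadWrite` impl below: 32-byte plan, u32 LE attempt, u16 LE data length,
// the data itself, then the `Signed` metadata.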
impl ReadWrite for SignData {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut plan = [0; 32];
    reader.read_exact(&mut plan)?;

    let mut attempt = [0; 4];
    reader.read_exact(&mut attempt)?;
    let attempt = u32::from_le_bytes(attempt);

    let data = {
      let mut data_len = [0; 2];
      reader.read_exact(&mut data_len)?;
      let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
      reader.read_exact(&mut data)?;
      data
    };

    let signed = Signed::read(reader)?;

    Ok(SignData { plan, attempt, data, signed })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.plan)?;
    writer.write_all(&self.attempt.to_le_bytes())?;

    if self.data.len() > u16::MAX.into() {
      // Currently, the largest sign item would be a Monero transaction
      // It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
      // key image and proof (96 bytes)
      // Even with all of that, we could support 227 inputs in a single TX
      // Monero is limited to ~120 inputs per TX
      Err(io::Error::new(io::ErrorKind::Other, "signing data exceeded 65535 bytes"))?;
    }
    writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.data)?;

    self.signed.write(writer)
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Transaction {
  // Once this completes successfully, no more instances should be created.
  DkgCommitments(u32, Vec<u8>, Signed),
  DkgShares {
    attempt: u32,
    shares: Vec<Vec<u8>>,
    confirmation_nonces: [u8; 64],
    signed: Signed,
  },
  DkgConfirmed(u32, [u8; 32], Signed),

  // When we have synchrony on a batch, we can allow signing it
  // TODO (never?): This is less efficient compared to an ExternalBlock provided transaction,
  // which would be binding over the block hash and automatically achieve synchrony on all
  // relevant batches. ExternalBlock was removed for this due to complexity around the pipeline
  // with the current processor, yet it would still be an improvement.
  Batch([u8; 32], [u8; 32]),
  // When a Serai block is finalized, with the contained batches, we can allow the associated plan
  // IDs to be signed
  SubstrateBlock(u64),

  BatchPreprocess(SignData),
  BatchShare(SignData),

  SignPreprocess(SignData),
  SignShare(SignData),
  // This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
  // reporters (who should all report the same thing)
  // We do still track the signer in order to prevent a single signer from publishing arbitrarily
  // many TXs without penalty
  // Here, they're denoted as the first_signer, as only the signer of the first TX to be included
  // with this pairing will be remembered on-chain
  SignCompleted {
    plan: [u8; 32],
    tx_hash: Vec<u8>,
    first_signer: <Ristretto as Ciphersuite>::G,
    signature: SchnorrSignature<Ristretto>,
  },
}
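
// Serialization discriminants used by the `ReadWrite` impl below:
// 0 DkgCommitments, 1 DkgShares, 2 DkgConfirmed, 3 Batch, 4 SubstrateBlock, 5 BatchPreprocess,
// 6 BatchShare, 7 SignPreprocess, 8 SignShare, 9 SignCompleted.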
impl ReadWrite for Transaction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let commitments = {
          let mut commitments_len = [0; 2];
          reader.read_exact(&mut commitments_len)?;
          let mut commitments = vec![0; usize::from(u16::from_le_bytes(commitments_len))];
          reader.read_exact(&mut commitments)?;
          commitments
        };

        let signed = Signed::read(reader)?;

        Ok(Transaction::DkgCommitments(attempt, commitments, signed))
      }

      1 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let shares = {
          let mut share_quantity = [0; 2];
          reader.read_exact(&mut share_quantity)?;

          let mut share_len = [0; 2];
          reader.read_exact(&mut share_len)?;
          let share_len = usize::from(u16::from_le_bytes(share_len));

          let mut shares = vec![];
          for _ in 0 .. u16::from_le_bytes(share_quantity) {
            let mut share = vec![0; share_len];
            reader.read_exact(&mut share)?;
            shares.push(share);
          }
          shares
        };

        let mut confirmation_nonces = [0; 64];
        reader.read_exact(&mut confirmation_nonces)?;

        let signed = Signed::read(reader)?;

        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
      }

      2 => {
        let mut attempt = [0; 4];
        reader.read_exact(&mut attempt)?;
        let attempt = u32::from_le_bytes(attempt);

        let mut confirmation_share = [0; 32];
        reader.read_exact(&mut confirmation_share)?;

        let signed = Signed::read(reader)?;

        Ok(Transaction::DkgConfirmed(attempt, confirmation_share, signed))
      }

      3 => {
        let mut block = [0; 32];
        reader.read_exact(&mut block)?;
        let mut batch = [0; 32];
        reader.read_exact(&mut batch)?;
        Ok(Transaction::Batch(block, batch))
      }

      4 => {
        let mut block = [0; 8];
        reader.read_exact(&mut block)?;
        Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
      }

      5 => SignData::read(reader).map(Transaction::BatchPreprocess),
      6 => SignData::read(reader).map(Transaction::BatchShare),

      7 => SignData::read(reader).map(Transaction::SignPreprocess),
      8 => SignData::read(reader).map(Transaction::SignShare),

      9 => {
        let mut plan = [0; 32];
        reader.read_exact(&mut plan)?;

        let mut tx_hash_len = [0];
        reader.read_exact(&mut tx_hash_len)?;
        let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
        reader.read_exact(&mut tx_hash)?;

        let first_signer = Ristretto::read_G(reader)?;
        let signature = SchnorrSignature::<Ristretto>::read(reader)?;

        Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
      }

      _ => Err(io::Error::new(io::ErrorKind::Other, "invalid transaction type")),
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Transaction::DkgCommitments(attempt, commitments, signed) => {
        writer.write_all(&[0])?;
        writer.write_all(&attempt.to_le_bytes())?;
        if commitments.len() > u16::MAX.into() {
          // t commitments and an encryption key mean a u16 is fine until a threshold > 2000 occurs
          Err(io::Error::new(io::ErrorKind::Other, "dkg commitments exceeded 65535 bytes"))?;
        }
        writer.write_all(&u16::try_from(commitments.len()).unwrap().to_le_bytes())?;
        writer.write_all(commitments)?;
        signed.write(writer)
      }

      Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
        writer.write_all(&[1])?;
        writer.write_all(&attempt.to_le_bytes())?;

        // `shares` is a Vec which maps to a HashMap<Participant, Vec<u8>> for any legitimate
        // `DkgShares`. Since Participant has a range of 1 ..= u16::MAX, the length must be <
        // u16::MAX. The only way for this to not be true is if we were malicious, or if we read a
        // `DkgShares` with a `shares.len() > u16::MAX`. The former is assumed untrue. The latter
        // is impossible since we'll only read up to u16::MAX items.
        writer.write_all(&u16::try_from(shares.len()).unwrap().to_le_bytes())?;

        let share_len = shares.get(0).map(|share| share.len()).unwrap_or(0);
        // For BLS12-381 G2, this would be:
        // - A 32-byte share
        // - A 96-byte ephemeral key
        // - A 128-byte signature
        // Hence why this has to be u16
        writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;

        for share in shares {
          assert_eq!(share.len(), share_len, "shares were of variable length");
          writer.write_all(share)?;
        }

        writer.write_all(confirmation_nonces)?;
        signed.write(writer)
      }

      Transaction::DkgConfirmed(attempt, share, signed) => {
        writer.write_all(&[2])?;
        writer.write_all(&attempt.to_le_bytes())?;
        writer.write_all(share)?;
        signed.write(writer)
      }

      Transaction::Batch(block, batch) => {
        writer.write_all(&[3])?;
        writer.write_all(block)?;
        writer.write_all(batch)
      }

      Transaction::SubstrateBlock(block) => {
        writer.write_all(&[4])?;
        writer.write_all(&block.to_le_bytes())
      }

      Transaction::BatchPreprocess(data) => {
        writer.write_all(&[5])?;
        data.write(writer)
      }
      Transaction::BatchShare(data) => {
        writer.write_all(&[6])?;
        data.write(writer)
      }

      Transaction::SignPreprocess(data) => {
        writer.write_all(&[7])?;
        data.write(writer)
      }
      Transaction::SignShare(data) => {
        writer.write_all(&[8])?;
        data.write(writer)
      }
      Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
        writer.write_all(&[9])?;
        writer.write_all(plan)?;
        writer
          .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceeded 255 bytes")])?;
        writer.write_all(tx_hash)?;
        writer.write_all(&first_signer.to_bytes())?;
        signature.write(writer)
      }
    }
  }
}

impl TransactionTrait for Transaction {
  fn kind(&self) -> TransactionKind<'_> {
    match self {
      Transaction::DkgCommitments(_, _, signed) => TransactionKind::Signed(signed),
      Transaction::DkgShares { signed, .. } => TransactionKind::Signed(signed),
      Transaction::DkgConfirmed(_, _, signed) => TransactionKind::Signed(signed),

      Transaction::Batch(_, _) => TransactionKind::Provided("batch"),
      Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"),

      Transaction::BatchPreprocess(data) => TransactionKind::Signed(&data.signed),
      Transaction::BatchShare(data) => TransactionKind::Signed(&data.signed),

      Transaction::SignPreprocess(data) => TransactionKind::Signed(&data.signed),
      Transaction::SignShare(data) => TransactionKind::Signed(&data.signed),
      Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
    }
  }
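
  // The TX hash excludes the signature, presumably since the signature is itself produced over a
  // challenge derived from the transaction, and including it here would be circular.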
  fn hash(&self) -> [u8; 32] {
    let mut tx = self.serialize();
    if let TransactionKind::Signed(signed) = self.kind() {
      // Make sure the part we're cutting off is the signature
      assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
    }
    Blake2s256::digest(tx).into()
  }

  fn verify(&self) -> Result<(), TransactionError> {
    if let Transaction::BatchShare(data) = self {
      if data.data.len() != 32 {
        Err(TransactionError::InvalidContent)?;
      }
    }

    if let Transaction::SignCompleted { first_signer, signature, .. } = self {
      if !signature.verify(*first_signer, self.sign_completed_challenge()) {
        Err(TransactionError::InvalidContent)?;
      }
    }

    Ok(())
  }
}
|
2023-04-23 01:25:45 -04:00
|
|
|
|
|
|
|
|
impl Transaction {
|
|
|
|
|
// Used to initially construct transactions so we can then get sig hashes and perform signing
|
|
|
|
|
pub fn empty_signed() -> Signed {
|
|
|
|
|
Signed {
|
|
|
|
|
signer: Ristretto::generator(),
|
|
|
|
|
nonce: 0,
|
|
|
|
|
signature: SchnorrSignature::<Ristretto> {
|
|
|
|
|
R: Ristretto::generator(),
|
|
|
|
|
s: <Ristretto as Ciphersuite>::F::ZERO,
|
|
|
|
|
},
|
|
|
|
|
}
|
|
|
|
|
}

  // Sign a transaction
  pub fn sign<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    genesis: [u8; 32],
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    nonce: u32,
  ) {
    fn signed(tx: &mut Transaction) -> &mut Signed {
      match tx {
        Transaction::DkgCommitments(_, _, ref mut signed) => signed,
        Transaction::DkgShares { ref mut signed, .. } => signed,
        Transaction::DkgConfirmed(_, _, ref mut signed) => signed,

        Transaction::Batch(_, _) => panic!("signing Batch"),
        Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),

        Transaction::BatchPreprocess(ref mut data) => &mut data.signed,
        Transaction::BatchShare(ref mut data) => &mut data.signed,

        Transaction::SignPreprocess(ref mut data) => &mut data.signed,
        Transaction::SignShare(ref mut data) => &mut data.signed,
        Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
      }
    }

    let signed_ref = signed(self);
    signed_ref.signer = Ristretto::generator() * key.deref();
    signed_ref.nonce = nonce;

    // Set the nonce commitment R before computing the sig hash, which commits to it
    let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
    signed(self).signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
    let sig_hash = self.sig_hash(genesis);
    signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
  }
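
  // Usage sketch (hypothetical `genesis` and `key` values): construct the TX with
  // `empty_signed()`, then sign it in place.
  //   let mut tx = Transaction::DkgCommitments(0, vec![], Transaction::empty_signed());
  //   tx.sign(&mut OsRng, genesis, &key, 0);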

  pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F {
    if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self {
      let mut transcript =
        RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted");
      transcript.append_message(b"plan", plan);
      transcript.append_message(b"tx_hash", tx_hash);
      transcript.append_message(b"signer", first_signer.to_bytes());
      transcript.append_message(b"nonce", signature.R.to_bytes());
      Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
    } else {
      panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
    }
  }
}