Have the processor verify the published Batches match expectations

Luke Parker
2024-12-30 05:21:26 -05:00
parent 1d50792eed
commit e67e301fc2
14 changed files with 124 additions and 67 deletions
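In short: when the report task creates a Batch, it now records which session is expected to publish it and a Blake2b-256 hash of the Batch's instructions; when Substrate later reports the Batch as executed, the processor asserts the publisher and hash match before acting on it. Below is a minimal, standalone sketch of that check — the `Session` and `BatchInfo` types here are simplified stand-ins (a u32 newtype and pre-encoded instruction bytes), not the actual Serai primitives, and it assumes the blake2 0.10 dependency this commit adds; the real logic lives in the scanner's report and substrate tasks in the diff that follows.

// Sketch only: simplified stand-in types for the ones changed in the diff below.
use blake2::{digest::typenum::U32, Blake2b, Digest};

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Session(u32);

struct BatchInfo {
  block_number: u64,
  publisher: Session,
  in_instructions_hash: [u8; 32],
}

// What the report task records when it creates a Batch
fn save_batch_info(block_number: u64, publisher: Session, encoded_instructions: &[u8]) -> BatchInfo {
  BatchInfo {
    block_number,
    publisher,
    in_instructions_hash: Blake2b::<U32>::digest(encoded_instructions).into(),
  }
}

// What the substrate task asserts once the Batch is acknowledged on-chain
fn verify_executed_batch(expected: &BatchInfo, publisher: Session, in_instructions_hash: [u8; 32]) {
  assert_eq!(
    publisher, expected.publisher,
    "batch acknowledged on-chain was acknowledged by an unexpected publisher"
  );
  assert_eq!(
    in_instructions_hash, expected.in_instructions_hash,
    "batch acknowledged on-chain was distinct"
  );
}

fn main() {
  let encoded_instructions = b"example encoded InInstructions";
  let info = save_batch_info(100, Session(2), encoded_instructions);
  // Later, the executed Batch reported by Substrate must carry the same publisher and hash
  verify_executed_batch(&info, Session(2), Blake2b::<U32>::digest(encoded_instructions).into());
  println!("verified batch for block {}", info.block_number);
}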

View File

@@ -280,7 +280,13 @@ pub async fn main_loop<
 // Substrate sets this limit to prevent DoSs from malicious validator sets
 // That bound lets us consume this txn in the following loop body, as an optimization
 assert!(batches.len() <= 1);
-for messages::substrate::ExecutedBatch { id, in_instructions } in batches {
+for messages::substrate::ExecutedBatch {
+  id,
+  publisher,
+  in_instructions_hash,
+  in_instruction_results,
+} in batches
+{
   let key_to_activate =
     KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0);
@@ -288,7 +294,9 @@ pub async fn main_loop<
 let _: () = scanner.acknowledge_batch(
   txn.take().unwrap(),
   id,
-  in_instructions,
+  publisher,
+  in_instructions_hash,
+  in_instruction_results,
   /*
     `acknowledge_batch` takes burns to optimize handling returns with standard
     payments. That's why handling these with a Batch (and not waiting until the

View File

@@ -24,6 +24,7 @@ scale = { package = "parity-scale-codec", version = "3", default-features = fals
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 # Cryptography
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
 group = { version = "0.13", default-features = false }
 # Application
@@ -35,6 +36,7 @@ serai-db = { path = "../../common/db" }
 messages = { package = "serai-processor-messages", path = "../messages" }
 serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
+serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std", "borsh"] }
 serai-in-instructions-primitives = { path = "../../substrate/in-instructions/primitives", default-features = false, features = ["std", "borsh"] }
 serai-coins-primitives = { path = "../../substrate/coins/primitives", default-features = false, features = ["std", "borsh"] }

View File

@@ -7,8 +7,9 @@ use scale::{Encode, Decode, IoReader};
 use borsh::{BorshSerialize, BorshDeserialize};
 use serai_db::{Get, DbTxn, create_db, db_channel};
-use serai_in_instructions_primitives::{InInstructionWithBalance, Batch};
 use serai_coins_primitives::OutInstructionWithBalance;
+use serai_validator_sets_primitives::Session;
+use serai_in_instructions_primitives::{InInstructionWithBalance, Batch};
 use primitives::{EncodableG, ReceivedOutput};
@@ -25,11 +26,13 @@ impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
 #[derive(BorshSerialize, BorshDeserialize)]
 struct SeraiKeyDbEntry<K: Borshy> {
   activation_block_number: u64,
+  session: Session,
   key: K,
 }
 #[derive(Clone)]
 pub(crate) struct SeraiKey<K> {
+  pub(crate) session: Session,
   pub(crate) key: K,
   pub(crate) stage: LifetimeStage,
   pub(crate) activation_block_number: u64,
@@ -165,7 +168,7 @@ impl<S: ScannerFeed> ScannerGlobalDb<S> {
 // If this new key retires a key, mark the block at which forwarding explicitly occurs notable
 // This lets us obtain synchrony over the transactions we'll make to accomplish this
-if let Some(key_retired_by_this) = keys.last() {
+let this_keys_session = if let Some(key_retired_by_this) = keys.last() {
   NotableBlock::set(
     txn,
     Lifetime::calculate::<S>(
@@ -182,10 +185,17 @@ impl<S: ScannerFeed> ScannerGlobalDb<S> {
     ),
     &(),
   );
-}
+  Session(key_retired_by_this.session.0 + 1)
+} else {
+  Session(0)
+};
 // Push and save the next key
-keys.push(SeraiKeyDbEntry { activation_block_number, key: EncodableG(key) });
+keys.push(SeraiKeyDbEntry {
+  activation_block_number,
+  session: this_keys_session,
+  key: EncodableG(key),
+});
 ActiveKeys::set(txn, &keys);
 // Now tidy the keys, ensuring this has a maximum length of 2
@@ -236,6 +246,7 @@ impl<S: ScannerFeed> ScannerGlobalDb<S> {
   raw_keys.get(i + 1).map(|key| key.activation_block_number),
 );
 keys.push(SeraiKey {
+  session: raw_keys[i].session,
   key: raw_keys[i].key.0,
   stage,
   activation_block_number: raw_keys[i].activation_block_number,
@@ -477,6 +488,7 @@ db_channel! {
 }
 pub(crate) struct InInstructionData<S: ScannerFeed> {
+  pub(crate) session_to_sign_batch: Session,
   pub(crate) external_key_for_session_to_sign_batch: KeyFor<S>,
   pub(crate) returnable_in_instructions: Vec<Returnable<S>>,
 }
@@ -488,7 +500,8 @@ impl<S: ScannerFeed> ScanToReportDb<S> {
   block_number: u64,
   data: &InInstructionData<S>,
 ) {
-  let mut buf = data.external_key_for_session_to_sign_batch.to_bytes().as_ref().to_vec();
+  let mut buf = data.session_to_sign_batch.encode();
+  buf.extend(data.external_key_for_session_to_sign_batch.to_bytes().as_ref());
   for returnable_in_instruction in &data.returnable_in_instructions {
     returnable_in_instruction.write(&mut buf).unwrap();
   }
@@ -510,6 +523,7 @@ impl<S: ScannerFeed> ScanToReportDb<S> {
 );
 let mut buf = data.returnable_in_instructions.as_slice();
+let session_to_sign_batch = Session::decode(&mut buf).unwrap();
 let external_key_for_session_to_sign_batch = {
   let mut external_key_for_session_to_sign_batch =
     <KeyFor<S> as GroupEncoding>::Repr::default();
@@ -523,7 +537,11 @@ impl<S: ScannerFeed> ScanToReportDb<S> {
 while !buf.is_empty() {
   returnable_in_instructions.push(Returnable::read(&mut buf).unwrap());
 }
-InInstructionData { external_key_for_session_to_sign_batch, returnable_in_instructions }
+InInstructionData {
+  session_to_sign_batch,
+  external_key_for_session_to_sign_batch,
+  returnable_in_instructions,
+}
 }
 }

View File

@@ -11,6 +11,7 @@ use borsh::{BorshSerialize, BorshDeserialize};
 use serai_db::{Get, DbTxn, Db};
 use serai_primitives::{NetworkId, Coin, Amount};
+use serai_validator_sets_primitives::Session;
 use serai_coins_primitives::OutInstructionWithBalance;
 use primitives::{task::*, Address, ReceivedOutput, Block, Payment};
@@ -437,10 +438,13 @@ impl<S: ScannerFeed> Scanner<S> {
 /// `queue_burns`. Doing so will cause them to be executed multiple times.
 ///
 /// The calls to this function must be ordered with regards to `queue_burns`.
+#[allow(clippy::too_many_arguments)]
 pub fn acknowledge_batch(
   &mut self,
   mut txn: impl DbTxn,
   batch_id: u32,
+  publisher: Session,
+  in_instructions_hash: [u8; 32],
   in_instruction_results: Vec<messages::substrate::InInstructionResult>,
   burns: Vec<OutInstructionWithBalance>,
   key_to_activate: Option<KeyFor<S>>,
@@ -451,6 +455,8 @@ impl<S: ScannerFeed> Scanner<S> {
 substrate::queue_acknowledge_batch::<S>(
   &mut txn,
   batch_id,
+  publisher,
+  in_instructions_hash,
   in_instruction_results,
   burns,
   key_to_activate,

View File

@@ -8,9 +8,17 @@ use borsh::{BorshSerialize, BorshDeserialize};
 use serai_db::{Get, DbTxn, create_db};
 use serai_primitives::Balance;
+use serai_validator_sets_primitives::Session;
 use crate::{ScannerFeed, KeyFor, AddressFor};
+
+#[derive(BorshSerialize, BorshDeserialize)]
+pub(crate) struct BatchInfo {
+  pub(crate) block_number: u64,
+  pub(crate) publisher: Session,
+  pub(crate) in_instructions_hash: [u8; 32],
+}
 create_db!(
   ScannerReport {
     // The next block to potentially report
@@ -18,10 +26,11 @@ create_db!(
     // The next Batch ID to use
     NextBatchId: () -> u32,
-    // The block number which caused a batch
-    BlockNumberForBatch: (batch: u32) -> u64,
+    // The information needed to verify a batch
+    InfoForBatch: (batch: u32) -> BatchInfo,
     // The external key for the session which should sign a batch
+    // TODO: Merge this with InfoForBatch
     ExternalKeyForSessionToSignBatch: (batch: u32) -> Vec<u8>,
     // The return addresses for the InInstructions within a Batch
@@ -46,15 +55,24 @@ impl<S: ScannerFeed> ReportDb<S> {
   NextToPotentiallyReportBlock::get(getter)
 }
-pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn, block_number: u64) -> u32 {
+pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 {
   let id = NextBatchId::get(txn).unwrap_or(0);
   NextBatchId::set(txn, &(id + 1));
-  BlockNumberForBatch::set(txn, id, &block_number);
   id
 }
-pub(crate) fn take_block_number_for_batch(txn: &mut impl DbTxn, id: u32) -> Option<u64> {
-  BlockNumberForBatch::take(txn, id)
+pub(crate) fn save_batch_info(
+  txn: &mut impl DbTxn,
+  id: u32,
+  block_number: u64,
+  publisher: Session,
+  in_instructions_hash: [u8; 32],
+) {
+  InfoForBatch::set(txn, id, &BatchInfo { block_number, publisher, in_instructions_hash });
+}
+
+pub(crate) fn take_info_for_batch(txn: &mut impl DbTxn, id: u32) -> Option<BatchInfo> {
+  InfoForBatch::take(txn, id)
 }
 pub(crate) fn save_external_key_for_session_to_sign_batch(

View File

@@ -1,28 +1,28 @@
 use core::{marker::PhantomData, future::Future};
+use blake2::{digest::typenum::U32, Digest, Blake2b};
 use scale::Encode;
 use serai_db::{DbTxn, Db};
-use serai_primitives::BlockHash;
 use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
 use primitives::task::ContinuallyRan;
 use crate::{
   db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, Batches, BatchesToSign},
-  index,
   scan::next_to_scan_for_outputs_block,
   ScannerFeed, KeyFor,
 };
 mod db;
-pub(crate) use db::ReturnInformation;
+pub(crate) use db::{BatchInfo, ReturnInformation};
 use db::ReportDb;
-pub(crate) fn take_block_number_for_batch<S: ScannerFeed>(
+pub(crate) fn take_info_for_batch<S: ScannerFeed>(
   txn: &mut impl DbTxn,
   id: u32,
-) -> Option<u64> {
-  ReportDb::<S>::take_block_number_for_batch(txn, id)
+) -> Option<BatchInfo> {
+  ReportDb::<S>::take_info_for_batch(txn, id)
 }
 pub(crate) fn take_external_key_for_session_to_sign_batch<S: ScannerFeed>(
@@ -88,33 +88,28 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
 let next_to_potentially_report = ReportDb::<S>::next_to_potentially_report_block(&self.db)
   .expect("ReportTask run before writing the start block");
-for b in next_to_potentially_report ..= highest_reportable {
+for block_number in next_to_potentially_report ..= highest_reportable {
   let mut txn = self.db.txn();
   // Receive the InInstructions for this block
   // We always do this as we can't trivially tell if we should recv InInstructions before we
   // do
   let InInstructionData {
+    session_to_sign_batch,
     external_key_for_session_to_sign_batch,
     returnable_in_instructions: in_instructions,
-  } = ScanToReportDb::<S>::recv_in_instructions(&mut txn, b);
+  } = ScanToReportDb::<S>::recv_in_instructions(&mut txn, block_number);
-  let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, b);
+  let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, block_number);
   if !notable {
     assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions");
   }
   // If this block is notable, create the Batch(s) for it
   if notable {
     let network = S::NETWORK;
-    let block_hash = index::block_id(&txn, b);
-    let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
+    let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
     // start with empty batch
-    let mut batches = vec![Batch {
-      network,
-      id: batch_id,
-      block: BlockHash(block_hash),
-      instructions: vec![],
-    }];
+    let mut batches = vec![Batch { network, id: batch_id, instructions: vec![] }];
     // We also track the return information for the InInstructions within a Batch in case
     // they error
     let mut return_information = vec![vec![]];
@@ -131,15 +126,10 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
 let in_instruction = batch.instructions.pop().unwrap();
 // bump the id for the new batch
-batch_id = ReportDb::<S>::acquire_batch_id(&mut txn, b);
+batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
 // make a new batch with this instruction included
-batches.push(Batch {
-  network,
-  id: batch_id,
-  block: BlockHash(block_hash),
-  instructions: vec![in_instruction],
-});
+batches.push(Batch { network, id: batch_id, instructions: vec![in_instruction] });
 // Since we're allocating a new batch, allocate a new set of return addresses for it
 return_information.push(vec![]);
 }
@@ -152,10 +142,17 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
 .push(return_address.map(|address| ReturnInformation { address, balance }));
 }
-// Save the return addresses to the database
+// Now that we've finalized the Batches, save the information for each to the database
 assert_eq!(batches.len(), return_information.len());
 for (batch, return_information) in batches.iter().zip(&return_information) {
   assert_eq!(batch.instructions.len(), return_information.len());
+  ReportDb::<S>::save_batch_info(
+    &mut txn,
+    batch.id,
+    block_number,
+    session_to_sign_batch,
+    Blake2b::<U32>::digest(batch.instructions.encode()).into(),
+  );
   ReportDb::<S>::save_external_key_for_session_to_sign_batch(
     &mut txn,
     batch.id,
@@ -171,7 +168,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
 }
 // Update the next to potentially report block
-ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, b + 1);
+ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, block_number + 1);
 txn.commit();
 }

View File

@@ -349,6 +349,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
   &mut txn,
   b,
   &InInstructionData {
+    session_to_sign_batch: keys[0].session,
     external_key_for_session_to_sign_batch: keys[0].key,
     returnable_in_instructions: in_instructions,
   },

View File

@@ -6,12 +6,15 @@ use borsh::{BorshSerialize, BorshDeserialize};
 use serai_db::{Get, DbTxn, create_db, db_channel};
 use serai_coins_primitives::OutInstructionWithBalance;
+use serai_validator_sets_primitives::Session;
 use crate::{ScannerFeed, KeyFor};
 #[derive(BorshSerialize, BorshDeserialize)]
 struct AcknowledgeBatchEncodable {
   batch_id: u32,
+  publisher: Session,
+  in_instructions_hash: [u8; 32],
   in_instruction_results: Vec<messages::substrate::InInstructionResult>,
   burns: Vec<OutInstructionWithBalance>,
   key_to_activate: Option<Vec<u8>>,
@@ -25,6 +28,8 @@ enum ActionEncodable {
 pub(crate) struct AcknowledgeBatch<S: ScannerFeed> {
   pub(crate) batch_id: u32,
+  pub(crate) publisher: Session,
+  pub(crate) in_instructions_hash: [u8; 32],
   pub(crate) in_instruction_results: Vec<messages::substrate::InInstructionResult>,
   pub(crate) burns: Vec<OutInstructionWithBalance>,
   pub(crate) key_to_activate: Option<KeyFor<S>>,
@@ -46,6 +51,8 @@ impl<S: ScannerFeed> SubstrateDb<S> {
 pub(crate) fn queue_acknowledge_batch(
   txn: &mut impl DbTxn,
   batch_id: u32,
+  publisher: Session,
+  in_instructions_hash: [u8; 32],
   in_instruction_results: Vec<messages::substrate::InInstructionResult>,
   burns: Vec<OutInstructionWithBalance>,
   key_to_activate: Option<KeyFor<S>>,
@@ -54,6 +61,8 @@ impl<S: ScannerFeed> SubstrateDb<S> {
   txn,
   &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable {
     batch_id,
+    publisher,
+    in_instructions_hash,
     in_instruction_results,
     burns,
     key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()),
@@ -69,11 +78,15 @@ impl<S: ScannerFeed> SubstrateDb<S> {
 Some(match action_encodable {
   ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable {
     batch_id,
+    publisher,
+    in_instructions_hash,
     in_instruction_results,
     burns,
     key_to_activate,
   }) => Action::AcknowledgeBatch(AcknowledgeBatch {
     batch_id,
+    publisher,
+    in_instructions_hash,
     in_instruction_results,
     burns,
     key_to_activate: key_to_activate.map(|key| {

View File

@@ -3,6 +3,7 @@ use core::{marker::PhantomData, future::Future};
 use serai_db::{DbTxn, Db};
 use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance};
+use serai_validator_sets_primitives::Session;
 use primitives::task::ContinuallyRan;
 use crate::{
@@ -16,6 +17,8 @@ use db::*;
 pub(crate) fn queue_acknowledge_batch<S: ScannerFeed>(
   txn: &mut impl DbTxn,
   batch_id: u32,
+  publisher: Session,
+  in_instructions_hash: [u8; 32],
   in_instruction_results: Vec<messages::substrate::InInstructionResult>,
   burns: Vec<OutInstructionWithBalance>,
   key_to_activate: Option<KeyFor<S>>,
@@ -23,6 +26,8 @@ pub(crate) fn queue_acknowledge_batch<S: ScannerFeed>(
 SubstrateDb::<S>::queue_acknowledge_batch(
   txn,
   batch_id,
+  publisher,
+  in_instructions_hash,
   in_instruction_results,
   burns,
   key_to_activate,
@@ -67,17 +72,31 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
 match action {
   Action::AcknowledgeBatch(AcknowledgeBatch {
     batch_id,
+    publisher,
+    in_instructions_hash,
     in_instruction_results,
     mut burns,
     key_to_activate,
   }) => {
     // Check if we have the information for this batch
-    let Some(block_number) = report::take_block_number_for_batch::<S>(&mut txn, batch_id)
+    let Some(report::BatchInfo {
+      block_number,
+      publisher: expected_publisher,
+      in_instructions_hash: expected_in_instructions_hash,
+    }) = report::take_info_for_batch::<S>(&mut txn, batch_id)
     else {
       // If we don't, drop this txn (restoring the action to the database)
       drop(txn);
       return Ok(made_progress);
     };
+    assert_eq!(
+      publisher, expected_publisher,
+      "batch acknowledged on-chain was acknowledged by an unexpected publisher"
+    );
+    assert_eq!(
+      in_instructions_hash, expected_in_instructions_hash,
+      "batch acknowledged on-chain was distinct"
+    );
     {
       let external_key_for_session_to_sign_batch =

View File

@@ -13,13 +13,6 @@ const PALLET: &str = "InInstructions";
 #[derive(Clone, Copy)]
 pub struct SeraiInInstructions<'a>(pub(crate) &'a TemporalSerai<'a>);
 impl<'a> SeraiInInstructions<'a> {
-  pub async fn latest_block_for_network(
-    &self,
-    network: NetworkId,
-  ) -> Result<Option<BlockHash>, SeraiError> {
-    self.0.storage(PALLET, "LatestNetworkBlock", network).await
-  }
   pub async fn last_batch_for_network(
     &self,
     network: NetworkId,

View File

@@ -25,9 +25,6 @@ serai_test!(
 let network = NetworkId::Bitcoin;
 let id = 0;
-let mut block_hash = BlockHash([0; 32]);
-OsRng.fill_bytes(&mut block_hash.0);
 let mut address = SeraiAddress::new([0; 32]);
 OsRng.fill_bytes(&mut address.0);
@@ -38,7 +35,6 @@ serai_test!(
 let batch = Batch {
   network,
   id,
-  block: block_hash,
   instructions: vec![InInstructionWithBalance {
     instruction: InInstruction::Transfer(address),
     balance,
@@ -50,15 +46,12 @@ serai_test!(
 let serai = serai.as_of(block);
 {
   let serai = serai.in_instructions();
-  let latest_finalized = serai.latest_block_for_network(network).await.unwrap();
-  assert_eq!(latest_finalized, Some(block_hash));
   let batches = serai.batch_events().await.unwrap();
   assert_eq!(
     batches,
     vec![InInstructionsEvent::Batch {
       network,
       id,
-      block: block_hash,
       instructions_hash: Blake2b::<U32>::digest(batch.instructions.encode()).into(),
     }]
   );

View File

@@ -52,7 +52,6 @@ pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] {
 vec![InInstructionsEvent::Batch {
   network: batch.network,
   id: batch.id,
-  block: batch.block,
   instructions_hash: Blake2b::<U32>::digest(batch.instructions.encode()).into(),
 }],
 );

View File

@@ -89,12 +89,6 @@ pub mod pallet {
 #[pallet::storage]
 pub(crate) type Halted<T: Config> = StorageMap<_, Identity, NetworkId, (), OptionQuery>;
-// The latest block a network has acknowledged as finalized
-#[pallet::storage]
-#[pallet::getter(fn latest_network_block)]
-pub(crate) type LatestNetworkBlock<T: Config> =
-  StorageMap<_, Identity, NetworkId, BlockHash, OptionQuery>;
 impl<T: Config> Pallet<T> {
   // Use a dedicated transaction layer when executing this InInstruction
   // This lets it individually error without causing any storage modifications
@@ -262,11 +256,9 @@ pub mod pallet {
 let batch = batch.batch;
-LatestNetworkBlock::<T>::insert(batch.network, batch.block);
 Self::deposit_event(Event::Batch {
   network: batch.network,
   id: batch.id,
-  block: batch.block,
   instructions_hash: blake2_256(&batch.instructions.encode()),
 });
 for (i, instruction) in batch.instructions.into_iter().enumerate() {

View File

@@ -19,8 +19,7 @@ use sp_application_crypto::sr25519::Signature;
 use sp_std::vec::Vec;
 use sp_runtime::RuntimeDebug;
-#[rustfmt::skip]
-use serai_primitives::{BlockHash, Balance, NetworkId, SeraiAddress, ExternalAddress, system_address};
+use serai_primitives::{Balance, NetworkId, SeraiAddress, ExternalAddress, system_address};
 mod shorthand;
 pub use shorthand::*;
@@ -107,7 +106,6 @@ pub struct InInstructionWithBalance {
 pub struct Batch {
   pub network: NetworkId,
   pub id: u32,
-  pub block: BlockHash,
   pub instructions: Vec<InInstructionWithBalance>,
 }