mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-08 12:19:24 +00:00
Definition and delineation of tasks within the scanner
Also defines primitives for the processor.
This commit is contained in:
@@ -17,17 +17,23 @@ rustdoc-args = ["--cfg", "docsrs"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] }

frost = { package = "modular-frost", path = "../../crypto/frost", version = "^0.8.1", default-features = false }

serai-validator-sets-primitives = { path = "../../substrate/validator-sets/primitives", default-features = false, features = ["std"] }

# Macros
async-trait = { version = "0.1", default-features = false }
thiserror = { version = "1", default-features = false }

# Encoders
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

# Cryptography
group = { version = "0.13", default-features = false }

# Application
# NOTE(review): `log` was previously declared twice (under both Encoders and Application);
# duplicate keys in a single Cargo [dependencies] table are a manifest error, so it's kept once.
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

serai-db = { path = "../../common/db" }

messages = { package = "serai-processor-messages", path = "../messages" }
primitives = { package = "serai-processor-primitives", path = "../primitives" }
||||
162
processor/scanner/src/db.rs
Normal file
162
processor/scanner/src/db.rs
Normal file
@@ -0,0 +1,162 @@
|
||||
use core::marker::PhantomData;
|
||||
|
||||
use group::GroupEncoding;
|
||||
|
||||
use borsh::{BorshSerialize, BorshDeserialize};
|
||||
use serai_db::{Get, DbTxn, create_db};
|
||||
|
||||
use primitives::{Id, Block, BorshG};
|
||||
|
||||
use crate::ScannerFeed;
|
||||
|
||||
// The DB macro doesn't support `BorshSerialize + BorshDeserialize` as a bound, hence this.
|
||||
trait Borshy: BorshSerialize + BorshDeserialize {}
|
||||
impl<T: BorshSerialize + BorshDeserialize> Borshy for T {}
|
||||
|
||||
// A key Serai scans the external network with, alongside the window of blocks it's active for.
#[derive(BorshSerialize, BorshDeserialize)]
struct SeraiKey<K: Borshy> {
  // The block number this key activates at (inclusive)
  activation_block_number: u64,
  // The block number this key is retired at (inclusive), if it has been queued for retirement
  retirement_block_number: Option<u64>,
  // The key itself
  key: K,
}
|
||||
|
||||
create_db!(
  Scanner {
    // Mapping of a block's number to its ID, and the inverse mapping
    BlockId: <I: Id>(number: u64) -> I,
    BlockNumber: <I: Id>(id: I) -> u64,

    // The keys currently being scanned for, oldest first
    ActiveKeys: <K: Borshy>() -> Vec<SeraiKey<K>>,

    // The latest finalized block to appear on the blockchain
    LatestFinalizedBlock: () -> u64,
    // The latest block which it's safe to scan (dependent on what Serai has acknowledged scanning)
    LatestScannableBlock: () -> u64,
    // The next block to scan for received outputs
    NextToScanForOutputsBlock: () -> u64,
    // The next block to check for resolving eventualities
    NextToCheckForEventualitiesBlock: () -> u64,

    // If a block was notable
    /*
      A block is notable if one of three conditions are met:

      1) We activated a key within this block.
      2) We retired a key within this block.
      3) We received outputs within this block.

      The first two conditions, and the reasoning for them, is extensively documented in
      `spec/processor/Multisig Rotation.md`. The third is obvious (as any block we receive outputs
      in needs synchrony so that we can spend the received outputs).

      We save if a block is notable here by either the scan for received outputs task or the
      check for eventuality completion task. Once a block has been processed by both, the reporting
      task will report any notable blocks. Finally, the task which sets the block safe to scan
      makes its decision based on the notable blocks and the acknowledged blocks.
    */
    // This collapses from `bool` to `()`: the value is set if the block is notable and absent
    // otherwise
    NotableBlock: (number: u64) -> (),
  }
);
|
||||
|
||||
pub(crate) struct ScannerDb<S: ScannerFeed>(PhantomData<S>);
|
||||
impl<S: ScannerFeed> ScannerDb<S> {
|
||||
pub(crate) fn set_block(txn: &mut impl DbTxn, number: u64, id: <S::Block as Block>::Id) {
|
||||
BlockId::set(txn, number, &id);
|
||||
BlockNumber::set(txn, id, &number);
|
||||
}
|
||||
pub(crate) fn block_id(getter: &impl Get, number: u64) -> Option<<S::Block as Block>::Id> {
|
||||
BlockId::get(getter, number)
|
||||
}
|
||||
pub(crate) fn block_number(getter: &impl Get, id: <S::Block as Block>::Id) -> Option<u64> {
|
||||
BlockNumber::get(getter, id)
|
||||
}
|
||||
|
||||
// activation_block_number is inclusive, so the key will be scanned for starting at the specified
|
||||
// block
|
||||
pub(crate) fn queue_key(txn: &mut impl DbTxn, activation_block_number: u64, key: S::Key) {
|
||||
let mut keys: Vec<SeraiKey<BorshG<S::Key>>> = ActiveKeys::get(txn).unwrap_or(vec![]);
|
||||
for key_i in &keys {
|
||||
if key == key_i.key.0 {
|
||||
panic!("queueing a key prior queued");
|
||||
}
|
||||
}
|
||||
keys.push(SeraiKey {
|
||||
activation_block_number,
|
||||
retirement_block_number: None,
|
||||
key: BorshG(key),
|
||||
});
|
||||
ActiveKeys::set(txn, &keys);
|
||||
}
|
||||
// retirement_block_number is inclusive, so the key will no longer be scanned for as of the
|
||||
// specified block
|
||||
pub(crate) fn retire_key(txn: &mut impl DbTxn, retirement_block_number: u64, key: S::Key) {
|
||||
let mut keys: Vec<SeraiKey<BorshG<S::Key>>> =
|
||||
ActiveKeys::get(txn).expect("retiring key yet no active keys");
|
||||
|
||||
assert!(keys.len() > 1, "retiring our only key");
|
||||
for i in 0 .. keys.len() {
|
||||
if key == keys[i].key.0 {
|
||||
keys[i].retirement_block_number = Some(retirement_block_number);
|
||||
ActiveKeys::set(txn, &keys);
|
||||
return;
|
||||
}
|
||||
|
||||
// This is not the key in question, but since it's older, it already should've been queued
|
||||
// for retirement
|
||||
assert!(
|
||||
keys[i].retirement_block_number.is_some(),
|
||||
"older key wasn't retired before newer key"
|
||||
);
|
||||
}
|
||||
panic!("retiring key yet not present in keys")
|
||||
}
|
||||
pub(crate) fn keys(getter: &impl Get) -> Option<Vec<SeraiKey<BorshG<S::Key>>>> {
|
||||
ActiveKeys::get(getter)
|
||||
}
|
||||
|
||||
pub(crate) fn set_start_block(
|
||||
txn: &mut impl DbTxn,
|
||||
start_block: u64,
|
||||
id: <S::Block as Block>::Id,
|
||||
) {
|
||||
Self::set_block(txn, start_block, id);
|
||||
LatestFinalizedBlock::set(txn, &start_block);
|
||||
LatestScannableBlock::set(txn, &start_block);
|
||||
NextToScanForOutputsBlock::set(txn, &start_block);
|
||||
NextToCheckForEventualitiesBlock::set(txn, &start_block);
|
||||
}
|
||||
|
||||
pub(crate) fn set_latest_finalized_block(txn: &mut impl DbTxn, latest_finalized_block: u64) {
|
||||
LatestFinalizedBlock::set(txn, &latest_finalized_block);
|
||||
}
|
||||
pub(crate) fn latest_finalized_block(getter: &impl Get) -> Option<u64> {
|
||||
LatestFinalizedBlock::get(getter)
|
||||
}
|
||||
|
||||
pub(crate) fn set_latest_scannable_block(txn: &mut impl DbTxn, latest_scannable_block: u64) {
|
||||
LatestScannableBlock::set(txn, &latest_scannable_block);
|
||||
}
|
||||
pub(crate) fn latest_scannable_block(getter: &impl Get) -> Option<u64> {
|
||||
LatestScannableBlock::get(getter)
|
||||
}
|
||||
|
||||
pub(crate) fn set_next_to_scan_for_outputs_block(
|
||||
txn: &mut impl DbTxn,
|
||||
next_to_scan_for_outputs_block: u64,
|
||||
) {
|
||||
NextToScanForOutputsBlock::set(txn, &next_to_scan_for_outputs_block);
|
||||
}
|
||||
pub(crate) fn next_to_scan_for_outputs_block(getter: &impl Get) -> Option<u64> {
|
||||
NextToScanForOutputsBlock::get(getter)
|
||||
}
|
||||
|
||||
pub(crate) fn set_next_to_check_for_eventualities_block(
|
||||
txn: &mut impl DbTxn,
|
||||
next_to_check_for_eventualities_block: u64,
|
||||
) {
|
||||
NextToCheckForEventualitiesBlock::set(txn, &next_to_check_for_eventualities_block);
|
||||
}
|
||||
pub(crate) fn next_to_check_for_eventualities_block(getter: &impl Get) -> Option<u64> {
|
||||
NextToCheckForEventualitiesBlock::get(getter)
|
||||
}
|
||||
}
|
||||
0
processor/scanner/src/eventuality.rs
Normal file
0
processor/scanner/src/eventuality.rs
Normal file
72
processor/scanner/src/index.rs
Normal file
72
processor/scanner/src/index.rs
Normal file
@@ -0,0 +1,72 @@
|
||||
use serai_db::{Db, DbTxn};
|
||||
|
||||
use primitives::{Id, Block};
|
||||
|
||||
// TODO: Localize to IndexDb?
|
||||
use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
|
||||
|
||||
/*
|
||||
This processor should build its own index of the blockchain, yet only for finalized blocks which
|
||||
are safe to process. For Proof of Work blockchains, which only have probabilistic finality, these
|
||||
are the set of sufficiently confirmed blocks. For blockchains with finality, these are the
|
||||
finalized blocks.
|
||||
|
||||
  This task finds the finalized blocks, verifies they're contiguous, and saves their IDs.
|
||||
*/
|
||||
// Task which indexes the IDs of finalized blocks, extending the index one block at a time.
struct IndexFinalizedTask<D: Db, S: ScannerFeed> {
  // Database handle the index is persisted to
  db: D,
  // Source of finalized blocks on the external network
  feed: S,
}
|
||||
|
||||
#[async_trait::async_trait]
impl<D: Db, S: ScannerFeed> ContinuallyRan for IndexFinalizedTask<D, S> {
  async fn run_instance(&mut self) -> Result<(), String> {
    // Fetch the latest finalized block we've indexed
    let our_latest_finalized = ScannerDb::<S>::latest_finalized_block(&self.db)
      .expect("IndexTask run before writing the start block");
    // Fetch the latest finalized block per the external network
    let latest_finalized = match self.feed.latest_finalized_block_number().await {
      Ok(latest_finalized) => latest_finalized,
      Err(e) => Err(format!("couldn't fetch the latest finalized block number: {e:?}"))?,
    };

    // Index the hashes of all blocks until the latest finalized block
    // (an empty range, hence a no-op, if we're already at/ahead of the feed's tip)
    for b in (our_latest_finalized + 1) ..= latest_finalized {
      let block = match self.feed.block_by_number(b).await {
        Ok(block) => block,
        Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
      };

      // Check this descends from our indexed chain
      // This is done before committing anything, so a non-contiguous block is never indexed
      {
        let expected_parent =
          ScannerDb::<S>::block_id(&self.db, b - 1).expect("didn't have the ID of the prior block");
        if block.parent() != expected_parent {
          panic!(
            "current finalized block (#{b}, {}) doesn't build off finalized block (#{}, {})",
            hex::encode(block.parent()),
            b - 1,
            hex::encode(expected_parent)
          );
        }
      }

      // Update the latest finalized block
      // Each block gets its own transaction so progress is persisted even if a later fetch errors
      let mut txn = self.db.txn();
      ScannerDb::<S>::set_block(&mut txn, b, block.id());
      ScannerDb::<S>::set_latest_finalized_block(&mut txn, b);
      txn.commit();
    }

    Ok(())
  }
}
|
||||
|
||||
/*
|
||||
The processor can't index the blockchain unilaterally. It needs to develop a totally ordered view
|
||||
of the blockchain. That requires consensus with other validators on when certain keys are set to
|
||||
activate (and retire). We solve this by only scanning `n` blocks ahead of the last agreed upon
|
||||
block, then waiting for Serai to acknowledge the block. This lets us safely schedule events after
|
||||
this `n` block window (as demonstrated/proven with `mini`).
|
||||
|
||||
TODO
|
||||
*/
|
||||
@@ -1,25 +1,91 @@
|
||||
use core::marker::PhantomData;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
io::Read,
|
||||
time::Duration,
|
||||
collections::{VecDeque, HashSet, HashMap},
|
||||
};
|
||||
use core::fmt::Debug;
|
||||
|
||||
use ciphersuite::group::GroupEncoding;
|
||||
use frost::curve::Ciphersuite;
|
||||
use primitives::{ReceivedOutput, Block};
|
||||
|
||||
use log::{info, debug, warn};
|
||||
use tokio::{
|
||||
sync::{RwLockReadGuard, RwLockWriteGuard, RwLock, mpsc},
|
||||
time::sleep,
|
||||
};
|
||||
mod db;
|
||||
mod index;
|
||||
|
||||
use crate::{
|
||||
Get, DbTxn, Db,
|
||||
networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network},
|
||||
};
|
||||
/// A feed usable to scan a blockchain.
|
||||
///
|
||||
/// This defines the primitive types used, along with various getters necessary for indexing.
|
||||
#[async_trait::async_trait]
|
||||
pub trait ScannerFeed: Send + Sync {
|
||||
/// The type of the key used to receive coins on this blockchain.
|
||||
type Key: group::Group + group::GroupEncoding;
|
||||
|
||||
/// The type of the address used to specify who to send coins to on this blockchain.
|
||||
type Address;
|
||||
|
||||
/// The type representing a received (and spendable) output.
|
||||
type Output: ReceivedOutput<Self::Key, Self::Address>;
|
||||
|
||||
/// The representation of a block for this blockchain.
|
||||
///
|
||||
/// A block is defined as a consensus event associated with a set of transactions. It is not
|
||||
/// necessary to literally define it as whatever the external network defines as a block. For
|
||||
/// external networks which finalize block(s), this block type should be a representation of all
|
||||
/// transactions within a finalization event.
|
||||
type Block: Block;
|
||||
|
||||
/// An error encountered when fetching data from the blockchain.
|
||||
///
|
||||
/// This MUST be an ephemeral error. Retrying fetching data from the blockchain MUST eventually
|
||||
/// resolve without manual intervention.
|
||||
type EphemeralError: Debug;
|
||||
|
||||
/// Fetch the number of the latest finalized block.
|
||||
///
|
||||
/// The block number is its zero-indexed position within a linear view of the external network's
|
||||
/// consensus. The genesis block accordingly has block number 0.
|
||||
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError>;
|
||||
|
||||
/// Fetch a block by its number.
|
||||
async fn block_by_number(&self, number: u64) -> Result<Self::Block, Self::EphemeralError>;
|
||||
|
||||
/// Scan a block for its outputs.
|
||||
async fn scan_for_outputs(
|
||||
&self,
|
||||
block: &Self::Block,
|
||||
key: Self::Key,
|
||||
) -> Result<Self::Output, Self::EphemeralError>;
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
pub(crate) trait ContinuallyRan: Sized {
|
||||
async fn run_instance(&mut self) -> Result<(), String>;
|
||||
|
||||
async fn continually_run(mut self) {
|
||||
// The default number of seconds to sleep before running the task again
|
||||
let default_sleep_before_next_task = 5;
|
||||
// The current number of seconds to sleep before running the task again
|
||||
// We increment this upon errors in order to not flood the logs with errors
|
||||
let mut current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
|
||||
let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
|
||||
// Set a limit of sleeping for two minutes
|
||||
*current_sleep_before_next_task = new_sleep.max(120);
|
||||
};
|
||||
|
||||
loop {
|
||||
match self.run_instance().await {
|
||||
Ok(()) => {
|
||||
// Upon a successful (error-free) loop iteration, reset the amount of time we sleep
|
||||
current_sleep_before_next_task = default_sleep_before_next_task;
|
||||
}
|
||||
Err(e) => {
|
||||
log::debug!("{}", e);
|
||||
increase_sleep_before_next_task(&mut current_sleep_before_next_task);
|
||||
}
|
||||
}
|
||||
|
||||
// Don't run the task again for another few seconds
|
||||
// This is at the start of the loop so we can continue without skipping this delay
|
||||
tokio::time::sleep(core::time::Duration::from_secs(current_sleep_before_next_task)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
#[derive(Clone, Debug)]
|
||||
pub enum ScannerEvent<N: Network> {
|
||||
// Block scanned
|
||||
@@ -44,86 +110,6 @@ pub type ScannerEventChannel<N> = mpsc::UnboundedReceiver<ScannerEvent<N>>;
|
||||
#[derive(Clone, Debug)]
|
||||
struct ScannerDb<N: Network, D: Db>(PhantomData<N>, PhantomData<D>);
|
||||
impl<N: Network, D: Db> ScannerDb<N, D> {
|
||||
fn scanner_key(dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
|
||||
D::key(b"SCANNER", dst, key)
|
||||
}
|
||||
|
||||
fn block_key(number: usize) -> Vec<u8> {
|
||||
Self::scanner_key(b"block_id", u64::try_from(number).unwrap().to_le_bytes())
|
||||
}
|
||||
fn block_number_key(id: &<N::Block as Block<N>>::Id) -> Vec<u8> {
|
||||
Self::scanner_key(b"block_number", id)
|
||||
}
|
||||
fn save_block(txn: &mut D::Transaction<'_>, number: usize, id: &<N::Block as Block<N>>::Id) {
|
||||
txn.put(Self::block_number_key(id), u64::try_from(number).unwrap().to_le_bytes());
|
||||
txn.put(Self::block_key(number), id);
|
||||
}
|
||||
fn block<G: Get>(getter: &G, number: usize) -> Option<<N::Block as Block<N>>::Id> {
|
||||
getter.get(Self::block_key(number)).map(|id| {
|
||||
let mut res = <N::Block as Block<N>>::Id::default();
|
||||
res.as_mut().copy_from_slice(&id);
|
||||
res
|
||||
})
|
||||
}
|
||||
fn block_number<G: Get>(getter: &G, id: &<N::Block as Block<N>>::Id) -> Option<usize> {
|
||||
getter
|
||||
.get(Self::block_number_key(id))
|
||||
.map(|number| u64::from_le_bytes(number.try_into().unwrap()).try_into().unwrap())
|
||||
}
|
||||
|
||||
fn keys_key() -> Vec<u8> {
|
||||
Self::scanner_key(b"keys", b"")
|
||||
}
|
||||
fn register_key(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
activation_number: usize,
|
||||
key: <N::Curve as Ciphersuite>::G,
|
||||
) {
|
||||
let mut keys = txn.get(Self::keys_key()).unwrap_or(vec![]);
|
||||
|
||||
let key_bytes = key.to_bytes();
|
||||
|
||||
let key_len = key_bytes.as_ref().len();
|
||||
assert_eq!(keys.len() % (8 + key_len), 0);
|
||||
|
||||
// Sanity check this key isn't already present
|
||||
let mut i = 0;
|
||||
while i < keys.len() {
|
||||
if &keys[(i + 8) .. ((i + 8) + key_len)] == key_bytes.as_ref() {
|
||||
panic!("adding {} as a key yet it was already present", hex::encode(key_bytes));
|
||||
}
|
||||
i += 8 + key_len;
|
||||
}
|
||||
|
||||
keys.extend(u64::try_from(activation_number).unwrap().to_le_bytes());
|
||||
keys.extend(key_bytes.as_ref());
|
||||
txn.put(Self::keys_key(), keys);
|
||||
}
|
||||
fn keys<G: Get>(getter: &G) -> Vec<(usize, <N::Curve as Ciphersuite>::G)> {
|
||||
let bytes_vec = getter.get(Self::keys_key()).unwrap_or(vec![]);
|
||||
let mut bytes: &[u8] = bytes_vec.as_ref();
|
||||
|
||||
// Assumes keys will be 32 bytes when calculating the capacity
|
||||
// If keys are larger, this may allocate more memory than needed
|
||||
// If keys are smaller, this may require additional allocations
|
||||
// Either are fine
|
||||
let mut res = Vec::with_capacity(bytes.len() / (8 + 32));
|
||||
while !bytes.is_empty() {
|
||||
let mut activation_number = [0; 8];
|
||||
bytes.read_exact(&mut activation_number).unwrap();
|
||||
let activation_number = u64::from_le_bytes(activation_number).try_into().unwrap();
|
||||
|
||||
res.push((activation_number, N::Curve::read_G(&mut bytes).unwrap()));
|
||||
}
|
||||
res
|
||||
}
|
||||
fn retire_key(txn: &mut D::Transaction<'_>) {
|
||||
let keys = Self::keys(txn);
|
||||
assert_eq!(keys.len(), 2);
|
||||
txn.del(Self::keys_key());
|
||||
Self::register_key(txn, keys[1].0, keys[1].1);
|
||||
}
|
||||
|
||||
fn seen_key(id: &<N::Output as Output<N>>::Id) -> Vec<u8> {
|
||||
Self::scanner_key(b"seen", id)
|
||||
}
|
||||
@@ -737,3 +723,4 @@ impl<N: Network, D: Db> Scanner<N, D> {
|
||||
}
|
||||
}
|
||||
}
|
||||
*/
|
||||
|
||||
73
processor/scanner/src/scan.rs
Normal file
73
processor/scanner/src/scan.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
use serai_db::{Db, DbTxn};

use primitives::{Id, Block, ReceivedOutput};

// TODO: Localize to ScanDb?
use crate::{db::ScannerDb, ScannerFeed, ContinuallyRan};
|
||||
|
||||
// Task which scans blocks, up to the latest scannable block, for outputs sent to our active keys.
struct ScanForOutputsTask<D: Db, S: ScannerFeed> {
  // Database handle holding the scanner's cursors and keys
  db: D,
  // Source of blocks and outputs on the external network
  feed: S,
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanForOutputsTask<D, S> {
|
||||
async fn run_instance(&mut self) -> Result<(), String> {
|
||||
// Fetch the safe to scan block
|
||||
let latest_scannable = ScannerDb::<S>::latest_scannable_block(&self.db).expect("ScanForOutputsTask run before writing the start block");
|
||||
// Fetch the next block to scan
|
||||
let next_to_scan = ScannerDb::<S>::next_to_scan_for_outputs_block(&self.db).expect("ScanForOutputsTask run before writing the start block");
|
||||
|
||||
for b in next_to_scan ..= latest_scannable {
|
||||
let block = match self.feed.block_by_number(b).await {
|
||||
Ok(block) => block,
|
||||
Err(e) => Err(format!("couldn't fetch block {b}: {e:?}"))?,
|
||||
};
|
||||
|
||||
// Check the ID of this block is the expected ID
|
||||
{
|
||||
let expected = ScannerDb::<S>::block_id(b).expect("scannable block didn't have its ID saved");
|
||||
if block.id() != expected {
|
||||
panic!("finalized chain reorganized from {} to {} at {}", hex::encode(expected), hex::encode(block.id()), b);
|
||||
}
|
||||
}
|
||||
|
||||
log::info!("scanning block: {} ({b})", hex::encode(block.id()));
|
||||
|
||||
let keys = ScannerDb::<S>::keys(&self.db).expect("scanning for a blockchain without any keys set");
|
||||
// Remove all the retired keys
|
||||
while let Some(retire_at) = keys[0].retirement_block_number {
|
||||
if retire_at <= b {
|
||||
keys.remove(0);
|
||||
}
|
||||
}
|
||||
assert!(keys.len() <= 2);
|
||||
|
||||
// Scan for each key
|
||||
for key in keys {
|
||||
// If this key has yet to active, skip it
|
||||
if key.activation_block_number > b {
|
||||
continue;
|
||||
}
|
||||
|
||||
let mut outputs = vec![];
|
||||
for output in network.scan_for_outputs(&block, key).awaits {
|
||||
assert_eq!(output.key(), key);
|
||||
// TODO: Check for dust
|
||||
outputs.push(output);
|
||||
}
|
||||
}
|
||||
|
||||
let mut txn = self.db.txn();
|
||||
// Update the latest scanned block
|
||||
ScannerDb::<S>::set_next_to_scan_for_outputs_block(&mut txn, b + 1);
|
||||
// TODO: If this had outputs, yield them and mark this block notable
|
||||
/*
|
||||
A block is notable if it's an activation, had outputs, or a retirement block.
|
||||
*/
|
||||
txn.commit();
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Reference in New Issue
Block a user