Remove async-trait from processor/

Part of https://github.com/serai-dex/issues/607.
Luke Parker
2024-09-13 01:14:47 -04:00
parent 2c4c33e632
commit e78236276a
29 changed files with 1481 additions and 1378 deletions
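The change applied across these files is mechanical: each `#[async_trait]` `async fn` becomes a plain `fn` returning `impl Send + Future`, with the old body wrapped in an `async move` block. A minimal sketch of that desugaring, using a hypothetical `Feed` trait rather than any of the traits actually touched here:

use core::future::Future;

// Hypothetical trait, shown only to illustrate the desugaring.
trait Feed {
  type EphemeralError;
  // Previously: #[async_trait] ... async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError>;
  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>>;
}

struct Constant(u64);

impl Feed for Constant {
  type EphemeralError = ();
  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
    // The old `async fn` body moves into an `async move` block unchanged.
    async move { Ok(self.0) }
  }
}

Call sites are unaffected, as the returned future is awaited exactly as before. Spelling out the return type (instead of using `async fn` directly in the trait) preserves the requirement that the returned future be `Send`, which `#[async_trait]` previously provided.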

View File

@@ -17,7 +17,6 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
async-trait = { version = "0.1", default-features = false }
rand_core = { version = "0.6", default-features = false }
hex = { version = "0.4", default-features = false, features = ["std"] }

View File

@@ -96,7 +96,6 @@ use serai_client::{
*/
/*
#[async_trait]
impl TransactionTrait<Bitcoin> for Transaction {
#[cfg(test)]
async fn fee(&self, network: &Bitcoin) -> u64 {
@@ -210,7 +209,6 @@ impl Bitcoin {
}
}
#[async_trait]
impl Network for Bitcoin {
// 2 inputs should be 2 * 230 = 460 weight units
// The output should be ~36 bytes, or 144 weight units

View File

@@ -31,7 +31,6 @@ impl<D: Db> fmt::Debug for Block<D> {
}
}
#[async_trait::async_trait]
impl<D: Db> primitives::Block for Block<D> {
type Header = BlockHeader;

View File

@@ -1,3 +1,5 @@
use core::future::Future;
use bitcoin_serai::rpc::{RpcError, Rpc as BRpc};
use serai_client::primitives::{NetworkId, Coin, Amount};
@@ -18,7 +20,6 @@ pub(crate) struct Rpc<D: Db> {
pub(crate) rpc: BRpc,
}
#[async_trait::async_trait]
impl<D: Db> ScannerFeed for Rpc<D> {
const NETWORK: NetworkId = NetworkId::Bitcoin;
// 6 confirmations is widely accepted as secure and shouldn't occur
@@ -32,71 +33,89 @@ impl<D: Db> ScannerFeed for Rpc<D> {
type EphemeralError = RpcError;
async fn latest_finalized_block_number(&self) -> Result<u64, Self::EphemeralError> {
db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError)
fn latest_finalized_block_number(
&self,
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) }
}
async fn time_of_block(&self, number: u64) -> Result<u64, Self::EphemeralError> {
let number = usize::try_from(number).unwrap();
/*
The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
CLTV). This creates a monotonic median time which we use as the block time.
*/
// This implements `GetMedianTimePast`
let median = {
const MEDIAN_TIMESPAN: usize = 11;
let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
timestamps.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
}
timestamps.sort();
timestamps[timestamps.len() / 2]
};
/*
This block's timestamp is guaranteed to be greater than this median:
https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
/src/validation.cpp#L4182-L4184
This does not guarantee the median always increases however. Take the following trivial
example, as the window is initially built:
0 block has time 0 // Prior blocks: []
1 block has time 1 // Prior blocks: [0]
2 block has time 2 // Prior blocks: [0, 1]
3 block has time 2 // Prior blocks: [0, 1, 2]
These two blocks have the same time (both greater than the median of their prior blocks) and
the same median.
The median will never decrease however. The values pushed onto the window will always be
greater than the median. If a value greater than the median is popped, the median will remain
the same (due to the counterbalance of the pushed value). If a value less than the median is
popped, the median will increase (either to another instance of the same value, yet one
closer to the end of the repeating sequence, or to a higher value).
*/
Ok(median.into())
}
async fn unchecked_block_header_by_number(
fn time_of_block(
&self,
number: u64,
) -> Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError> {
Ok(BlockHeader(
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?.header,
))
) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
async move {
let number = usize::try_from(number).unwrap();
/*
The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to improve
CLTV). This creates a monotonic median time which we use as the block time.
*/
// This implements `GetMedianTimePast`
let median = {
const MEDIAN_TIMESPAN: usize = 11;
let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
timestamps
.push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
}
timestamps.sort();
timestamps[timestamps.len() / 2]
};
/*
This block's timestamp is guaranteed to be greater than this median:
https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9
/src/validation.cpp#L4182-L4184
This does not guarantee the median always increases however. Take the following trivial
example, as the window is initially built:
0 block has time 0 // Prior blocks: []
1 block has time 1 // Prior blocks: [0]
2 block has time 2 // Prior blocks: [0, 1]
3 block has time 2 // Prior blocks: [0, 1, 2]
These two blocks have the same time (both greater than the median of their prior blocks) and
the same median.
The median will never decrease however. The values pushed onto the window will always be
greater than the median. If a value greater than the median is popped, the median will
remain the same (due to the counterbalance of the pushed value). If a value less than the
median is popped, the median will increase (either to another instance of the same value,
yet one closer to the end of the repeating sequence, or to a higher value).
*/
Ok(median.into())
}
}
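As a toy illustration of the monotonicity argument in the comment above (hypothetical timestamps, not part of this commit): the median of the growing window of prior-block times never decreases, even when the raw block times repeat.

fn median(mut timestamps: Vec<u32>) -> u32 {
  // Same selection as `GetMedianTimePast` above: sort and take the middle element.
  timestamps.sort();
  timestamps[timestamps.len() / 2]
}

fn main() {
  // Prior-block windows for blocks 1 through 4 of the comment's example chain.
  assert_eq!(median(vec![0]), 0);
  assert_eq!(median(vec![0, 1]), 1);
  assert_eq!(median(vec![0, 1, 2]), 1); // block 3 shares block 2's median despite equal block times
  assert_eq!(median(vec![0, 1, 2, 2]), 2); // the median rose, and it can never fall
}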
async fn unchecked_block_by_number(
fn unchecked_block_header_by_number(
&self,
number: u64,
) -> Result<Self::Block, Self::EphemeralError> {
Ok(Block(
self.db.clone(),
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
))
) -> impl Send
+ Future<Output = Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError>>
{
async move {
Ok(BlockHeader(
self
.rpc
.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?)
.await?
.header,
))
}
}
fn unchecked_block_by_number(
&self,
number: u64,
) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> {
async move {
Ok(Block(
self.db.clone(),
self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
))
}
}
fn dust(coin: Coin) -> Amount {
@@ -137,22 +156,26 @@ impl<D: Db> ScannerFeed for Rpc<D> {
Amount(10_000)
}
async fn cost_to_aggregate(
fn cost_to_aggregate(
&self,
coin: Coin,
_reference_block: &Self::Block,
) -> Result<Amount, Self::EphemeralError> {
assert_eq!(coin, Coin::Bitcoin);
// TODO
Ok(Amount(0))
) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
async move {
assert_eq!(coin, Coin::Bitcoin);
// TODO
Ok(Amount(0))
}
}
}
#[async_trait::async_trait]
impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
type EphemeralError = RpcError;
async fn publish(&self, tx: Transaction) -> Result<(), Self::EphemeralError> {
self.rpc.send_raw_transaction(&tx.0).await.map(|_| ())
fn publish(
&self,
tx: Transaction,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) }
}
}

View File

@@ -1,18 +1,4 @@
/*
We want to be able to return received outputs. We do that by iterating over the inputs to find an
address format we recognize, then setting that address as the address to return to.
Since inputs only contain the script signatures, yet addresses are for script public keys, we
need to pull up the output spent by an input and read the script public key from that. While we
could use `txindex=1`, and an asynchronous call to the Bitcoin node, we:
1) Can maintain a much smaller index ourselves
2) Don't want the asynchronous call (which would require the flow be async, allowed to
potentially error, and more latent)
3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet)
This task builds that index.
*/
use core::future::Future;
use bitcoin_serai::bitcoin::ScriptBuf;
@@ -35,72 +21,88 @@ pub(crate) fn script_pubkey_for_on_chain_output(
)
}
/*
We want to be able to return received outputs. We do that by iterating over the inputs to find an
address format we recognize, then setting that address as the address to return to.
Since inputs only contain the script signatures, yet addresses are for script public keys, we
need to pull up the output spent by an input and read the script public key from that. While we
could use `txindex=1`, and an asynchronous call to the Bitcoin node, we:
1) Can maintain a much smaller index ourselves
2) Don't want the asynchronous call (which would require the flow be async, allowed to
potentially error, and more latent)
3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet)
This task builds that index.
*/
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);
#[async_trait::async_trait]
impl<D: Db> ContinuallyRan for TxIndexTask<D> {
async fn run_iteration(&mut self) -> Result<bool, String> {
let latest_block_number = self
.0
.rpc
.get_latest_block_number()
.await
.map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?;
let latest_block_number = u64::try_from(latest_block_number).unwrap();
// `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself)
let finalized_block_number =
latest_block_number.checked_sub(Rpc::<D>::CONFIRMATIONS - 1).ok_or(format!(
"blockchain only just started and doesn't have {} blocks yet",
Rpc::<D>::CONFIRMATIONS
))?;
/*
`finalized_block_number` is the latest block number minus confirmations. The blockchain may
still undetectably re-organize: while the scanner maintains an index of finalized blocks and
panics on reorganization, this task runs prior to the scanner and that index.
A reorganization of `CONFIRMATIONS` blocks still breaks an invariant. Even if that occurs, this
saves the script public keys *by the transaction hash and output index*. Accordingly, it isn't
invalidated on reorganization. The only risk would be if the new chain reorganized to
include a transaction to Serai which we didn't index the parents of. If that happens, we'll
panic when we scan the transaction, causing the invariant to be detected.
*/
let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db);
let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1);
let mut iterated = false;
for b in next_block ..= finalized_block_number {
iterated = true;
// Fetch the block
let block_hash = self
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
let latest_block_number = self
.0
.rpc
.get_block_hash(b.try_into().unwrap())
.get_latest_block_number()
.await
.map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?;
let block = self
.0
.rpc
.get_block(&block_hash)
.await
.map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?;
.map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?;
let latest_block_number = u64::try_from(latest_block_number).unwrap();
// `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself)
let finalized_block_number =
latest_block_number.checked_sub(Rpc::<D>::CONFIRMATIONS - 1).ok_or(format!(
"blockchain only just started and doesn't have {} blocks yet",
Rpc::<D>::CONFIRMATIONS
))?;
let mut txn = self.0.db.txn();
/*
`finalized_block_number` is the latest block number minus confirmations. The blockchain may
still undetectably re-organize: while the scanner maintains an index of finalized blocks and
panics on reorganization, this task runs prior to the scanner and that index.
for tx in &block.txdata {
let txid = hash_bytes(tx.compute_txid().to_raw_hash());
for (o, output) in tx.output.iter().enumerate() {
let o = u32::try_from(o).unwrap();
// Set the script public key for this transaction
db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
A reorganization of `CONFIRMATIONS` blocks still breaks an invariant. Even if that occurs, this
saves the script public keys *by the transaction hash and output index*. Accordingly, it
isn't invalidated on reorganization. The only risk would be if the new chain reorganized to
include a transaction to Serai which we didn't index the parents of. If that happens, we'll
panic when we scan the transaction, causing the invariant to be detected.
*/
let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db);
let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1);
let mut iterated = false;
for b in next_block ..= finalized_block_number {
iterated = true;
// Fetch the block
let block_hash = self
.0
.rpc
.get_block_hash(b.try_into().unwrap())
.await
.map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?;
let block = self
.0
.rpc
.get_block(&block_hash)
.await
.map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?;
let mut txn = self.0.db.txn();
for tx in &block.txdata {
let txid = hash_bytes(tx.compute_txid().to_raw_hash());
for (o, output) in tx.output.iter().enumerate() {
let o = u32::try_from(o).unwrap();
// Set the script public key for this transaction
db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
}
}
}
db::LatestBlockToYieldAsFinalized::set(&mut txn, &b);
txn.commit();
db::LatestBlockToYieldAsFinalized::set(&mut txn, &b);
txn.commit();
}
Ok(iterated)
}
Ok(iterated)
}
}
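For reference, the index this task maintains is simply a map from (transaction hash, output index) to the spent output's script public key. A toy, in-memory stand-in (hypothetical types, not the commit's `db::ScriptPubKey` schema) showing both the write side performed above and the read side the scanner later relies on when resolving which output an input spends:

use std::collections::HashMap;

// Toy stand-in for the on-disk index: (txid, output index) -> script public key bytes.
struct TxIndex(HashMap<([u8; 32], u32), Vec<u8>>);

impl TxIndex {
  // Write side, mirroring the `db::ScriptPubKey::set` calls in the task above.
  fn set(&mut self, txid: [u8; 32], vout: u32, script_pubkey: &[u8]) {
    self.0.insert((txid, vout), script_pubkey.to_vec());
  }
  // Read side: given the outpoint an input spends, recover the spent output's script
  // public key (and thus its address format) without relying on a `txindex=1` node.
  fn script_pubkey(&self, txid: [u8; 32], vout: u32) -> Option<&[u8]> {
    self.0.get(&(txid, vout)).map(Vec::as_slice)
  }
}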