Simplify async code in in_instructions_unordered

Outsources fetching the ERC20 events to top_level_transfers_unordered.
Luke Parker
2025-01-24 05:34:49 -05:00
parent 201b675031
commit f948881eba
6 changed files with 284 additions and 324 deletions
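For context, the reworked `Erc20` API drops the constructed view (`Erc20::new`) in favour of static methods which take the provider and the token's address explicitly, and `top_level_transfers_unordered` now returns both the raw logs and the decoded transfers. A minimal caller sketch, assuming only the signatures visible in the diff below; the `scan` wrapper, its parameters, and the error handling are illustrative, and imports are elided:

// Hypothetical wrapper around the new API; `provider`, `erc20`, and `to` are supplied by the caller
async fn scan(
  provider: &RootProvider<SimpleRequest>,
  erc20: Address,
  to: Address,
  from_block: u64,
  to_block: u64,
) -> Result<(), RpcError<TransportErrorKind>> {
  // One call yields the `Transfer` logs (indexed by transaction, sorted by log index) and the
  // top-level transfers for the inclusive block range
  let TopLevelTransfers { logs, transfers } =
    Erc20::top_level_transfers_unordered(provider, from_block, to_block, erc20, to).await?;
  // `transfers` is unordered; `logs` lets a caller (such as the Router) re-check individual
  // `Transfer` events without issuing another `eth_getLogs` call
  for transfer in &transfers {
    let _logs_for_this_transaction = &logs[&transfer.transaction_hash];
  }
  Ok(())
}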


@@ -2,8 +2,7 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

-use core::borrow::Borrow;
-use std::{sync::Arc, collections::HashMap};
+use std::collections::HashMap;

 use alloy_core::primitives::{Address, U256};
@@ -57,20 +56,27 @@ pub struct TopLevelTransfer {
   pub data: Vec<u8>,
 }

+/// The result of `Erc20::top_level_transfers_unordered`.
+pub struct TopLevelTransfers {
+  /// Every `Transfer` log of the contextual ERC20 to the contextual account, indexed by
+  /// their transaction.
+  ///
+  /// The ERC20/account is labelled contextual as it isn't directly named here. Instead, they're
+  /// assumed contextual to how this was created.
+  pub logs: HashMap<[u8; 32], Vec<Log>>,
+  /// All of the top-level transfers of the contextual ERC20 to the contextual account.
+  ///
+  /// The ERC20/account is labelled contextual as it isn't directly named here. Instead, they're
+  /// assumed contextual to how this was created.
+  pub transfers: Vec<TopLevelTransfer>,
+}
+
 /// A view for an ERC20 contract.
 #[derive(Clone, Debug)]
-pub struct Erc20 {
-  provider: Arc<RootProvider<SimpleRequest>>,
-  address: Address,
-}
+pub struct Erc20;

 impl Erc20 {
-  /// Construct a new view of the specified ERC20 contract.
-  pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
-    Self { provider, address }
-  }
-
   /// The filter for transfer logs of the specified ERC20, to the specified recipient.
-  pub fn transfer_filter(from_block: u64, to_block: u64, erc20: Address, to: Address) -> Filter {
+  fn transfer_filter(from_block: u64, to_block: u64, erc20: Address, to: Address) -> Filter {
     let filter = Filter::new().from_block(from_block).to_block(to_block);
     filter.address(erc20).event_signature(Transfer::SIGNATURE_HASH).topic2(to.into_word())
   }
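The `topic2` filter relies on the canonical ERC-20 event `Transfer(address indexed from, address indexed to, uint256 value)`: topic 0 is the event's signature hash, topic 1 is `from`, and topic 2 is `to`. A sketch of the per-log check this implies, mirroring the double checks performed later in this file; `is_transfer_to` is a hypothetical helper, not part of the diff:

// Returns true if `log` is a `Transfer` event whose indexed `to` matches the filtered recipient
fn is_transfer_to(log: &Log, to: Address) -> bool {
  (log.topics().first() == Some(&Transfer::SIGNATURE_HASH)) &&
    (log.topics().get(2) == Some(&to.into_word()))
}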
@@ -78,32 +84,35 @@ impl Erc20 {
   /// Yield the top-level transfer for the specified transaction (if one exists).
   ///
   /// The passed-in logs MUST be the logs for this transaction. The logs MUST be filtered to the
-  /// `Transfer` events of the intended token(s) and the intended `to` transferred to. These
+  /// `Transfer` events of the intended token and the intended `to` transferred to. These
   /// properties are completely unchecked and assumed to be the case.
   ///
   /// This does NOT yield THE top-level transfer. If multiple `Transfer` events have identical
-  /// structure to the top-level transfer call, the earliest `Transfer` event present in the logs
-  /// is considered the top-level transfer.
+  /// structure to the top-level transfer call, the first `Transfer` event present in the logs is
+  /// considered the top-level transfer.
   // Yielding THE top-level transfer would require tracing the transaction execution and isn't
   // worth the effort.
-  pub async fn top_level_transfer(
-    provider: impl AsRef<RootProvider<SimpleRequest>>,
+  async fn top_level_transfer(
+    provider: &RootProvider<SimpleRequest>,
+    erc20: Address,
     transaction_hash: [u8; 32],
-    mut transfer_logs: Vec<impl Borrow<Log>>,
+    transfer_logs: &[Log],
   ) -> Result<Option<TopLevelTransfer>, RpcError<TransportErrorKind>> {
     // Fetch the transaction
     let transaction =
-      provider.as_ref().get_transaction_by_hash(transaction_hash.into()).await?.ok_or_else(
-        || {
-          TransportErrorKind::Custom(
-            "node didn't have the transaction which emitted a log it had".to_string().into(),
-          )
-        },
-      )?;
+      provider.get_transaction_by_hash(transaction_hash.into()).await?.ok_or_else(|| {
+        TransportErrorKind::Custom(
+          "node didn't have the transaction which emitted a log it had".to_string().into(),
+        )
+      })?;

-    // If this is a top-level call...
+    // If this transaction didn't call this ERC20 at a top-level, return
+    if transaction.inner.to() != Some(erc20) {
+      return Ok(None);
+    }
+
     // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
-    // to the `InInstruction` appended after the call itself
+    // to the additional data appended after the call itself
     let Ok(call) = IERC20Calls::abi_decode(transaction.inner.input(), false) else {
       return Ok(None);
     };
@@ -116,21 +125,12 @@ impl Erc20 {
       _ => return Ok(None),
     };

-    // Sort the logs to ensure the the earliest logs are first
-    transfer_logs.sort_by_key(|log| log.borrow().log_index);
     // Find the log for this top-level transfer
     for log in transfer_logs {
-      // Check the log is for the called contract
-      // This handles the edge case where we're checking if transfers of token X were top-level and
-      // a transfer of token Y (with equivalent structure) was top-level
-      if Some(log.borrow().address()) != transaction.inner.to() {
-        continue;
-      }
-
       // Since the caller is responsible for filtering these to `Transfer` events, we can assume
       // this is a non-compliant ERC20 or an error with the logs fetched. We assume ERC20
       // compliance here, making this an RPC error
-      let log = log.borrow().log_decode::<Transfer>().map_err(|_| {
+      let log = log.log_decode::<Transfer>().map_err(|_| {
        TransportErrorKind::Custom("log didn't include a valid transfer event".to_string().into())
       })?;
@@ -158,8 +158,8 @@ impl Erc20 {
         ) => Vec::from(inInstruction),
       }
     } else {
-      // We don't error here so this transfer is propagated up the stack, even without the
-      // InInstruction. In practice, Serai should acknowledge this and return it to the sender
+      // If there was no additional data appended, use an empty Vec (which has no data)
+      // This has a slight information loss in that it's None -> Some(vec![]), but it's fine
       vec![]
     };
@@ -177,69 +177,76 @@ impl Erc20 {
   /// Fetch all top-level transfers to the specified address for this token.
   ///
-  /// The result of this function is unordered.
+  /// The `transfers` in the result are unordered. The `logs` are sorted by index.
   pub async fn top_level_transfers_unordered(
-    &self,
+    provider: &RootProvider<SimpleRequest>,
     from_block: u64,
     to_block: u64,
+    erc20: Address,
     to: Address,
-  ) -> Result<Vec<TopLevelTransfer>, RpcError<TransportErrorKind>> {
-    // Get all transfers within these blocks
-    let logs = self
-      .provider
-      .get_logs(&Self::transfer_filter(from_block, to_block, self.address, to))
-      .await?;
+  ) -> Result<TopLevelTransfers, RpcError<TransportErrorKind>> {
+    let mut logs = {
+      // Get all transfers within these blocks
+      let logs = provider.get_logs(&Self::transfer_filter(from_block, to_block, erc20, to)).await?;

-    // The logs, indexed by their transactions
-    let mut transaction_logs = HashMap::new();
-    // Index the logs by their transactions
-    for log in logs {
-      // Double check the address which emitted this log
-      if log.address() != self.address {
-        Err(TransportErrorKind::Custom(
-          "node returned logs for a different address than requested".to_string().into(),
-        ))?;
-      }
-      // Double check the event signature for this log
-      if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
-        Err(TransportErrorKind::Custom(
-          "node returned a log for a different topic than filtered to".to_string().into(),
-        ))?;
-      }
-      // Double check the `to` topic
-      if log.topics().get(2) != Some(&to.into_word()) {
-        Err(TransportErrorKind::Custom(
-          "node returned a transfer for a different `to` than filtered to".to_string().into(),
-        ))?;
-      }
+      // The logs, indexed by their transactions
+      let mut transaction_logs = HashMap::new();
+      // Index the logs by their transactions
+      for log in logs {
+        // Double check the address which emitted this log
+        if log.address() != erc20 {
+          Err(TransportErrorKind::Custom(
+            "node returned logs for a different address than requested".to_string().into(),
+          ))?;
+        }
+        // Double check the event signature for this log
+        if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
+          Err(TransportErrorKind::Custom(
+            "node returned a log for a different topic than filtered to".to_string().into(),
+          ))?;
+        }
+        // Double check the `to` topic
+        if log.topics().get(2) != Some(&to.into_word()) {
+          Err(TransportErrorKind::Custom(
+            "node returned a transfer for a different `to` than filtered to".to_string().into(),
+          ))?;
+        }

-      let tx_id = log
-        .transaction_hash
-        .ok_or_else(|| {
-          TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into())
-        })?
-        .0;
+        let tx_id = log
+          .transaction_hash
+          .ok_or_else(|| {
+            TransportErrorKind::Custom("log didn't specify its transaction hash".to_string().into())
+          })?
+          .0;

-      transaction_logs.entry(tx_id).or_insert_with(|| Vec::with_capacity(1)).push(log);
-    }
+        transaction_logs.entry(tx_id).or_insert_with(|| Vec::with_capacity(1)).push(log);
+      }

+      transaction_logs
+    };

-    // Use `FuturesUnordered` so these RPC calls run in parallel
-    let mut futures = FuturesUnordered::new();
-    for (tx_id, transfer_logs) in transaction_logs {
-      futures.push(Self::top_level_transfer(&self.provider, tx_id, transfer_logs));
-    }
+    let mut transfers = vec![];
+    {
+      // Use `FuturesUnordered` so these RPC calls run in parallel
+      let mut futures = FuturesUnordered::new();
+      for (tx_id, transfer_logs) in &mut logs {
+        // Sort the logs to ensure the the earliest logs are first
+        transfer_logs.sort_by_key(|log| log.log_index);
+        futures.push(Self::top_level_transfer(provider, erc20, *tx_id, transfer_logs));
+      }

-    let mut top_level_transfers = vec![];
-    while let Some(top_level_transfer) = futures.next().await {
-      match top_level_transfer {
-        // Top-level transfer
-        Ok(Some(top_level_transfer)) => top_level_transfers.push(top_level_transfer),
-        // Not a top-level transfer
-        Ok(None) => continue,
-        // Failed to get this transaction's information so abort
-        Err(e) => Err(e)?,
-      }
-    }
+      while let Some(transfer) = futures.next().await {
+        match transfer {
+          // Top-level transfer
+          Ok(Some(transfer)) => transfers.push(transfer),
+          // Not a top-level transfer
+          Ok(None) => continue,
+          // Failed to get this transaction's information so abort
+          Err(e) => Err(e)?,
+        }
+      }
+    }

-    Ok(top_level_transfers)
+    Ok(TopLevelTransfers { logs, transfers })
   }
 }


@@ -14,7 +14,7 @@ mod borsh;
 pub use borsh::*;

 /// An index of a log within a block.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 #[borsh(crate = "::borsh")]
 pub struct LogIndex {
   /// The hash of the block which produced this log.


@@ -29,7 +29,7 @@ use serai_client::{
 use ethereum_primitives::LogIndex;
 use ethereum_schnorr::{PublicKey, Signature};
 use ethereum_deployer::Deployer;
-use erc20::{Transfer, Erc20};
+use erc20::{Transfer, TopLevelTransfer, TopLevelTransfers, Erc20};

 use futures_util::stream::{StreamExt, FuturesUnordered};
@@ -451,35 +451,66 @@
     }
   }

-  /// Fetch the `InInstruction`s emitted by the Router from this block.
+  /// Fetch the `InInstruction`s for the Router for the specified inclusive range of blocks.
+  ///
+  /// This includes all `InInstruction` events from the Router and all top-level transfers to the
+  /// Router.
   ///
   /// This is not guaranteed to return them in any order.
   pub async fn in_instructions_unordered(
     &self,
     from_block: u64,
     to_block: u64,
-    allowed_tokens: &HashSet<Address>,
+    allowed_erc20s: &HashSet<Address>,
   ) -> Result<Vec<InInstruction>, RpcError<TransportErrorKind>> {
     // The InInstruction events for this block
-    let logs = {
+    let in_instruction_logs = {
       let filter = Filter::new().from_block(from_block).to_block(to_block).address(self.address);
       let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
       self.provider.get_logs(&filter).await?
     };

-    let mut in_instructions = Vec::with_capacity(logs.len());
+    // Define the Vec for the result now that we have the logs as a size hint
+    let mut in_instructions = Vec::with_capacity(in_instruction_logs.len());

-    /*
-      We check that for all InInstructions for ERC20s emitted, a corresponding transfer occurred.
-      On this initial loop, we just queue the ERC20 InInstructions for later verification.
-
-      We don't do this for ETH as it'd require tracing the transaction, which is non-trivial. It
-      also isn't necessary as all of this is solely defense in depth.
-    */
-    let mut erc20s = HashSet::new();
-    let mut erc20_transfer_logs = FuturesUnordered::new();
-    let mut erc20_transactions = HashSet::new();
-    let mut erc20_in_instructions = vec![];
-    for log in logs {
+    // Handle the top-level transfers for this block
+    let mut justifying_erc20_transfer_logs = HashSet::new();
+    let erc20_transfer_logs = {
+      let mut transfers = FuturesUnordered::new();
+      for erc20 in allowed_erc20s {
+        transfers.push(async move {
+          (
+            erc20,
+            Erc20::top_level_transfers_unordered(
+              &self.provider,
+              from_block,
+              to_block,
+              *erc20,
+              self.address,
+            )
+            .await,
+          )
+        });
+      }
+
+      let mut logs = HashMap::with_capacity(allowed_erc20s.len());
+      while let Some((token, transfers)) = transfers.next().await {
+        let TopLevelTransfers { logs: token_logs, transfers } = transfers?;
+        logs.insert(token, token_logs);
+        // Map the top-level transfer to an InInstruction
+        for transfer in transfers {
+          let TopLevelTransfer { id, transaction_hash, from, amount, data } = transfer;
+          justifying_erc20_transfer_logs.insert(transfer.id);
+          let in_instruction =
+            InInstruction { id, transaction_hash, from, coin: Coin::Erc20(*token), amount, data };
+          in_instructions.push(in_instruction);
+        }
+      }
+      logs
+    };
+
+    // Now handle the InInstruction events
+    for log in in_instruction_logs {
       // Double check the address which emitted this log
       if log.address() != self.address {
         Err(TransportErrorKind::Custom(
@@ -491,18 +522,22 @@ impl Router {
         continue;
       }

-      let id = LogIndex {
-        block_hash: log
-          .block_hash
-          .ok_or_else(|| {
-            TransportErrorKind::Custom("log didn't have its block hash set".to_string().into())
-          })?
-          .into(),
-        index_within_block: log.log_index.ok_or_else(|| {
-          TransportErrorKind::Custom("log didn't have its index set".to_string().into())
-        })?,
+      let log_index = |log: &Log| -> Result<LogIndex, TransportErrorKind> {
+        Ok(LogIndex {
+          block_hash: log
+            .block_hash
+            .ok_or_else(|| {
+              TransportErrorKind::Custom("log didn't have its block hash set".to_string().into())
+            })?
+            .into(),
+          index_within_block: log.log_index.ok_or_else(|| {
+            TransportErrorKind::Custom("log didn't have its index set".to_string().into())
+          })?,
+        })
       };
+      let id = log_index(&log)?;

       let transaction_hash = log.transaction_hash.ok_or_else(|| {
         TransportErrorKind::Custom("log didn't have its transaction hash set".to_string().into())
       })?;
@@ -530,135 +565,57 @@ impl Router {
       };

       match coin {
-        Coin::Ether => in_instructions.push(in_instruction),
+        Coin::Ether => {}
         Coin::Erc20(token) => {
-          if !allowed_tokens.contains(&token) {
+          // Check this is an allowed token
+          if !allowed_erc20s.contains(&token) {
             continue;
           }

-          // Fetch the ERC20 transfer events necessary to verify this InInstruction has a matching
-          // transfer
-          if !erc20s.contains(&token) {
-            erc20s.insert(token);
-            erc20_transfer_logs.push(async move {
-              let filter = Erc20::transfer_filter(from_block, to_block, token, self.address);
-              self.provider.get_logs(&filter).await.map(|logs| (token, logs))
-            });
-          }
-          erc20_transactions.insert(transaction_hash);
-          erc20_in_instructions.push((transaction_hash, in_instruction))
-        }
-      }
-    }
-
-    // Collect the ERC20 transfer logs
-    let erc20_transfer_logs = {
-      let mut collected = HashMap::with_capacity(erc20s.len());
-      while let Some(token_and_logs) = erc20_transfer_logs.next().await {
-        let (token, logs) = token_and_logs?;
-        collected.insert(token, logs);
-      }
-      collected
-    };
-
-    /*
-      For each transaction, it may have a top-level ERC20 transfer. That top-level transfer won't
-      be the transfer caused by the call to `inInstruction`, so we shouldn't consider it
-      justification for this `InInstruction` event.
-
-      Fetch all top-level transfers here so we can ignore them.
-    */
-    let mut erc20_top_level_transfers = FuturesUnordered::new();
-    let mut transaction_transfer_logs = HashMap::new();
-    for transaction in erc20_transactions {
-      // Filter to the logs for this specific transaction
-      let logs = erc20_transfer_logs
-        .values()
-        .flat_map(|logs_per_token| logs_per_token.iter())
-        .filter_map(|log| {
-          let log_transaction_hash = log.transaction_hash.ok_or_else(|| {
-            TransportErrorKind::Custom(
-              "log didn't have its transaction hash set".to_string().into(),
-            )
-          });
-          match log_transaction_hash {
-            Ok(log_transaction_hash) => {
-              if log_transaction_hash == transaction {
-                Some(Ok(log))
-              } else {
-                None
-              }
-            }
-            Err(e) => Some(Err(e)),
-          }
-        })
-        .collect::<Result<Vec<_>, _>>()?;
-
-      // Find the top-level transfer
-      erc20_top_level_transfers.push(Erc20::top_level_transfer(
-        &self.provider,
-        transaction,
-        logs.clone(),
-      ));
-      // Keep the transaction-indexed logs for the actual justifying
-      transaction_transfer_logs.insert(transaction, logs);
-    }
-
-    /*
-      In order to prevent a single transfer from being used to justify multiple distinct
-      InInstructions, we insert the transfer's log index into this HashSet.
-    */
-    let mut already_used_to_justify = HashSet::new();
-    // Collect the top-level transfers
-    while let Some(erc20_top_level_transfer) = erc20_top_level_transfers.next().await {
-      let erc20_top_level_transfer = erc20_top_level_transfer?;
-      // If this transaction had a top-level transfer...
-      if let Some(erc20_top_level_transfer) = erc20_top_level_transfer {
-        // Mark this log index as used so it isn't used again
-        already_used_to_justify.insert(erc20_top_level_transfer.id.index_within_block);
-      }
-    }
-
-    // Now, for each ERC20 InInstruction, find a justifying transfer log
-    for (transaction_hash, in_instruction) in erc20_in_instructions {
-      let mut justified = false;
-      for log in &transaction_transfer_logs[&transaction_hash] {
-        let log_index = log.log_index.ok_or_else(|| {
-          TransportErrorKind::Custom(
-            "log in transaction receipt didn't have its log index set".to_string().into(),
-          )
-        })?;
-
-        // Ensure we didn't already use this transfer to check a distinct InInstruction event
-        if already_used_to_justify.contains(&log_index) {
-          continue;
-        }
-
-        // Check if this log is from the token we expected to be transferred
-        if log.address() != Address::from(in_instruction.coin) {
-          continue;
-        }
-        // Check if this is a transfer log
-        if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
-          continue;
-        }
-        let Ok(transfer) = Transfer::decode_log(&log.inner.clone(), true) else { continue };
-
-        // Check if this aligns with the InInstruction
-        if (transfer.from == in_instruction.from) &&
-          (transfer.to == self.address) &&
-          (transfer.value == in_instruction.amount)
-        {
-          already_used_to_justify.insert(log_index);
-          justified = true;
-          break;
-        }
-      }
-      if !justified {
-        // This is an exploit, a non-conforming ERC20, or an invalid connection
-        Err(TransportErrorKind::Custom(
-          "ERC20 InInstruction with no matching transfer log".to_string().into(),
-        ))?;
-      }
+          /*
+            We check that for all InInstructions for ERC20s emitted, a corresponding transfer
+            occurred.
+
+            We don't do this for ETH as it'd require tracing the transaction, which is non-trivial.
+            It also isn't necessary as all of this is solely defense in depth.
+          */
+          let mut justified = false;
+          // These logs are returned from `top_level_transfers_unordered` and we don't require any
+          // ordering of them
+          for log in erc20_transfer_logs[&token].get(&transaction_hash).unwrap_or(&vec![]) {
+            let log_index = log_index(log)?;
+
+            // Ensure we didn't already use this transfer to justify a distinct InInstruction
+            if justifying_erc20_transfer_logs.contains(&log_index) {
+              continue;
+            }
+
+            // Check if this log is from the token we expected to be transferred
+            if log.address() != Address::from(in_instruction.coin) {
+              continue;
+            }
+            // Check if this is a transfer log
+            if log.topics().first() != Some(&Transfer::SIGNATURE_HASH) {
+              continue;
+            }
+            let Ok(transfer) = Transfer::decode_log(&log.inner.clone(), true) else { continue };
+
+            // Check if this aligns with the InInstruction
+            if (transfer.from == in_instruction.from) &&
+              (transfer.to == self.address) &&
+              (transfer.value == in_instruction.amount)
+            {
+              justifying_erc20_transfer_logs.insert(log_index);
+              justified = true;
+              break;
+            }
+          }
+
+          if !justified {
+            // This is an exploit, a non-conforming ERC20, or an invalid connection
+            Err(TransportErrorKind::Custom(
+              "ERC20 InInstruction with no matching transfer log".to_string().into(),
+            ))?;
+          }
+        }
+      }

       in_instructions.push(in_instruction);
     }
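The net effect of the rewritten arm: every ERC20 `InInstruction` event must be justified by a distinct `Transfer` log from the same transaction, and a log already consumed can't be reused; top-level transfers count as consumed because their ids were inserted into `justifying_erc20_transfer_logs` when they were mapped to their own `InInstruction`s. A reduced sketch of the matching predicate, where `log_matches` is only a hypothetical name for the checks shown above:

// A `Transfer` log justifies an `InInstruction` event if the sender, the recipient (the Router),
// and the value all line up
fn log_matches(transfer: &Transfer, in_instruction: &InInstruction, router: Address) -> bool {
  (transfer.from == in_instruction.from) &&
    (transfer.to == router) &&
    (transfer.value == in_instruction.amount)
}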
@@ -666,7 +623,7 @@ impl Router {
     Ok(in_instructions)
   }

-  /// Fetch the executed actions from this block.
+  /// Fetch the executed actions for the specified range of blocks.
   pub async fn executed(
     &self,
     from_block: u64,


@@ -1,13 +1,11 @@
-use alloy_core::primitives::{hex, Address, U256, Bytes, TxKind, PrimitiveSignature};
+use alloy_core::primitives::{hex, Address, U256, Bytes, TxKind};
 use alloy_sol_types::{SolValue, SolCall};
-use alloy_consensus::{TxLegacy, SignableTransaction, Signed};
+use alloy_consensus::TxLegacy;
 use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
 use alloy_provider::Provider;

-use ethereum_primitives::keccak256;
-
 use crate::tests::Test;

 #[rustfmt::skip]


@@ -5,13 +5,12 @@ use rand_core::{RngCore, OsRng};
 use group::ff::Field;
 use k256::{Scalar, ProjectivePoint};

-use alloy_core::primitives::{Address, U256, TxKind};
-use alloy_sol_types::SolCall;
-use alloy_consensus::TxLegacy;
-#[rustfmt::skip]
-use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInput, TransactionRequest, TransactionReceipt};
+use alloy_core::primitives::{Address, U256};
+use alloy_sol_types::{SolCall, SolEvent};
+use alloy_consensus::{TxLegacy, Signed};
+use alloy_rpc_types_eth::{BlockNumberOrTag, TransactionInput, TransactionRequest};

 use alloy_simple_request_transport::SimpleRequest;
 use alloy_rpc_client::ClientBuilder;
 use alloy_provider::{Provider, RootProvider};
@@ -262,6 +261,56 @@
     (coin, amount, shorthand, tx)
   }

+  async fn publish_in_instruction_tx(
+    &self,
+    tx: Signed<TxLegacy>,
+    coin: Coin,
+    amount: U256,
+    shorthand: &Shorthand,
+  ) {
+    let receipt = ethereum_test_primitives::publish_tx(&self.provider, tx.clone()).await;
+    assert!(receipt.status());
+
+    let block = receipt.block_number.unwrap();
+
+    if matches!(coin, Coin::Erc20(_)) {
+      // If we don't whitelist this token, we shouldn't be yielded an InInstruction
+      let in_instructions =
+        self.router.in_instructions_unordered(block, block, &HashSet::new()).await.unwrap();
+      assert!(in_instructions.is_empty());
+    }
+
+    let in_instructions = self
+      .router
+      .in_instructions_unordered(
+        block,
+        block,
+        &if let Coin::Erc20(token) = coin { HashSet::from([token]) } else { HashSet::new() },
+      )
+      .await
+      .unwrap();
+    assert_eq!(in_instructions.len(), 1);
+
+    let in_instruction_log_index = receipt.inner.logs().iter().find_map(|log| {
+      (log.topics().first() == Some(&crate::InInstructionEvent::SIGNATURE_HASH))
+        .then(|| log.log_index.unwrap())
+    });
+    // If this isn't an InInstruction event, it'll be a top-level transfer event
+    let log_index = in_instruction_log_index.unwrap_or(0);
+
+    assert_eq!(
+      in_instructions[0],
+      InInstruction {
+        id: LogIndex { block_hash: *receipt.block_hash.unwrap(), index_within_block: log_index },
+        transaction_hash: **tx.hash(),
+        from: tx.recover_signer().unwrap(),
+        coin,
+        amount,
+        data: shorthand.encode(),
+      }
+    );
+  }
+
   fn escape_hatch_tx(&self, escape_to: Address) -> TxLegacy {
     let msg = Router::escape_hatch_message(self.chain_id, self.state.next_nonce, escape_to);
     let sig = sign(self.state.key.unwrap(), &msg);
@@ -344,31 +393,11 @@ async fn test_eth_in_instruction() {
   }
   let tx = ethereum_primitives::deterministically_sign(tx);

-  let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await;
-  assert!(receipt.status());
-
-  let block = receipt.block_number.unwrap();
-  let in_instructions =
-    test.router.in_instructions_unordered(block, block, &HashSet::new()).await.unwrap();
-  assert_eq!(in_instructions.len(), 1);
-  assert_eq!(
-    in_instructions[0],
-    InInstruction {
-      id: LogIndex {
-        block_hash: *receipt.block_hash.unwrap(),
-        index_within_block: receipt.inner.logs()[0].log_index.unwrap(),
-      },
-      transaction_hash: **tx.hash(),
-      from: tx.recover_signer().unwrap(),
-      coin,
-      amount,
-      data: shorthand.encode(),
-    }
-  );
+  test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await;
 }

 #[tokio::test]
-async fn test_erc20_in_instruction() {
+async fn test_erc20_router_in_instruction() {
   let mut test = Test::new().await;
   test.confirm_next_serai_key().await;
@@ -404,39 +433,28 @@ async fn test_erc20_in_instruction() {
     erc20.mint(&test, signer, amount).await;
     erc20.approve(&test, signer, test.router.address(), amount).await;
   }
-  let receipt = ethereum_test_primitives::publish_tx(&test.provider, tx.clone()).await;
-  assert!(receipt.status());
-
-  let block = receipt.block_number.unwrap();
-
-  // If we don't whitelist this token, we shouldn't be yielded an InInstruction
-  {
-    let in_instructions =
-      test.router.in_instructions_unordered(block, block, &HashSet::new()).await.unwrap();
-    assert!(in_instructions.is_empty());
-  }
-
-  let in_instructions = test
-    .router
-    .in_instructions_unordered(block, block, &HashSet::from([coin.into()]))
-    .await
-    .unwrap();
-  assert_eq!(in_instructions.len(), 1);
-  assert_eq!(
-    in_instructions[0],
-    InInstruction {
-      id: LogIndex {
-        block_hash: *receipt.block_hash.unwrap(),
-        // First is the Transfer log, then the InInstruction log
-        index_within_block: receipt.inner.logs()[1].log_index.unwrap(),
-      },
-      transaction_hash: **tx.hash(),
-      from: tx.recover_signer().unwrap(),
-      coin,
-      amount,
-      data: shorthand.encode(),
-    }
-  );
+  test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await;
+}
+
+#[tokio::test]
+async fn test_erc20_top_level_transfer_in_instruction() {
+  let mut test = Test::new().await;
+  test.confirm_next_serai_key().await;
+
+  let erc20 = Erc20::deploy(&test).await;
+
+  let coin = Coin::Erc20(erc20.address());
+  let amount = U256::from(1);
+  let shorthand = Test::in_instruction();
+
+  let mut tx = test.router.in_instruction(coin, amount, &shorthand);
+  tx.gas_price = 100_000_000_000u128;
+  tx.gas_limit = 1_000_000;
+
+  let tx = ethereum_primitives::deterministically_sign(tx);
+  erc20.mint(&test, tx.recover_signer().unwrap(), amount).await;
+  test.publish_in_instruction_tx(tx, coin, amount, &shorthand).await;
 }

 #[tokio::test]


@@ -16,9 +16,7 @@ use serai_db::Db;
 use scanner::ScannerFeed;

 use ethereum_schnorr::PublicKey;
-use ethereum_erc20::{TopLevelTransfer, Erc20};
-#[rustfmt::skip]
-use ethereum_router::{Coin as EthereumCoin, InInstruction as EthereumInInstruction, Executed, Router};
+use ethereum_router::{InInstruction as EthereumInInstruction, Executed, Router};

 use crate::{
   TOKENS, ETHER_DUST, DAI_DUST, InitialSeraiKey,
@@ -158,31 +156,13 @@ impl<D: Db> ScannerFeed for Rpc<D> {
    };

    async fn sync_block(
-     provider: Arc<RootProvider<SimpleRequest>>,
      router: Router,
      block: Header,
    ) -> Result<(Vec<EthereumInInstruction>, Vec<Executed>), RpcError<TransportErrorKind>> {
-     let mut instructions = router
+     let instructions = router
        .in_instructions_unordered(block.number, block.number, &HashSet::from(TOKENS))
        .await?;

-     for token in TOKENS {
-       for TopLevelTransfer { id, transaction_hash, from, amount, data } in
-         Erc20::new(provider.clone(), token)
-           .top_level_transfers_unordered(block.number, block.number, router.address())
-           .await?
-       {
-         instructions.push(EthereumInInstruction {
-           id,
-           transaction_hash,
-           from,
-           coin: EthereumCoin::Erc20(token),
-           amount,
-           data,
-         });
-       }
-     }
-
      let executed = router.executed(block.number, block.number).await?;

      Ok((instructions, executed))
@@ -214,7 +194,7 @@ impl<D: Db> ScannerFeed for Rpc<D> {
        to_check = *to_check_block.parent_hash;

        // Spawn a task to sync this block
-       join_set.spawn(sync_block(self.provider.clone(), router.clone(), to_check_block));
+       join_set.spawn(sync_block(router.clone(), to_check_block));
      }

      let mut instructions = vec![];