Add commentary on the use of FuturesOrdered

This commit is contained in:
Luke Parker
2025-01-04 23:28:54 -05:00
parent 9a5a661d04
commit 479ca0410a
2 changed files with 8 additions and 0 deletions

View File

@@ -107,6 +107,9 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
// Sync the next set of upcoming blocks all at once to minimize latency // Sync the next set of upcoming blocks all at once to minimize latency
const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10; const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10;
// FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
// sufficiently polled. Considering our processing loop is minimal and it does poll this,
// it's fine.
let mut set = FuturesOrdered::new(); let mut set = FuturesOrdered::new();
for block_number in for block_number in
next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE) next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)

View File

@@ -100,6 +100,11 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
// Sync the next set of upcoming blocks all at once to minimize latency // Sync the next set of upcoming blocks all at once to minimize latency
const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50; const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
// FuturesOrdered can be bad practice due to potentially causing timeouts if it isn't
// sufficiently polled. Our processing loop isn't minimal, itself making multiple requests,
// but the loop body should only be executed a few times a week. It's better to get through
// most blocks with this optimization, and have timeouts a few times a week, than not have
// this at all.
let mut set = FuturesOrdered::new(); let mut set = FuturesOrdered::new();
for block_number in for block_number in
next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE) next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)