Start on the task to manage the swarm

Commit to serai-dex/serai (mirror of https://github.com/serai-dex/serai.git).
@@ -25,6 +25,7 @@ bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }

 blake2 = { version = "0.10", default-features = false, features = ["std"] }
+schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

 transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
 ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
@@ -1,4 +1,5 @@
 use core::future::Future;
+use std::collections::HashSet;

 use rand_core::{RngCore, OsRng};

@@ -25,6 +26,7 @@ struct DialTask {
 }

 impl ContinuallyRan for DialTask {
+  // Only run every thirty seconds, not the default of every five
   const DELAY_BETWEEN_ITERATIONS: u64 = 30;

   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
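
The comment above is the point of this hunk: `ContinuallyRan` supplies a default iteration delay, and `DialTask` overrides the associated constant so dialing is only attempted every thirty seconds. A minimal sketch of that override pattern, using a simplified, hypothetical stand-in for the repository's `ContinuallyRan` trait (the real trait has more to it):

use core::future::Future;

// Hypothetical, simplified stand-in for the repository's `ContinuallyRan` trait.
trait ContinuallyRan {
  // The default delay between iterations, in seconds.
  const DELAY_BETWEEN_ITERATIONS: u64 = 5;

  // Run one iteration, returning whether any work was done or an error string.
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>>;
}

struct DialTask;

impl ContinuallyRan for DialTask {
  // Only run every thirty seconds, not the default of every five.
  const DELAY_BETWEEN_ITERATIONS: u64 = 30;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    // A real implementation would check peer counts and dial as needed.
    async move { Ok::<_, String>(false) }
  }
}

fn main() {
  // The task runner (not shown) would sleep DELAY_BETWEEN_ITERATIONS between calls.
  println!("dial task delay: {}s", <DialTask as ContinuallyRan>::DELAY_BETWEEN_ITERATIONS);
}
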
@@ -37,7 +39,7 @@ impl ContinuallyRan for DialTask {
        .peers
        .peers
        .read()
-        .unwrap()
+        .await
        .iter()
        .map(|(network, peers)| (*network, peers.len()))
        .collect::<Vec<_>>();
@@ -54,9 +56,9 @@ impl ContinuallyRan for DialTask {
        (peer_count <
          self
            .validators
-            .validators()
+            .by_network()
            .get(&network)
-            .map(Vec::len)
+            .map(HashSet::len)
            .unwrap_or(0)
            .saturating_sub(1))
      {
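
This condition dials until the peer count for a network reaches the number of validators registered for that network less one, presumably discounting the local validator itself. A standalone sketch of that target calculation, with plain standard-library types in place of the repository's `Validators` and `Peers` wrappers (`Network` and `Peer` here are hypothetical placeholders):

use std::collections::{HashMap, HashSet};

// Hypothetical identifiers standing in for `NetworkId`/`PeerId`.
type Network = &'static str;
type Peer = u64;

// Returns true if more peers should be dialed for `network`: we're connected to
// fewer peers than there are *other* validators on that network.
fn needs_more_peers(
  peers: &HashMap<Network, HashSet<Peer>>,
  validators_by_network: &HashMap<Network, HashSet<Peer>>,
  network: Network,
) -> bool {
  let peer_count = peers.get(&network).map(HashSet::len).unwrap_or(0);
  let target = validators_by_network.get(&network).map(HashSet::len).unwrap_or(0).saturating_sub(1);
  peer_count < target
}

fn main() {
  let mut validators = HashMap::new();
  validators.insert("bitcoin", HashSet::from([1, 2, 3, 4]));
  let mut peers = HashMap::new();
  peers.insert("bitcoin", HashSet::from([2, 3]));
  // Four validators means a target of three peers; with two connected, we still dial.
  assert!(needs_more_peers(&peers, &validators, "bitcoin"));
}
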
@@ -1,11 +1,12 @@
 use std::{
-  sync::{Arc, RwLock},
+  sync::Arc,
   collections::{HashSet, HashMap},
+  time::{Duration, Instant},
 };

 use serai_client::primitives::{NetworkId, PublicKey};

-use tokio::sync::mpsc;
+use tokio::sync::{mpsc, RwLock};

 use futures_util::StreamExt;
 use libp2p::{
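
Replacing `std::sync::RwLock` with `tokio::sync::RwLock` is what turns the `.read().unwrap()` call sites above into `.read().await`: the tokio lock yields to the executor while waiting instead of blocking the thread, and acquiring its guard cannot fail, so there is no poisoning to unwrap. A minimal sketch of the difference, assuming a shared per-network map similar in shape to the `Peers` structure:

use std::{sync::Arc, collections::HashMap};

use tokio::sync::RwLock;

#[tokio::main]
async fn main() {
  // A shared map in roughly the shape of the peers-per-network tracking.
  let peers: Arc<RwLock<HashMap<&'static str, usize>>> = Arc::new(RwLock::new(HashMap::new()));

  // With std::sync::RwLock these would be `peers.write().unwrap()` / `peers.read().unwrap()`,
  // blocking the current thread and panicking if the lock was poisoned. The tokio RwLock is
  // awaited instead, so the task yields while waiting and there is no poisoning.
  peers.write().await.insert("bitcoin", 3);
  let count = peers.read().await.get("bitcoin").copied().unwrap_or(0);
  assert_eq!(count, 3);
}
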
@@ -16,6 +17,7 @@ use libp2p::{

 /// A struct to sync the validators from the Serai node in order to keep track of them.
 mod validators;
+use validators::Validators;

 /// The authentication protocol upgrade to limit the P2P network to active validators.
 mod authenticate;
@@ -73,3 +75,133 @@ struct Behavior {
   reqres: reqres::Behavior,
   gossip: gossip::Behavior,
 }
+
+struct SwarmTask {
+  to_dial: mpsc::UnboundedReceiver<DialOpts>,
+
+  validators: Arc<RwLock<Validators>>,
+  last_refreshed_validators: Instant,
+  next_refresh_validators: Instant,
+
+  peers: Peers,
+  rebuild_peers_at: Instant,
+
+  swarm: Swarm<Behavior>,
+}
+
+impl SwarmTask {
+  async fn run(mut self) {
+    loop {
+      let time_till_refresh_validators =
+        self.next_refresh_validators.saturating_duration_since(Instant::now());
+      let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now());
+
+      tokio::select! {
+        biased;
+
+        // Refresh the instance of validators we use to track peers/share with authenticate
+        () = tokio::time::sleep(time_till_refresh_validators) => {
+          const TIME_BETWEEN_REFRESH_VALIDATORS: Duration = Duration::from_secs(5);
+          const MAX_TIME_BETWEEN_REFRESH_VALIDATORS: Duration = Duration::from_secs(120);
+
+          let update = self.validators.write().await.update().await;
+          match update {
+            Ok(removed) => {
+              for removed in removed {
+                let _: Result<_, _> = self.swarm.disconnect_peer_id(removed);
+              }
+              self.last_refreshed_validators = Instant::now();
+              self.next_refresh_validators = Instant::now() + TIME_BETWEEN_REFRESH_VALIDATORS;
+            }
+            Err(e) => {
+              log::warn!("couldn't refresh validators: {e:?}");
+              // Increase the delay before the next refresh by using the time since the last
+              // refresh. This will be 5 seconds, then 5 seconds, then 10 seconds, then 20...
+              let time_since_last = self
+                .next_refresh_validators
+                .saturating_duration_since(self.last_refreshed_validators);
+              // But limit the delay
+              self.next_refresh_validators =
+                Instant::now() + time_since_last.min(MAX_TIME_BETWEEN_REFRESH_VALIDATORS);
+            },
+          }
+        }
+
+        // Rebuild the peers every 10 minutes
+        //
+        // This handles edge cases such as when a validator changes the networks they're present
+        // in, race conditions, or any other edge cases/quirks which would otherwise risk spiraling
+        // out of control
+        () = tokio::time::sleep(time_till_rebuild_peers) => {
+          const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);
+
+          let validators_by_network = self.validators.read().await.by_network().clone();
+          let connected = self.swarm.connected_peers().copied().collect::<HashSet<_>>();
+          let mut peers = HashMap::new();
+          for (network, validators) in validators_by_network {
+            peers.insert(network, validators.intersection(&connected).copied().collect());
+          }
+          *self.peers.peers.write().await = peers;
+
+          self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
+        }
+
+        // Dial peers we're instructed to
+        dial_opts = self.to_dial.recv() => {
+          let dial_opts = dial_opts.expect("DialTask was closed?");
+          let _: Result<_, _> = self.swarm.dial(dial_opts);
+        }
+
+        // Handle swarm events
+        event = self.swarm.next() => {
+          // `Swarm::next` will never return `Poll::Ready(None)`
+          // https://docs.rs/
+          //   libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
+          let event = event.unwrap();
+          match event {
+            SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => todo!("TODO"),
+            SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => todo!("TODO"),
+            // New connection, so update peers
+            SwarmEvent::ConnectionEstablished { peer_id, .. } => {
+              let Some(networks) =
+                self.validators.read().await.networks(&peer_id).cloned() else { continue };
+              for network in networks {
+                self
+                  .peers
+                  .peers
+                  .write()
+                  .await
+                  .entry(network)
+                  .or_insert_with(HashSet::new)
+                  .insert(peer_id);
+              }
+            },
+            // Connection closed, so update peers
+            SwarmEvent::ConnectionClosed { peer_id, .. } => {
+              let Some(networks) =
+                self.validators.read().await.networks(&peer_id).cloned() else { continue };
+              for network in networks {
+                self
+                  .peers
+                  .peers
+                  .write()
+                  .await
+                  .entry(network)
+                  .or_insert_with(HashSet::new)
+                  .remove(&peer_id);
+              }
+            },
+            SwarmEvent::IncomingConnection { .. } |
+              SwarmEvent::IncomingConnectionError { .. } |
+              SwarmEvent::OutgoingConnectionError { .. } |
+              SwarmEvent::NewListenAddr { .. } |
+              SwarmEvent::ExpiredListenAddr { .. } |
+              SwarmEvent::ListenerClosed { .. } |
+              SwarmEvent::ListenerError { .. } |
+              SwarmEvent::Dialing { .. } => {}
+          }
+        }
+      }
+    }
+  }
+}
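
The error branch of the refresh arm implements a doubling backoff: the delay applied after a failure equals the time elapsed since the last successful refresh, so consecutive failures wait 5, 10, 20, 40... seconds, capped at two minutes. A self-contained sketch of just that arithmetic, simulating failures at the exact moment each refresh comes due (the swarm state and the `tokio::select!` loop are omitted):

use std::time::{Duration, Instant};

const TIME_BETWEEN_REFRESH_VALIDATORS: Duration = Duration::from_secs(5);
const MAX_TIME_BETWEEN_REFRESH_VALIDATORS: Duration = Duration::from_secs(120);

fn main() {
  // Pretend the last successful refresh happened now, with the next one due in 5 seconds.
  let last_refreshed = Instant::now();
  let mut next_refresh = last_refreshed + TIME_BETWEEN_REFRESH_VALIDATORS;

  // Simulate consecutive failures, each occurring exactly when the refresh was due.
  for attempt in 1 ..= 6 {
    let failed_at = next_refresh;
    // Same arithmetic as the error branch: delay by the time elapsed since the last
    // *successful* refresh, capped at the maximum.
    let time_since_last = next_refresh.saturating_duration_since(last_refreshed);
    next_refresh = failed_at + time_since_last.min(MAX_TIME_BETWEEN_REFRESH_VALIDATORS);
    let wait = next_refresh.saturating_duration_since(failed_at);
    // Prints waits of 5, 10, 20, 40, 80, then 120 seconds (the cap).
    println!("failure {attempt}: wait {}s before retrying", wait.as_secs());
  }
}
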
@@ -1,5 +1,5 @@
 use core::{fmt, time::Duration};
-use std::io::{self, Read};
+use std::io;

 use async_trait::async_trait;

@@ -1,4 +1,4 @@
-use std::collections::HashMap;
+use std::collections::{HashSet, HashMap};

 use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};

@@ -12,13 +12,18 @@ pub(crate) struct Validators {
   // A cache for which session we're populated with the validators of
   sessions: HashMap<NetworkId, Session>,
   // The validators by network
-  by_network: HashMap<NetworkId, Vec<PeerId>>,
-  // The set of all validators (as a HashMap<PeerId, usize> to represent the amount of inclusions)
-  set: HashMap<PeerId, usize>,
+  by_network: HashMap<NetworkId, HashSet<PeerId>>,
+  // The validators and their networks
+  validators: HashMap<PeerId, HashSet<NetworkId>>,
 }

 impl Validators {
-  pub(crate) async fn update(&mut self) -> Result<(), String> {
+  /// Update the view of the validators.
+  ///
+  /// Returns all validators removed from the active validator set.
+  pub(crate) async fn update(&mut self) -> Result<HashSet<PeerId>, String> {
+    let mut removed = HashSet::new();
+
     let temporal_serai =
       self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
     let temporal_serai = temporal_serai.validator_sets();
@@ -35,20 +40,22 @@ impl Validators {
       let new_validators =
         temporal_serai.active_network_validators(network).await.map_err(|e| format!("{e:?}"))?;
       let new_validators =
-        new_validators.into_iter().map(peer_id_from_public).collect::<Vec<_>>();
+        new_validators.into_iter().map(peer_id_from_public).collect::<HashSet<_>>();

       // Remove the existing validators
-      for validator in self.by_network.remove(&network).unwrap_or(vec![]) {
-        let mut inclusions = self.set.remove(&validator).unwrap();
-        inclusions -= 1;
-        if inclusions != 0 {
-          self.set.insert(validator, inclusions);
+      for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
+        let mut networks = self.validators.remove(&validator).unwrap();
+        networks.remove(&network);
+        if networks.is_empty() {
+          removed.insert(validator);
+        } else {
+          self.validators.insert(validator, networks);
         }
       }

       // Add the new validators
       for validator in new_validators.iter().copied() {
-        *self.set.entry(validator).or_insert(0) += 1;
+        self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
       }
       self.by_network.insert(network, new_validators);

@@ -56,14 +63,19 @@ impl Validators {
         self.sessions.insert(network, session);
       }
     }
-    Ok(())
+
+    Ok(removed)
   }

-  pub(crate) fn validators(&self) -> &HashMap<NetworkId, Vec<PeerId>> {
+  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
     &self.by_network
   }

   pub(crate) fn contains(&self, peer_id: &PeerId) -> bool {
-    self.set.contains_key(peer_id)
+    self.validators.contains_key(peer_id)
+  }
+
+  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
+    self.validators.get(peer_id)
   }
 }
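
Replacing the per-peer inclusion counter with an explicit set of networks per validator is what lets `update` return exactly which peers left every active set, so the swarm task can disconnect them. A minimal, self-contained sketch of that bookkeeping for a single network update, with hypothetical `Peer`/`Network` placeholders standing in for `PeerId`/`NetworkId` and the Serai RPC calls omitted:

use std::collections::{HashMap, HashSet};

// Hypothetical stand-ins for the real PeerId / NetworkId types.
type Peer = &'static str;
type Network = &'static str;

#[derive(Default)]
struct Validators {
  by_network: HashMap<Network, HashSet<Peer>>,
  validators: HashMap<Peer, HashSet<Network>>,
}

impl Validators {
  // Replace the validator set for one network, returning peers no longer present in
  // *any* network (the caller disconnects these from the swarm).
  fn update_network(&mut self, network: Network, new_validators: HashSet<Peer>) -> HashSet<Peer> {
    let mut removed = HashSet::new();

    // Remove the existing validators for this network.
    for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
      let mut networks = self.validators.remove(&validator).unwrap();
      networks.remove(&network);
      if networks.is_empty() {
        removed.insert(validator);
      } else {
        self.validators.insert(validator, networks);
      }
    }

    // Add the new validators.
    for validator in new_validators.iter().copied() {
      self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
    }
    self.by_network.insert(network, new_validators);

    removed
  }
}

fn main() {
  let mut validators = Validators::default();
  assert!(validators.update_network("monero", HashSet::from(["alice", "bob"])).is_empty());
  assert!(validators.update_network("bitcoin", HashSet::from(["alice"])).is_empty());
  // "bob" drops out of the monero set and isn't on any other network, so it's reported
  // for disconnection; "alice" stays known via the bitcoin set.
  let removed = validators.update_network("monero", HashSet::from(["alice"]));
  assert_eq!(removed, HashSet::from(["bob"]));
}
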