Merge branch 'develop' into HEAD

Luke Parker
2024-06-06 02:43:33 -04:00
261 changed files with 10394 additions and 3812 deletions

View File

@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.69"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true

View File

@@ -36,7 +36,7 @@ async-lock = "3"
simple-request = { path = "../../common/request", version = "0.1", optional = true }
bitcoin = { version = "0.31", optional = true }
bitcoin = { version = "0.32", optional = true }
ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true }
monero-serai = { path = "../../coins/monero", version = "0.1.4-alpha", optional = true }

View File

@@ -1,4 +1,4 @@
use core::str::FromStr;
use core::{str::FromStr, fmt};
use scale::{Encode, Decode};
@@ -6,38 +6,46 @@ use bitcoin::{
hashes::{Hash as HashTrait, hash160::Hash},
PubkeyHash, ScriptHash,
network::Network,
WitnessVersion, WitnessProgram,
address::{Error, Payload, NetworkChecked, Address as BAddressGeneric},
WitnessVersion, WitnessProgram, ScriptBuf,
address::{AddressType, NetworkChecked, Address as BAddress},
};
type BAddress = BAddressGeneric<NetworkChecked>;
#[derive(Clone, Eq, Debug)]
pub struct Address(BAddress);
pub struct Address(ScriptBuf);
impl PartialEq for Address {
fn eq(&self, other: &Self) -> bool {
// Since Serai defines the Bitcoin-address specification as a variant of the payload alone,
// define equivalency as the payload alone
self.0.payload() == other.0.payload()
// Since Serai defines the Bitcoin-address specification as a variant of the script alone,
// define equivalency as the script alone
self.0 == other.0
}
}
impl From<Address> for ScriptBuf {
fn from(addr: Address) -> ScriptBuf {
addr.0
}
}
impl FromStr for Address {
type Err = Error;
fn from_str(str: &str) -> Result<Address, Error> {
type Err = ();
fn from_str(str: &str) -> Result<Address, ()> {
Address::new(
BAddressGeneric::from_str(str)
.map_err(|_| Error::UnrecognizedScript)?
.require_network(Network::Bitcoin)?,
BAddress::from_str(str)
.map_err(|_| ())?
.require_network(Network::Bitcoin)
.map_err(|_| ())?
.script_pubkey(),
)
.ok_or(Error::UnrecognizedScript)
.ok_or(())
}
}
impl ToString for Address {
fn to_string(&self) -> String {
self.0.to_string()
impl fmt::Display for Address {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
BAddress::<NetworkChecked>::from_script(&self.0, Network::Bitcoin)
.map_err(|_| fmt::Error)?
.fmt(f)
}
}
@@ -54,55 +62,52 @@ enum EncodedAddress {
impl TryFrom<Vec<u8>> for Address {
type Error = ();
fn try_from(data: Vec<u8>) -> Result<Address, ()> {
Ok(Address(BAddress::new(
Network::Bitcoin,
match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? {
EncodedAddress::P2PKH(hash) => {
Payload::PubkeyHash(PubkeyHash::from_raw_hash(Hash::from_byte_array(hash)))
}
EncodedAddress::P2SH(hash) => {
Payload::ScriptHash(ScriptHash::from_raw_hash(Hash::from_byte_array(hash)))
}
EncodedAddress::P2WPKH(hash) => {
Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap())
}
EncodedAddress::P2WSH(hash) => {
Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap())
}
EncodedAddress::P2TR(key) => {
Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V1, key).unwrap())
}
},
)))
Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? {
EncodedAddress::P2PKH(hash) => {
ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash)))
}
EncodedAddress::P2SH(hash) => {
ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash)))
}
EncodedAddress::P2WPKH(hash) => {
ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap())
}
EncodedAddress::P2WSH(hash) => {
ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap())
}
EncodedAddress::P2TR(key) => {
ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap())
}
}))
}
}
fn try_to_vec(addr: &Address) -> Result<Vec<u8>, ()> {
let parsed_addr =
BAddress::<NetworkChecked>::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?;
Ok(
(match addr.0.payload() {
Payload::PubkeyHash(hash) => EncodedAddress::P2PKH(*hash.as_raw_hash().as_byte_array()),
Payload::ScriptHash(hash) => EncodedAddress::P2SH(*hash.as_raw_hash().as_byte_array()),
Payload::WitnessProgram(program) => match program.version() {
WitnessVersion::V0 => {
let program = program.program();
if program.len() == 20 {
let mut buf = [0; 20];
buf.copy_from_slice(program.as_ref());
EncodedAddress::P2WPKH(buf)
} else if program.len() == 32 {
let mut buf = [0; 32];
buf.copy_from_slice(program.as_ref());
EncodedAddress::P2WSH(buf)
} else {
Err(())?
}
}
WitnessVersion::V1 => {
let program_ref: &[u8] = program.program().as_ref();
EncodedAddress::P2TR(program_ref.try_into().map_err(|_| ())?)
}
_ => Err(())?,
},
(match parsed_addr.address_type() {
Some(AddressType::P2pkh) => {
EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array())
}
Some(AddressType::P2sh) => {
EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array())
}
Some(AddressType::P2wpkh) => {
let program = parsed_addr.witness_program().ok_or(())?;
let program = program.program().as_bytes();
EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?)
}
Some(AddressType::P2wsh) => {
let program = parsed_addr.witness_program().ok_or(())?;
let program = program.program().as_bytes();
EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?)
}
Some(AddressType::P2tr) => {
let program = parsed_addr.witness_program().ok_or(())?;
let program = program.program().as_bytes();
EncodedAddress::P2TR(program.try_into().map_err(|_| ())?)
}
_ => Err(())?,
})
.encode(),
@@ -116,20 +121,8 @@ impl From<Address> for Vec<u8> {
}
}
impl From<Address> for BAddress {
fn from(addr: Address) -> BAddress {
addr.0
}
}
impl AsRef<BAddress> for Address {
fn as_ref(&self) -> &BAddress {
&self.0
}
}
impl Address {
pub fn new(address: BAddress) -> Option<Self> {
pub fn new(address: ScriptBuf) -> Option<Self> {
let res = Self(address);
if try_to_vec(&res).is_ok() {
return Some(res);
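Since equality and serialization are now defined over the script alone, an address survives an encode/decode round-trip. A minimal usage sketch, assuming the Address API above is in scope (round_trip is an illustrative helper, not part of the diff):

use core::str::FromStr;

fn round_trip(s: &str) -> Result<(), ()> {
  let addr = Address::from_str(s)?; // parses, then requires Network::Bitcoin
  let encoded: Vec<u8> = addr.clone().into(); // SCALE-encoded EncodedAddress
  let decoded = Address::try_from(encoded)?; // rebuilds the script_pubkey
  assert_eq!(addr, decoded); // equality compares the underlying ScriptBuf alone
  Ok(())
}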

View File

@@ -1,4 +1,4 @@
use core::str::FromStr;
use core::{str::FromStr, fmt};
use scale::{Encode, Decode};
@@ -24,9 +24,9 @@ impl FromStr for Address {
}
}
impl ToString for Address {
fn to_string(&self) -> String {
self.0.to_string()
impl fmt::Display for Address {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
self.0.fmt(f)
}
}

View File

@@ -29,7 +29,12 @@ macro_rules! serai_test {
"--rpc-cors".to_string(),
"all".to_string(),
])
.replace_env(HashMap::from([("RUST_LOG".to_string(), "runtime=debug".to_string())]))
.replace_env(
HashMap::from([
("RUST_LOG".to_string(), "runtime=debug".to_string()),
("KEY".to_string(), " ".to_string()),
])
)
.set_publish_all_ports(true)
.set_handle(handle)
.set_start_policy(StartPolicy::Strict)

View File

@@ -14,7 +14,9 @@ async fn dht() {
TestBodySpecification::with_image(
Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
)
.replace_env([("SERAI_NAME".to_string(), name.to_string())].into())
.replace_env(
[("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), " ".to_string())].into(),
)
.set_publish_all_ports(true)
.set_handle(handle(name))
.set_start_policy(StartPolicy::Strict)

View File

@@ -102,7 +102,10 @@ async fn validator_set_rotation() {
"local".to_string(),
format!("--{name}"),
])
.replace_env(HashMap::from([("RUST_LOG=runtime".to_string(), "debug".to_string())]))
.replace_env(HashMap::from([
("RUST_LOG".to_string(), "runtime=debug".to_string()),
("KEY".to_string(), " ".to_string()),
]))
.set_publish_all_ports(true)
.set_handle(handle(name))
.set_start_policy(StartPolicy::Strict)

View File

@@ -6,7 +6,7 @@ license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet"
authors = ["Akil Demir <aeg_asd@hotmail.com>"]
edition = "2021"
rust-version = "1.70"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true
@@ -49,6 +49,9 @@ std = [
"coins-primitives/std",
]
# TODO
try-runtime = []
runtime-benchmarks = [
"frame-system/runtime-benchmarks",
"frame-support/runtime-benchmarks",

View File

@@ -5,7 +5,7 @@ description = "Serai coins primitives"
license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.69"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true

View File

@@ -6,7 +6,7 @@ license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet"
authors = ["Parity Technologies <admin@parity.io>, Akil Demir <aeg_asd@hotmail.com>"]
edition = "2021"
rust-version = "1.70"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true

View File

@@ -43,7 +43,7 @@ fn create_coin<T: Config>(coin: &Coin) -> (T::AccountId, AccountIdLookupOf<T>) {
let caller_lookup = T::Lookup::unlookup(caller);
assert_ok!(Coins::<T>::mint(
caller,
Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::max_value().div(1000u64)) }
Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) }
));
assert_ok!(Coins::<T>::mint(
caller,

View File

@@ -60,3 +60,6 @@ std = [
"validator-sets-pallet/std",
]
default = ["std"]
# TODO
try-runtime = []

View File

@@ -10,7 +10,7 @@ pub use in_instructions_primitives as primitives;
use primitives::*;
// TODO: Investigate why Substrate generates these
#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)]
#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs)]
#[frame_support::pallet]
pub mod pallet {
use sp_std::vec;

View File

@@ -5,7 +5,7 @@ description = "Serai instructions library, enabling encoding and decoding"
license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.69"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true

View File

@@ -20,12 +20,15 @@ workspace = true
name = "serai-node"
[dependencies]
zeroize = "1"
rand_core = "0.6"
zeroize = "1"
hex = "0.4"
log = "0.4"
schnorrkel = "0.11"
libp2p = "0.52"
sp-core = { git = "https://github.com/serai-dex/substrate" }
sp-keystore = { git = "https://github.com/serai-dex/substrate" }
sp-timestamp = { git = "https://github.com/serai-dex/substrate" }

View File

@@ -1,6 +1,7 @@
use core::marker::PhantomData;
use std::collections::HashSet;
use sp_core::Pair as PairTrait;
use sp_core::{Decode, Pair as PairTrait, sr25519::Public};
use sc_service::ChainType;
@@ -15,7 +16,18 @@ fn account_from_name(name: &'static str) -> PublicKey {
insecure_pair_from_name(name).public()
}
fn testnet_genesis(
fn wasm_binary() -> Vec<u8> {
// TODO: Accept a configurable runtime path
const WASM_PATH: &str = "/runtime/serai.wasm";
if let Ok(binary) = std::fs::read(WASM_PATH) {
log::info!("using {WASM_PATH}");
return binary;
}
log::info!("using built-in wasm");
WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec()
}
fn devnet_genesis(
wasm_binary: &[u8],
validators: &[&'static str],
endowed_accounts: Vec<PublicKey>,
@@ -64,18 +76,69 @@ fn testnet_genesis(
}
}
pub fn development_config() -> Result<ChainSpec, &'static str> {
let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?;
fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig {
let validators = validators
.into_iter()
.map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap())
.collect::<Vec<_>>();
Ok(ChainSpec::from_genesis(
assert_eq!(validators.iter().collect::<HashSet<_>>().len(), validators.len());
RuntimeGenesisConfig {
system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData },
transaction_payment: Default::default(),
coins: CoinsConfig {
accounts: validators
.iter()
.map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) }))
.collect(),
_ignore: Default::default(),
},
dex: DexConfig {
pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero],
_ignore: Default::default(),
},
validator_sets: ValidatorSetsConfig {
networks: serai_runtime::primitives::NETWORKS
.iter()
.map(|network| match network {
NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),
NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))),
NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))),
NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))),
})
.collect(),
participants: validators.clone(),
},
signals: SignalsConfig::default(),
babe: BabeConfig {
authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(),
epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG),
_config: PhantomData,
},
grandpa: GrandpaConfig {
authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(),
_config: PhantomData,
},
}
}
pub fn development_config() -> ChainSpec {
let wasm_binary = wasm_binary();
ChainSpec::from_genesis(
// Name
"Development Network",
// ID
"devnet",
ChainType::Development,
|| {
testnet_genesis(
wasm_binary,
move || {
devnet_genesis(
&wasm_binary,
&["Alice"],
vec![
account_from_name("Alice"),
@@ -92,28 +155,28 @@ pub fn development_config() -> Result<ChainSpec, &'static str> {
// Telemetry
None,
// Protocol ID
Some("serai"),
Some("serai-devnet"),
// Fork ID
None,
// Properties
None,
// Extensions
None,
))
)
}
pub fn testnet_config() -> Result<ChainSpec, &'static str> {
let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?;
pub fn local_config() -> ChainSpec {
let wasm_binary = wasm_binary();
Ok(ChainSpec::from_genesis(
ChainSpec::from_genesis(
// Name
"Local Test Network",
// ID
"local",
ChainType::Local,
|| {
testnet_genesis(
wasm_binary,
move || {
devnet_genesis(
&wasm_binary,
&["Alice", "Bob", "Charlie", "Dave"],
vec![
account_from_name("Alice"),
@@ -130,12 +193,48 @@ pub fn testnet_config() -> Result<ChainSpec, &'static str> {
// Telemetry
None,
// Protocol ID
Some("serai"),
Some("serai-local"),
// Fork ID
None,
// Properties
None,
// Extensions
None,
))
)
}
pub fn testnet_config() -> ChainSpec {
let wasm_binary = wasm_binary();
ChainSpec::from_genesis(
// Name
"Test Network 2",
// ID
"testnet-2",
ChainType::Live,
move || {
let _ = testnet_genesis(&wasm_binary, vec![]);
todo!()
},
// Bootnodes
vec![],
// Telemetry
None,
// Protocol ID
Some("serai-testnet-2"),
// Fork ID
None,
// Properties
None,
// Extensions
None,
)
}
pub fn bootnode_multiaddrs(id: &str) -> Vec<libp2p::Multiaddr> {
match id {
"devnet" | "local" => vec![],
"testnet-2" => todo!(),
_ => panic!("unrecognized network ID"),
}
}

View File

@@ -39,8 +39,9 @@ impl SubstrateCli for Cli {
fn load_spec(&self, id: &str) -> Result<Box<dyn sc_service::ChainSpec>, String> {
match id {
"dev" | "devnet" => Ok(Box::new(chain_spec::development_config()?)),
"local" => Ok(Box::new(chain_spec::testnet_config()?)),
"dev" | "devnet" => Ok(Box::new(chain_spec::development_config())),
"local" => Ok(Box::new(chain_spec::local_config())),
"testnet" => Ok(Box::new(chain_spec::testnet_config())),
_ => panic!("Unknown network ID"),
}
}

View File

@@ -1,5 +1,4 @@
use zeroize::Zeroize;
use rand_core::RngCore;
use sp_core::{crypto::*, ed25519, sr25519};
use sp_keystore::*;
@@ -9,12 +8,14 @@ pub struct Keystore(sr25519::Pair);
impl Keystore {
pub fn from_env() -> Option<Self> {
let mut key_hex = serai_env::var("KEY")?;
if key_hex.trim().is_empty() {
None?;
}
let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex");
key_hex.zeroize();
assert_eq!(key.len(), 32, "KEY from environment wasn't 32 bytes");
key.extend([0; 32]);
rand_core::OsRng.fill_bytes(&mut key[32 ..]);
key.extend(sp_core::blake2_256(&key));
let res = Self(sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&key).unwrap()));
key.zeroize();
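A minimal sketch of the key expansion above (assumes schnorrkel 0.11's 64-byte SecretKey encoding, a 32-byte key followed by a 32-byte nonce, and the sp-core From<schnorrkel::SecretKey> impl used in this file):

use sp_core::sr25519;
use zeroize::Zeroize;

fn expand_key(key: [u8; 32]) -> sr25519::Pair {
  let mut bytes = key.to_vec();
  // The nonce half is now blake2_256(key), derived deterministically from KEY
  // instead of being sampled from OsRng as the prior code did
  bytes.extend(sp_core::blake2_256(&key));
  let pair = sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&bytes).unwrap());
  bytes.zeroize();
  pair
}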

View File

@@ -1,5 +1,7 @@
use std::{sync::Arc, collections::HashSet};
use rand_core::{RngCore, OsRng};
use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata};
use sp_block_builder::BlockBuilder;
use sp_api::ProvideRuntimeApi;
@@ -17,6 +19,7 @@ pub use sc_rpc_api::DenyUnsafe;
use sc_transaction_pool_api::TransactionPool;
pub struct FullDeps<C, P> {
pub id: String,
pub client: Arc<C>,
pub pool: Arc<P>,
pub deny_unsafe: DenyUnsafe,
@@ -44,18 +47,19 @@ where
use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer};
let mut module = RpcModule::new(());
let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps;
let FullDeps { id, client, pool, deny_unsafe, authority_discovery } = deps;
module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?;
module.merge(TransactionPayment::new(client.clone()).into_rpc())?;
if let Some(authority_discovery) = authority_discovery {
let mut authority_discovery_module = RpcModule::new((client, RwLock::new(authority_discovery)));
let mut authority_discovery_module =
RpcModule::new((id, client, RwLock::new(authority_discovery)));
authority_discovery_module.register_async_method(
"p2p_validators",
|params, context| async move {
let network: NetworkId = params.parse()?;
let (client, authority_discovery) = &*context;
let (id, client, authority_discovery) = &*context;
let latest_block = client.info().best_hash;
let validators = client.runtime_api().validators(latest_block, network).map_err(|_| {
@@ -64,7 +68,9 @@ where
"please report this at https://github.com/serai-dex/serai",
)))
})?;
let mut all_p2p_addresses = vec![];
// Always return the protocol's bootnodes
let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id);
// Additionally, return the addresses of validators found over the DHT
for validator in validators {
let mut returned_addresses = authority_discovery
.write()
@@ -72,14 +78,19 @@ where
.get_addresses_by_authority_id(validator.into())
.await
.unwrap_or_else(HashSet::new)
.into_iter();
// Only take a single address
.into_iter()
.collect::<Vec<_>>();
// Randomly select an address
// There should be one, there may be two if their IP address changed, and more should only
// occur if they have multiple proxies, a frequently changing IP address, or some issue
// preventing consistent self-identification
// It isn't beneficial to use multiple addresses for a single peer here
if let Some(address) = returned_addresses.next() {
all_p2p_addresses.push(address);
if !returned_addresses.is_empty() {
all_p2p_addresses.push(
returned_addresses.remove(
usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len(),
),
);
}
}
Ok(all_p2p_addresses)
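Per validator, only one of the DHT-returned addresses is kept. A minimal sketch of that selection (the pick_one helper is illustrative, not part of the diff):

use rand_core::{OsRng, RngCore};

fn pick_one<T>(mut candidates: Vec<T>) -> Option<T> {
  if candidates.is_empty() {
    return None;
  }
  // Use the high 32 bits of a u64 so the value fits in usize even on 32-bit
  // targets, then reduce modulo the candidate count for a (near-)uniform index
  let index = usize::try_from(OsRng.next_u64() >> 32).unwrap() % candidates.len();
  Some(candidates.remove(index))
}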

View File

@@ -161,7 +161,7 @@ pub fn new_partial(
))
}
pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
pub fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
let (
sc_service::PartialComponents {
client,
@@ -176,6 +176,11 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
keystore_container,
) = new_partial(&config)?;
config.network.node_name = "serai".to_string();
config.network.client_version = "0.1.0".to_string();
config.network.listen_addresses =
vec!["/ip4/0.0.0.0/tcp/30333".parse().unwrap(), "/ip6/::/tcp/30333".parse().unwrap()];
let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network);
let grandpa_protocol_name =
grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec);
@@ -203,6 +208,59 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
})?;
task_manager.spawn_handle().spawn("bootnodes", "bootnodes", {
let network = network.clone();
let id = config.chain_spec.id().to_string();
async move {
// Transforms the above Multiaddrs into MultiaddrWithPeerIds
// While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in
// time and this is fine for a testnet
let bootnodes = || async {
use libp2p::{Transport as TransportTrait, tcp::tokio::Transport, noise::Config};
let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id);
let mut tasks = vec![];
for multiaddr in bootnode_multiaddrs {
tasks.push(tokio::time::timeout(
core::time::Duration::from_secs(10),
tokio::task::spawn(async move {
let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? };
let mut transport = Transport::default()
.upgrade(libp2p::core::upgrade::Version::V1)
.authenticate(noise)
.multiplex(libp2p::yamux::Config::default());
let Ok(transport) = transport.dial(multiaddr.clone()) else { None? };
let Ok((peer_id, _)) = transport.await else { None? };
Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id })
}),
));
}
let mut res = vec![];
for task in tasks {
if let Ok(Ok(Some(bootnode))) = task.await {
res.push(bootnode);
}
}
res
};
use sc_network::{NetworkStatusProvider, NetworkPeers};
loop {
if let Ok(status) = network.status().await {
if status.num_connected_peers < 3 {
for bootnode in bootnodes().await {
let _ = network.add_reserved_peer(bootnode);
}
}
}
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
}
}
});
if config.offchain_worker.enabled {
task_manager.spawn_handle().spawn(
"offchain-workers-runner",
@@ -258,11 +316,13 @@ pub fn new_full(config: Configuration) -> Result<TaskManager, ServiceError> {
};
let rpc_builder = {
let id = config.chain_spec.id().to_string();
let client = client.clone();
let pool = transaction_pool.clone();
Box::new(move |deny_unsafe, _| {
crate::rpc::create_full(crate::rpc::FullDeps {
id: id.clone(),
client: client.clone(),
pool: pool.clone(),
deny_unsafe,

View File

@@ -6,7 +6,7 @@ license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.69"
rust-version = "1.74"
[package.metadata.docs.rs]
all-features = true

View File

@@ -314,12 +314,10 @@ pub type ReportLongevity = <Runtime as pallet_babe::Config>::EpochDuration;
impl babe::Config for Runtime {
#[cfg(feature = "fast-epoch")]
#[allow(clippy::identity_op)]
type EpochDuration = ConstU64<{ DAYS / (24 * 2) }>; // 30 minutes
type EpochDuration = ConstU64<{ MINUTES / 2 }>; // 30 seconds
#[cfg(not(feature = "fast-epoch"))]
#[allow(clippy::identity_op)]
type EpochDuration = ConstU64<{ DAYS }>;
type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>;
type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;
type EpochChangeTrigger = babe::ExternalTrigger;
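For reference, a worked expansion of those durations, assuming the 6-second block target implied by ExpectedBlockTime above (the MINUTES/DAYS names mirror the runtime's block-count constants; the values are illustrative):

const TARGET_BLOCK_TIME: u64 = 6; // seconds
const MINUTES: u64 = 60 / TARGET_BLOCK_TIME; // 10 blocks
const DAYS: u64 = 24 * 60 * MINUTES; // 14_400 blocks
const FAST_EPOCH: u64 = MINUTES / 2; // 5 blocks, ~30 seconds
const DEFAULT_EPOCH: u64 = 4 * 7 * DAYS; // 403_200 blocks, ~4 weeks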

View File

@@ -57,4 +57,7 @@ runtime-benchmarks = [
"frame-support/runtime-benchmarks",
]
# TODO
try-runtime = []
default = ["std"]

View File

@@ -142,6 +142,7 @@ pub mod pallet {
}
// 80% threshold
// TODO: Use 34% for halting a set (not 80%)
const REQUIREMENT_NUMERATOR: u64 = 4;
const REQUIREMENT_DIVISOR: u64 = 5;

View File

@@ -70,6 +70,9 @@ std = [
"dex-pallet/std",
]
# TODO
try-runtime = []
runtime-benchmarks = [
"frame-system/runtime-benchmarks",
"frame-support/runtime-benchmarks",

View File

@@ -363,21 +363,26 @@ pub mod pallet {
let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0;
let mut iter = SortedAllocationsIter::<T>::new(network);
let mut participants = vec![];
let mut key_shares = 0;
let mut total_stake = 0;
while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) {
let Some((key, amount)) = iter.next() else { break };
{
let mut iter = SortedAllocationsIter::<T>::new(network);
let mut key_shares = 0;
while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) {
let Some((key, amount)) = iter.next() else { break };
let these_key_shares = amount.0 / allocation_per_key_share;
InSet::<T>::set(network, key, Some(these_key_shares));
participants.push((key, these_key_shares));
let these_key_shares =
(amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET));
participants.push((key, these_key_shares));
// This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET
// Off-chain, the key shares per validator will be accordingly adjusted
key_shares += these_key_shares;
total_stake += amount.0;
key_shares += these_key_shares;
total_stake += amount.0;
}
amortize_excess_key_shares(&mut participants);
}
for (key, shares) in &participants {
InSet::<T>::set(network, key, Some(*shares));
}
TotalAllocatedStake::<T>::set(network, Some(Amount(total_stake)));
@@ -472,7 +477,7 @@ pub mod pallet {
let Some(top) = top else { return false };
// key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause an off-chain reduction of
// key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round-robin reduction of
// each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET
// post_amortization_key_shares_for_top_validator yields what the top validator's key shares
// would be after such a reduction, letting us evaluate this correctly

View File

@@ -115,11 +115,11 @@ pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) ->
/// maximum.
///
/// Reduction occurs by reducing each validator in a reverse round-robin.
pub fn amortize_excess_key_shares(validators: &mut [(Public, u16)]) {
let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u16>();
for i in 0 .. usize::from(
total_key_shares.saturating_sub(u16::try_from(MAX_KEY_SHARES_PER_SET).unwrap()),
) {
pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) {
let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::<u64>();
for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))
.unwrap()
{
validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
}
}
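A self-contained trace of that reverse round-robin, using an illustrative maximum of 5 key shares in place of MAX_KEY_SHARES_PER_SET:

fn main() {
  let max_key_shares: u64 = 5; // stand-in for MAX_KEY_SHARES_PER_SET
  let mut validators: Vec<(&str, u64)> = vec![("Alice", 3), ("Bob", 2), ("Charlie", 2)];
  let total_key_shares: u64 = validators.iter().map(|(_, shares)| shares).sum();
  // 7 shares total, so 2 are removed, starting from the last (lowest-allocated) validator
  for i in 0 .. usize::try_from(total_key_shares.saturating_sub(max_key_shares)).unwrap() {
    let len = validators.len();
    validators[len - ((i % len) + 1)].1 -= 1;
  }
  assert_eq!(validators, vec![("Alice", 3), ("Bob", 1), ("Charlie", 1)]);
}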