rebase to develop latest
@@ -31,7 +31,7 @@ pub async fn provide_batch(serai: &Serai, batch: Batch) -> [u8; 32] {
    keys
  } else {
    let keys = KeyPair(pair.public(), vec![].try_into().unwrap());
    set_keys(serai, set, keys.clone()).await;
    set_keys(serai, set, keys.clone(), &[insecure_pair_from_name("Alice")]).await;
    keys
  };
  assert_eq!(keys.0, pair.public());
@@ -14,7 +14,6 @@ use frost::dkg::musig::musig;
use schnorrkel::Schnorrkel;

use serai_client::{
  primitives::insecure_pair_from_name,
  validator_sets::{
    primitives::{ValidatorSet, KeyPair, musig_context, set_keys_message},
    ValidatorSetsEvent,
@@ -25,33 +24,52 @@ use serai_client::{
use crate::common::tx::publish_tx;

#[allow(dead_code)]
pub async fn set_keys(serai: &Serai, set: ValidatorSet, key_pair: KeyPair) -> [u8; 32] {
  let pair = insecure_pair_from_name("Alice");
  let public = pair.public();
pub async fn set_keys(
  serai: &Serai,
  set: ValidatorSet,
  key_pair: KeyPair,
  pairs: &[Pair],
) -> [u8; 32] {
  let mut pub_keys = vec![];
  for pair in pairs {
    let public_key =
      <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut pair.public().0.as_ref()).unwrap();
    pub_keys.push(public_key);
  }

  let public_key = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap();
  let secret_key = <Ristretto as Ciphersuite>::read_F::<&[u8]>(
    &mut pair.as_ref().secret.to_bytes()[.. 32].as_ref(),
  )
  .unwrap();
  assert_eq!(Ristretto::generator() * secret_key, public_key);
  let threshold_keys =
    musig::<Ristretto>(&musig_context(set), &Zeroizing::new(secret_key), &[public_key]).unwrap();
  let mut threshold_keys = vec![];
  for i in 0 .. pairs.len() {
    let secret_key = <Ristretto as Ciphersuite>::read_F::<&[u8]>(
      &mut pairs[i].as_ref().secret.to_bytes()[.. 32].as_ref(),
    )
    .unwrap();
    assert_eq!(Ristretto::generator() * secret_key, pub_keys[i]);

    threshold_keys.push(
      musig::<Ristretto>(&musig_context(set), &Zeroizing::new(secret_key), &pub_keys).unwrap(),
    );
  }

  let mut musig_keys = HashMap::new();
  for tk in threshold_keys {
    musig_keys.insert(tk.params().i(), tk.into());
  }

  let sig = frost::tests::sign_without_caching(
    &mut OsRng,
    frost::tests::algorithm_machines(
      &mut OsRng,
      &Schnorrkel::new(b"substrate"),
      &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]),
    ),
    frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &musig_keys),
    &set_keys_message(&set, &[], &key_pair),
  );

  // Set the key pair
  let block = publish_tx(
    serai,
    &SeraiValidatorSets::set_keys(set.network, vec![], key_pair.clone(), Signature(sig.to_bytes())),
    &SeraiValidatorSets::set_keys(
      set.network,
      vec![].try_into().unwrap(),
      key_pair.clone(),
      Signature(sig.to_bytes()),
    ),
  )
  .await;
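The rewritten set_keys above turns the helper from a single hard-coded Alice signer into an arbitrary, ordered list of signers. A minimal standalone sketch of that multi-signer MuSig flow follows; it is not part of this commit, the helper name is made up, and it assumes the same ciphersuite/modular-frost/schnorrkel crates and frost::tests helpers imported above, with the musig context taken to be a byte slice.

use std::collections::HashMap;

use rand_core::OsRng;
use zeroize::Zeroizing;

use ciphersuite::{Ciphersuite, Ristretto};
use frost::dkg::musig::musig;
use schnorrkel::Schnorrkel;

// Hypothetical helper: every signer in `secrets` co-signs `msg` under MuSig.
// A signer's participant index is its position in the shared, ordered key list,
// which is why callers must agree on that order.
fn musig_sign_all(
  context: &[u8],
  secrets: &[<Ristretto as Ciphersuite>::F],
  msg: &[u8],
) -> [u8; 64] {
  // The ordered public-key list every signer builds their ThresholdKeys against.
  let pub_keys = secrets.iter().map(|secret| Ristretto::generator() * *secret).collect::<Vec<_>>();

  // One set of ThresholdKeys per signer, all derived from the same ordered list.
  let mut keys = HashMap::new();
  for secret in secrets {
    let these_keys = musig::<Ristretto>(context, &Zeroizing::new(*secret), &pub_keys).unwrap();
    keys.insert(these_keys.params().i(), these_keys.into());
  }

  // Drive the FROST test machinery to produce a single Schnorrkel signature over `msg`.
  frost::tests::sign_without_caching(
    &mut OsRng,
    frost::tests::algorithm_machines(&mut OsRng, &Schnorrkel::new(b"substrate"), &keys),
    msg,
  )
  .to_bytes()
}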
@@ -1,10 +1,216 @@
use serai_client::Serai;
use std::{time::Duration, collections::HashMap};

use rand_core::{RngCore, OsRng};
use zeroize::Zeroizing;

use ciphersuite::{Ciphersuite, Ristretto};
use frost::dkg::musig::musig;
use schnorrkel::Schnorrkel;

use serai_client::{
  genesis_liquidity::{
    primitives::{GENESIS_LIQUIDITY_ACCOUNT, INITIAL_GENESIS_LP_SHARES},
    SeraiGenesisLiquidity,
  },
  validator_sets::primitives::{musig_context, Session, ValidatorSet},
};

use serai_abi::{
  genesis_liquidity::primitives::{oraclize_values_message, Values},
  primitives::COINS,
};

use sp_core::{sr25519::Signature, Pair as PairTrait};

use serai_client::{
  primitives::{
    Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, insecure_pair_from_name, GENESIS_SRI,
  },
  in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch},
  Serai,
};

mod common;
use common::genesis_liquidity::test_genesis_liquidity;
use common::{in_instructions::provide_batch, tx::publish_tx};

serai_test_fast_epoch!(
  genesis_liquidity: (|serai: Serai| async move {
    test_genesis_liquidity(serai).await;
  })
);

async fn test_genesis_liquidity(serai: Serai) {
  // all coins except the native
  let coins = COINS.into_iter().filter(|c| *c != Coin::native()).collect::<Vec<_>>();

  // make accounts with amounts
  let mut accounts = HashMap::new();
  for coin in coins.clone() {
    // make 5 accounts per coin
    let mut values = vec![];
    for _ in 0 .. 5 {
      let mut address = SeraiAddress::new([0; 32]);
      OsRng.fill_bytes(&mut address.0);
      values.push((address, Amount(OsRng.next_u64() % 10u64.pow(coin.decimals()))));
    }
    accounts.insert(coin, values);
  }

  // send a batch per coin
  let mut batch_ids: HashMap<NetworkId, u32> = HashMap::new();
  for coin in coins.clone() {
    // set up instructions
    let instructions = accounts[&coin]
      .iter()
      .map(|(addr, amount)| InInstructionWithBalance {
        instruction: InInstruction::GenesisLiquidity(*addr),
        balance: Balance { coin, amount: *amount },
      })
      .collect::<Vec<_>>();

    // set up the block hash
    let mut block = BlockHash([0; 32]);
    OsRng.fill_bytes(&mut block.0);

    // set up the batch id
    batch_ids
      .entry(coin.network())
      .and_modify(|v| {
        *v += 1;
      })
      .or_insert(0);

    let batch =
      Batch { network: coin.network(), id: batch_ids[&coin.network()], block, instructions };
    provide_batch(&serai, batch).await;
  }
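The batch-id bookkeeping above relies on HashMap::entry: the first batch for a network gets id 0 and every later batch for the same network increments it (for instance, if Ether and Dai both map to the Ethereum network, that network publishes batch 0 and then batch 1). A standalone sketch of the same pattern, with hypothetical string keys standing in for NetworkId:

use std::collections::HashMap;

fn next_batch_id(batch_ids: &mut HashMap<&'static str, u32>, network: &'static str) -> u32 {
  // First sighting of a network inserts 0; later sightings bump the stored id.
  batch_ids.entry(network).and_modify(|id| *id += 1).or_insert(0);
  batch_ids[network]
}

fn main() {
  let mut ids = HashMap::new();
  assert_eq!(next_batch_id(&mut ids, "ethereum"), 0); // e.g. the Ether batch
  assert_eq!(next_batch_id(&mut ids, "ethereum"), 1); // e.g. the Dai batch
  assert_eq!(next_batch_id(&mut ids, "monero"), 0);
}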
  // wait until genesis ends
  let genesis_blocks = 10; // TODO
  let block_time = 6; // TODO
  tokio::time::timeout(
    tokio::time::Duration::from_secs(3 * (genesis_blocks * block_time)),
    async {
      while serai.latest_finalized_block().await.unwrap().number() < 10 {
        tokio::time::sleep(Duration::from_secs(6)).await;
      }
    },
  )
  .await
  .unwrap();

  // set values relative to each other
  // TODO: Random values here
  let values = Values { monero: 184100, ether: 4785000, dai: 1500 };
  set_values(&serai, &values).await;
  let values_map = HashMap::from([
    (Coin::Monero, values.monero),
    (Coin::Ether, values.ether),
    (Coin::Dai, values.dai),
  ]);

  // wait a little bit...
  tokio::time::sleep(Duration::from_secs(12)).await;

  // check the total SRI supply increased by 100M (GENESIS_SRI)
  // there are 6 endowed accounts in dev-net; take them into consideration when checking
  // the total SRI minted at this time.
  let serai = serai.as_of_latest_finalized_block().await.unwrap();
  let sri = serai.coins().coin_supply(Coin::Serai).await.unwrap();
  let endowed_amount: u64 = 1 << 60;
  let total_sri = (6 * endowed_amount) + GENESIS_SRI;
  assert_eq!(sri, Amount(total_sri));

  // check the genesis account has no coins, all transferred to pools.
  for coin in COINS {
    let amount = serai.coins().coin_balance(coin, GENESIS_LIQUIDITY_ACCOUNT).await.unwrap();
    assert_eq!(amount.0, 0);
  }

  // check pools have the proper liquidity
  let mut pool_amounts = HashMap::new();
  let mut total_value = 0u128;
  for coin in coins.clone() {
    let total_coin = accounts[&coin].iter().fold(0u128, |acc, value| acc + u128::from(value.1 .0));
    let value = if coin != Coin::Bitcoin {
      (total_coin * u128::from(values_map[&coin])) / 10u128.pow(coin.decimals())
    } else {
      total_coin
    };

    total_value += value;
    pool_amounts.insert(coin, (total_coin, value));
  }
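In the valuation above, every coin's deposits are expressed in Bitcoin terms via amount * value / 10^decimals, with Bitcoin itself acting as the unit of account and taken at face value (hence the coin != Coin::Bitcoin branch). A small standalone sketch with hypothetical numbers, assuming Monero's 12 decimals and reusing the 184100 relative value set above:

fn value_in_btc_units(total_coin: u128, relative_value: u128, decimals: u32) -> u128 {
  (total_coin * relative_value) / 10u128.pow(decimals)
}

fn main() {
  // 2 whole XMR, in Monero's 12-decimal atomic units, at a relative value of 184_100
  let xmr_atomic = 2 * 10u128.pow(12);
  assert_eq!(value_in_btc_units(xmr_atomic, 184_100, 12), 368_200);
}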
  // check the distributed SRI per pool
  let mut total_sri_distributed = 0u128;
  for coin in coins.clone() {
    let sri = if coin == *COINS.last().unwrap() {
      u128::from(GENESIS_SRI).checked_sub(total_sri_distributed).unwrap()
    } else {
      (pool_amounts[&coin].1 * u128::from(GENESIS_SRI)) / total_value
    };
    total_sri_distributed += sri;

    let reserves = serai.dex().get_reserves(coin).await.unwrap().unwrap();
    assert_eq!(u128::from(reserves.0 .0), pool_amounts[&coin].0); // coin side
    assert_eq!(u128::from(reserves.1 .0), sri); // SRI side
  }
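Each pool receives GENESIS_SRI scaled by its share of the total value, and the final pool takes whatever remains so integer division can't leave the total short. A standalone sketch of that remainder-to-last pattern with made-up numbers:

fn distribute(total_sri: u128, pool_values: &[u128]) -> Vec<u128> {
  let total_value: u128 = pool_values.iter().sum();
  let mut distributed = 0u128;
  let mut out = vec![];
  for (i, value) in pool_values.iter().enumerate() {
    // Last pool absorbs the rounding remainder; the rest get their proportional share.
    let sri = if i == pool_values.len() - 1 {
      total_sri - distributed
    } else {
      (value * total_sri) / total_value
    };
    distributed += sri;
    out.push(sri);
  }
  out
}

fn main() {
  let shares = distribute(1_000, &[3, 3, 3]);
  assert_eq!(shares, vec![333, 333, 334]); // remainder folded into the last pool
  assert_eq!(shares.iter().sum::<u128>(), 1_000);
}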
  // check each liquidity provider got liquidity tokens proportional to their value
  for coin in coins {
    let liq_supply = serai.genesis_liquidity().supply(coin).await.unwrap();
    for (acc, amount) in &accounts[&coin] {
      let acc_liq_shares = serai.genesis_liquidity().liquidity(acc, coin).await.unwrap().shares;

      // since we can't test the ratios directly (integer division would give 0),
      // we test whether they give the same result when multiplied by another constant.
      // The following check ensures the account in fact has the right amount of shares.
      let mut shares_ratio = (INITIAL_GENESIS_LP_SHARES * acc_liq_shares) / liq_supply.shares;
      let amounts_ratio =
        (INITIAL_GENESIS_LP_SHARES * amount.0) / u64::try_from(pool_amounts[&coin].0).unwrap();

      // we can tolerate a 1 unit difference between them due to integer division.
      if shares_ratio.abs_diff(amounts_ratio) == 1 {
        shares_ratio = amounts_ratio;
      }

      assert_eq!(shares_ratio, amounts_ratio);
    }
  }

  // TODO: test removing the liquidity before/after genesis ends.
}
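The per-account check above can't compare the raw ratios acc_shares / total_shares and amount / pool_amount directly, since integer division truncates both to 0; instead it scales each by INITIAL_GENESIS_LP_SHARES and tolerates a 1-unit difference from rounding. A small standalone sketch with a hypothetical scale constant standing in for INITIAL_GENESIS_LP_SHARES:

// Hypothetical stand-in for INITIAL_GENESIS_LP_SHARES.
const SCALE: u64 = 10_000_000;

fn scaled_ratio(part: u64, whole: u64) -> u64 {
  (SCALE * part) / whole
}

fn main() {
  // Raw integer ratios truncate to 0 and tell us nothing...
  assert_eq!(123u64 / 1_000, 0);
  // ...but scaling by a large constant first keeps them comparable.
  let shares_ratio = scaled_ratio(123, 1_000); // account shares / total shares
  let amounts_ratio = scaled_ratio(123, 1_000); // account deposit / pool deposit
  assert!(shares_ratio.abs_diff(amounts_ratio) <= 1);
}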
async fn set_values(serai: &Serai, values: &Values) {
  // prepare a MuSig tx to oraclize the relative values
  let pair = insecure_pair_from_name("Alice");
  let public = pair.public();
  // we publish the tx in set 4
  let set = ValidatorSet { session: Session(4), network: NetworkId::Serai };

  let public_key = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap();
  let secret_key = <Ristretto as Ciphersuite>::read_F::<&[u8]>(
    &mut pair.as_ref().secret.to_bytes()[.. 32].as_ref(),
  )
  .unwrap();

  assert_eq!(Ristretto::generator() * secret_key, public_key);
  let threshold_keys =
    musig::<Ristretto>(&musig_context(set), &Zeroizing::new(secret_key), &[public_key]).unwrap();

  let sig = frost::tests::sign_without_caching(
    &mut OsRng,
    frost::tests::algorithm_machines(
      &mut OsRng,
      &Schnorrkel::new(b"substrate"),
      &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]),
    ),
    &oraclize_values_message(&set, values),
  );

  // oraclize the values
  let _ =
    publish_tx(serai, &SeraiGenesisLiquidity::oraclize_values(*values, Signature(sig.to_bytes())))
      .await;
}
@@ -1,36 +1,71 @@
use rand_core::{RngCore, OsRng};

use sp_core::{sr25519::Public, Pair};
use sp_core::{
  sr25519::{Public, Pair},
  Pair as PairTrait,
};

use serai_client::{
  primitives::{NETWORKS, NetworkId, insecure_pair_from_name},
  primitives::{NETWORKS, NetworkId, BlockHash, insecure_pair_from_name},
  validator_sets::{
    primitives::{Session, ValidatorSet, KeyPair},
    ValidatorSetsEvent,
  },
  in_instructions::{
    primitives::{Batch, SignedBatch, batch_message},
    SeraiInInstructions,
  },
  Amount, Serai,
};

mod common;
use common::validator_sets::{set_keys, allocate_stake, deallocate_stake};
use common::{
  tx::publish_tx,
  validator_sets::{allocate_stake, deallocate_stake, set_keys},
};

const EPOCH_INTERVAL: u64 = 300;
fn get_random_key_pair() -> KeyPair {
  let mut ristretto_key = [0; 32];
  OsRng.fill_bytes(&mut ristretto_key);
  let mut external_key = vec![0; 33];
  OsRng.fill_bytes(&mut external_key);
  KeyPair(Public(ristretto_key), external_key.try_into().unwrap())
}

async fn get_ordered_keys(serai: &Serai, network: NetworkId, accounts: &[Pair]) -> Vec<Pair> {
  // retrieve the current session validators so that we know the order of the keys,
  // which is necessary for the correct musig signature.
  let validators = serai
    .as_of_latest_finalized_block()
    .await
    .unwrap()
    .validator_sets()
    .active_network_validators(network)
    .await
    .unwrap();

  // collect the pairs of the validators
  let mut pairs = vec![];
  for v in validators {
    let p = accounts.iter().find(|pair| pair.public() == v).unwrap().clone();
    pairs.push(p);
  }

  pairs
}
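get_ordered_keys exists because a MuSig participant's index is determined by the position of its public key in the list handed to set_keys, so the locally known key pairs must be re-ordered to match the validator order the chain reports before signing. A standalone sketch of that re-ordering, using stand-in string keys instead of sr25519 pairs:

fn order_like<T: Clone + PartialEq>(reference: &[T], items: &[T]) -> Vec<T> {
  // For each entry of the on-chain ordering, pick the matching local item.
  reference
    .iter()
    .map(|wanted| items.iter().find(|item| *item == wanted).unwrap().clone())
    .collect()
}

fn main() {
  let on_chain = vec!["Charlie", "Alice", "Bob"];
  let local = vec!["Alice", "Bob", "Charlie"];
  assert_eq!(order_like(&on_chain, &local), on_chain);
}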
serai_test!(
  set_keys_test: (|serai: Serai| async move {
    let network = NetworkId::Bitcoin;
    let set = ValidatorSet { session: Session(0), network };

    let public = insecure_pair_from_name("Alice").public();
    let pair = insecure_pair_from_name("Alice");
    let public = pair.public();

    // Neither of these keys is validated
    // The external key is infeasible to validate on-chain; the Ristretto key is feasible
    // TODO: Should the Ristretto key be validated?
    let mut ristretto_key = [0; 32];
    OsRng.fill_bytes(&mut ristretto_key);
    let mut external_key = vec![0; 33];
    OsRng.fill_bytes(&mut external_key);
    let key_pair = KeyPair(Public(ristretto_key), external_key.try_into().unwrap());
    let key_pair = get_random_key_pair();

    // Make sure the genesis is as expected
    assert_eq!(
@@ -62,7 +97,7 @@ serai_test!(
      assert_eq!(participants_ref, [public].as_ref());
    }

    let block = set_keys(&serai, set, key_pair.clone()).await;
    let block = set_keys(&serai, set, key_pair.clone(), &[pair]).await;

    // While the set_keys function should handle this, it's beneficial to
    // independently test it
@@ -149,11 +184,13 @@ async fn validator_set_rotation() {
  );

  // genesis accounts
  let pair1 = insecure_pair_from_name("Alice");
  let pair2 = insecure_pair_from_name("Bob");
  let pair3 = insecure_pair_from_name("Charlie");
  let pair4 = insecure_pair_from_name("Dave");
  let pair5 = insecure_pair_from_name("Eve");
  let accounts = vec![
    insecure_pair_from_name("Alice"),
    insecure_pair_from_name("Bob"),
    insecure_pair_from_name("Charlie"),
    insecure_pair_from_name("Dave"),
    insecure_pair_from_name("Eve"),
  ];

  // amounts for a single key share per network
  let key_shares = HashMap::from([
@@ -164,8 +201,9 @@ async fn validator_set_rotation() {
  ]);

  // genesis participants per network
  #[allow(clippy::redundant_closure_for_method_calls)]
  let default_participants =
    vec![pair1.public(), pair2.public(), pair3.public(), pair4.public()];
    accounts[.. 4].to_vec().iter().map(|pair| pair.public()).collect::<Vec<_>>();
  let mut participants = HashMap::from([
    (NetworkId::Serai, default_participants.clone()),
    (NetworkId::Bitcoin, default_participants.clone()),
@@ -181,28 +219,83 @@
    participants.sort();
    verify_session_and_active_validators(&serai, network, 0, participants).await;

    // add 1 participant & verify
    let hash =
      allocate_stake(&serai, network, key_shares[&network], &pair5, i.try_into().unwrap())
        .await;
    participants.push(pair5.public());
    participants.sort();
    verify_session_and_active_validators(
    // add 1 participant
    let last_participant = accounts[4].clone();
    let hash = allocate_stake(
      &serai,
      network,
      get_active_session(&serai, network, hash).await,
      participants,
      key_shares[&network],
      &last_participant,
      i.try_into().unwrap(),
    )
    .await;
    participants.push(last_participant.public());
    // the session at which set changes become active
    let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;

    // remove 1 participant & verify
    let hash =
      deallocate_stake(&serai, network, key_shares[&network], &pair2, i.try_into().unwrap())
        .await;
    participants.swap_remove(participants.iter().position(|k| *k == pair2.public()).unwrap());
    let active_session = get_active_session(&serai, network, hash).await;
    // set the keys if it is an external set
    if network != NetworkId::Serai {
      let set = ValidatorSet { session: Session(0), network };
      let key_pair = get_random_key_pair();
      let pairs = get_ordered_keys(&serai, network, &accounts).await;
      set_keys(&serai, set, key_pair, &pairs).await;
    }

    // verify
    participants.sort();
    verify_session_and_active_validators(&serai, network, active_session, participants).await;
    verify_session_and_active_validators(&serai, network, activation_session, participants)
      .await;

    // remove 1 participant
    let participant_to_remove = accounts[1].clone();
    let hash = deallocate_stake(
      &serai,
      network,
      key_shares[&network],
      &participant_to_remove,
      i.try_into().unwrap(),
    )
    .await;
    participants.swap_remove(
      participants.iter().position(|k| *k == participant_to_remove.public()).unwrap(),
    );
    let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;

    if network != NetworkId::Serai {
      // set the keys if it is an external set
      let set = ValidatorSet { session: Session(1), network };

      // we need the whole substrate key pair to sign the batch
      let (substrate_pair, key_pair) = {
        let pair = insecure_pair_from_name("session-1-key-pair");
        let public = pair.public();

        let mut external_key = vec![0; 33];
        OsRng.fill_bytes(&mut external_key);

        (pair, KeyPair(public, external_key.try_into().unwrap()))
      };
      let pairs = get_ordered_keys(&serai, network, &accounts).await;
      set_keys(&serai, set, key_pair, &pairs).await;

      // provide a batch to complete the handover and retire the previous set
      let mut block_hash = BlockHash([0; 32]);
      OsRng.fill_bytes(&mut block_hash.0);
      let batch = Batch { network, id: 0, block: block_hash, instructions: vec![] };
      publish_tx(
        &serai,
        &SeraiInInstructions::execute_batch(SignedBatch {
          batch: batch.clone(),
          signature: substrate_pair.sign(&batch_message(&batch)),
        }),
      )
      .await;
    }

    // verify
    participants.sort();
    verify_session_and_active_validators(&serai, network, activation_session, participants)
      .await;

    // check pending deallocations
    let pending = serai
@@ -212,8 +305,8 @@ async fn validator_set_rotation() {
      .validator_sets()
      .pending_deallocations(
        network,
        pair2.public(),
        Session(u32::try_from(active_session + 1).unwrap()),
        participant_to_remove.public(),
        Session(activation_session + 1),
      )
      .await
      .unwrap();
@@ -223,24 +316,39 @@
      .await;
  }
async fn session_for_block(serai: &Serai, block: [u8; 32], network: NetworkId) -> u32 {
  serai.as_of(block).validator_sets().session(network).await.unwrap().unwrap().0
}

async fn verify_session_and_active_validators(
  serai: &Serai,
  network: NetworkId,
  session: u64,
  session: u32,
  participants: &[Public],
) {
  // wait until the epoch block is finalized
  let epoch_block = (session * EPOCH_INTERVAL) + 1;
  while serai.finalized_block_by_number(epoch_block).await.unwrap().is_none() {
    // sleep 1 block
    tokio::time::sleep(tokio::time::Duration::from_secs(6)).await;
  }
  let serai_for_block =
    serai.as_of(serai.finalized_block_by_number(epoch_block).await.unwrap().unwrap().hash());
  // wait until the session is active. This wait should be at most 30 secs after the epoch time.
  let block = tokio::time::timeout(core::time::Duration::from_secs(2 * 60), async move {
    loop {
      let mut block = serai.latest_finalized_block_hash().await.unwrap();
      if session_for_block(serai, block, network).await < session {
        // Sleep a block
        tokio::time::sleep(core::time::Duration::from_secs(6)).await;
        continue;
      }
      while session_for_block(serai, block, network).await > session {
        block = serai.block(block).await.unwrap().unwrap().header.parent_hash.0;
      }
      assert_eq!(session_for_block(serai, block, network).await, session);
      break block;
    }
  })
  .await
  .unwrap();
  let serai_for_block = serai.as_of(block);

  // verify the session
  let s = serai_for_block.validator_sets().session(network).await.unwrap().unwrap();
  assert_eq!(u64::from(s.0), session);
  assert_eq!(s.0, session);

  // verify the participants
  let mut validators =
@@ -249,10 +357,11 @@ async fn verify_session_and_active_validators(
  assert_eq!(validators, participants);

  // make sure finalization continues as usual after the changes
  tokio::time::timeout(tokio::time::Duration::from_secs(60), async move {
  let current_finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
  tokio::time::timeout(core::time::Duration::from_secs(60), async move {
    let mut finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
    while finalized_block <= epoch_block + 2 {
      tokio::time::sleep(tokio::time::Duration::from_secs(6)).await;
    while finalized_block <= current_finalized_block + 2 {
      tokio::time::sleep(core::time::Duration::from_secs(6)).await;
      finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
    }
  })
@@ -262,15 +371,18 @@
  // TODO: verify key shares as well?
}

async fn get_active_session(serai: &Serai, network: NetworkId, hash: [u8; 32]) -> u64 {
  let block_number = serai.block(hash).await.unwrap().unwrap().header.number;
  let epoch = block_number / EPOCH_INTERVAL;
async fn get_session_at_which_changes_activate(
  serai: &Serai,
  network: NetworkId,
  hash: [u8; 32],
) -> u32 {
  let session = session_for_block(serai, hash, network).await;

  // changes should be active in the next session
  if network == NetworkId::Serai {
    // it takes 1 extra session for the serai network to make the changes active.
    epoch + 2
    session + 2
  } else {
    epoch + 1
    session + 1
  }
}