add coordinator side rotation test

This commit is contained in:
akildemir
2024-02-26 15:16:44 +03:00
parent 5f2e15604c
commit 360cd023a0
10 changed files with 445 additions and 203 deletions

View File

@@ -14,7 +14,9 @@ use serai_client::{
mod common;
use common::validator_sets::{set_keys, allocate_stake, deallocate_stake};
const EPOCH_INTERVAL: u64 = 5;
// TODO: get rid of this constant and retrieve the epoch numbers from the node directly,
// since epochs don't always change at the exact intervals.
const EPOCH_INTERVAL: u64 = 300;
serai_test!(
set_keys_test: (|serai: Serai| async move {

View File

@@ -315,7 +315,7 @@ pub type ReportLongevity = <Runtime as pallet_babe::Config>::EpochDuration;
impl babe::Config for Runtime {
#[cfg(feature = "fast-epoch")]
#[allow(clippy::identity_op)]
type EpochDuration = ConstU64<{ DAYS / (24 * 60 * 2) }>; // 30 seconds
type EpochDuration = ConstU64<{ DAYS / (24 * 2) }>; // 30 minutes
#[cfg(not(feature = "fast-epoch"))]
#[allow(clippy::identity_op)]

View File

@@ -652,7 +652,7 @@ pub mod pallet {
// If not Serai, check the prior session had its keys cleared, which happens once its
// retired
return (network == NetworkId::Serai) ||
(!Keys::<T>::contains_key(ValidatorSet {
(Keys::<T>::contains_key(ValidatorSet {
network,
session: Session(current_session.0 - 1),
}));

View File

@@ -57,14 +57,22 @@ pub fn coordinator_instance(
)
}
pub fn serai_composition(name: &str) -> TestBodySpecification {
pub fn serai_composition(name: &str, fast_epoch: bool) -> TestBodySpecification {
if fast_epoch {
serai_docker_tests::build("serai-fast-epoch".to_string());
TestBodySpecification::with_image(
Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never),
)
.replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into())
.set_publish_all_ports(true)
} else {
serai_docker_tests::build("serai".to_string());
TestBodySpecification::with_image(
Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never),
)
.replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into())
.set_publish_all_ports(true)
}
}
fn is_cosign_message(msg: &CoordinatorMessage) -> bool {
@@ -346,9 +354,9 @@ impl Processor {
/// Receive a message from the coordinator as a processor.
pub async fn recv_message(&mut self) -> CoordinatorMessage {
// Set a timeout of 20 minutes to allow effectively any protocol to occur without a fear of
// Set a timeout of 30 minutes to allow effectively any protocol to occur without a fear of
// an arbitrary timeout cutting it short
tokio::time::timeout(Duration::from_secs(20 * 60), self.recv_message_inner()).await.unwrap()
tokio::time::timeout(Duration::from_secs(30 * 60), self.recv_message_inner()).await.unwrap()
}
pub async fn set_substrate_key(

View File

@@ -260,8 +260,14 @@ pub async fn batch(
#[tokio::test]
async fn batch_test() {
new_test(|mut processors: Vec<Processor>| async move {
let (processor_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
new_test(
|mut processors: Vec<Processor>| async move {
// pop the last participant since genesis keygen has only 4 participants.
processors.pop().unwrap();
assert_eq!(processors.len(), COORDINATORS);
let (processor_is, substrate_key, _) =
key_gen::<Secp256k1>(&mut processors, Session(0)).await;
batch(
&mut processors,
&processor_is,
@@ -275,6 +281,8 @@ async fn batch_test() {
},
)
.await;
})
},
false,
)
.await;
}

View File

@@ -23,13 +23,17 @@ use crate::tests::*;
pub async fn key_gen<C: Ciphersuite>(
processors: &mut [Processor],
session: Session,
) -> (Vec<u8>, Zeroizing<<Ristretto as Ciphersuite>::F>, Zeroizing<C::F>) {
let coordinators = processors.len();
let mut participant_is = vec![];
let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };
let set = ValidatorSet { session, network: NetworkId::Bitcoin };
let id = KeyGenId { session: set.session, attempt: 0 };
for (i, processor) in processors.iter_mut().enumerate() {
let mut found = false;
while !found {
let msg = processor.recv_message().await;
match &msg {
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {
@@ -37,6 +41,12 @@ pub async fn key_gen<C: Ciphersuite>(
..
}) => {
participant_is.push(params.i());
found = true;
}
CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::ConfirmKeyPair { .. },
) => {
continue;
}
_ => panic!("unexpected message: {msg:?}"),
}
@@ -46,14 +56,15 @@ pub async fn key_gen<C: Ciphersuite>(
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::GenerateKey {
id,
params: ThresholdParams::new(
u16::try_from(((COORDINATORS * 2) / 3) + 1).unwrap(),
u16::try_from(COORDINATORS).unwrap(),
u16::try_from(((coordinators * 2) / 3) + 1).unwrap(),
u16::try_from(coordinators).unwrap(),
participant_is[i],
)
.unwrap(),
shares: 1,
})
);
}
processor
.send_message(messages::key_gen::ProcessorMessage::Commitments {
@@ -65,7 +76,7 @@ pub async fn key_gen<C: Ciphersuite>(
wait_for_tributary().await;
for (i, processor) in processors.iter_mut().enumerate() {
let mut commitments = (0 .. u8::try_from(COORDINATORS).unwrap())
let mut commitments = (0 .. u8::try_from(coordinators).unwrap())
.map(|l| {
(
participant_is[usize::from(l)],
@@ -83,7 +94,7 @@ pub async fn key_gen<C: Ciphersuite>(
);
// Recipient it's for -> (Sender i, Recipient i)
let mut shares = (0 .. u8::try_from(COORDINATORS).unwrap())
let mut shares = (0 .. u8::try_from(coordinators).unwrap())
.map(|l| {
(
participant_is[usize::from(l)],
@@ -118,7 +129,7 @@ pub async fn key_gen<C: Ciphersuite>(
CoordinatorMessage::KeyGen(messages::key_gen::CoordinatorMessage::Shares {
id,
shares: {
let mut shares = (0 .. u8::try_from(COORDINATORS).unwrap())
let mut shares = (0 .. u8::try_from(coordinators).unwrap())
.map(|l| {
(
participant_is[usize::from(l)],
@@ -182,14 +193,14 @@ pub async fn key_gen<C: Ciphersuite>(
.unwrap()
.as_secs()
.abs_diff(context.serai_time) <
70
(60 * 60 * 3) // 3hrs
);
assert_eq!(context.network_latest_finalized_block.0, [0; 32]);
assert_eq!(set.session, session);
assert_eq!(key_pair.0 .0, substrate_key);
assert_eq!(&key_pair.1, &network_key);
}
_ => panic!("coordinator didn't respond with ConfirmKeyPair"),
_ => panic!("coordinator didn't respond with ConfirmKeyPair msg: {:?} ", msg),
}
message = Some(msg);
} else {
@@ -220,8 +231,15 @@ pub async fn key_gen<C: Ciphersuite>(
#[tokio::test]
async fn key_gen_test() {
new_test(|mut processors: Vec<Processor>| async move {
key_gen::<Secp256k1>(&mut processors).await;
})
new_test(
|mut processors: Vec<Processor>| async move {
// pop the last participant since genesis keygen has only 4 participants.
processors.pop().unwrap();
assert_eq!(processors.len(), COORDINATORS);
key_gen::<Secp256k1>(&mut processors, Session(0)).await;
},
false,
)
.await;
}

View File

@@ -22,6 +22,10 @@ mod sign;
#[allow(unused_imports)]
pub use sign::sign;
mod rotation;
#[allow(unused_imports)]
pub use rotation::rotate;
pub(crate) const COORDINATORS: usize = 4;
pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;
@@ -39,13 +43,13 @@ impl<F: Send + Future, TB: 'static + Send + Sync + Fn(Vec<Processor>) -> F> Test
}
}
pub(crate) async fn new_test(test_body: impl TestBody) {
pub(crate) async fn new_test(test_body: impl TestBody, fast_epoch: bool) {
let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;
let mut coordinators = vec![];
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
let mut coordinator_compositions = vec![];
for i in 0 .. COORDINATORS {
for i in 0 .. 5 {
let name = match i {
0 => "Alice",
1 => "Bob",
@@ -55,7 +59,7 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
5 => "Ferdie",
_ => panic!("needed a 7th name for a serai node"),
};
let serai_composition = serai_composition(name);
let serai_composition = serai_composition(name, fast_epoch);
let (processor_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();

View File

@@ -0,0 +1,191 @@
use tokio::time::{sleep, Duration};
use zeroize::Zeroizing;
use ciphersuite::{Ciphersuite, Ristretto, Secp256k1};
use serai_client::{
primitives::{insecure_pair_from_name, NetworkId},
validator_sets::{
self,
primitives::{Session, ValidatorSet},
ValidatorSetsEvent,
},
Amount, Pair, Transaction,
};
use crate::{*, tests::*};
// Publish `tx` and busy-wait until it appears in a finalized block, returning the hash of the
// block which included it. Panics if inclusion isn't observed within the polling budget.
async fn publish_tx(serai: &Serai, tx: &Transaction) -> [u8; 32] {
  // Number of the latest finalized block before publication; inclusion must be in a later block
  let mut latest = serai
    .block(serai.latest_finalized_block_hash().await.unwrap())
    .await
    .unwrap()
    .unwrap()
    .number();

  serai.publish(tx).await.unwrap();

  // Get the block it was included in
  // TODO: Add an RPC method for this/check the guarantee on the subscription
  let mut ticks = 0;
  loop {
    latest += 1;

    let block = {
      let mut block;
      // do-while idiom: fetch block `latest`, sleeping 1s between attempts until it's finalized.
      // `ticks` accumulates across ALL outer iterations, so the 60s budget is for the whole wait,
      // not per block
      while {
        block = serai.finalized_block_by_number(latest).await.unwrap();
        block.is_none()
      } {
        sleep(Duration::from_secs(1)).await;
        ticks += 1;

        if ticks > 60 {
          panic!("60 seconds without inclusion in a finalized block");
        }
      }
      block.unwrap()
    };

    // Scan this block for our transaction; if absent, advance to the next finalized block
    for transaction in &block.transactions {
      if transaction == tx {
        return block.hash();
      }
    }
  }
}
/// Allocate `amount` of stake to `network` on behalf of `pair`, returning the hash of the
/// finalized block the transaction was included in.
///
/// Panics if signing/publication fails or inclusion isn't observed (see `publish_tx`).
#[allow(dead_code)]
async fn allocate_stake(
  serai: &Serai,
  network: NetworkId,
  amount: Amount,
  pair: &Pair,
  nonce: u32,
) -> [u8; 32] {
  // Build and sign the allocate call (tip of 0)
  // `pair` is already a reference, so pass it through directly (avoids a needless `&&Pair`)
  let tx =
    serai.sign(pair, validator_sets::SeraiValidatorSets::allocate(network, amount), nonce, 0);
  publish_tx(serai, &tx).await
}
/// Deallocate `amount` of stake from `network` on behalf of `pair`, returning the hash of the
/// finalized block the transaction was included in.
///
/// Panics if signing/publication fails or inclusion isn't observed (see `publish_tx`).
#[allow(dead_code)]
async fn deallocate_stake(
  serai: &Serai,
  network: NetworkId,
  amount: Amount,
  pair: &Pair,
  nonce: u32,
) -> [u8; 32] {
  // Build and sign the deallocate call (tip of 0)
  // `pair` is already a reference, so pass it through directly (avoids a needless `&&Pair`)
  let tx =
    serai.sign(pair, validator_sets::SeraiValidatorSets::deallocate(network, amount), nonce, 0);
  publish_tx(serai, &tx).await
}
/// Poll the Serai network's session until it advances past `current_epoch`, returning the new
/// session. Sleeps 6 seconds between polls and logs each observed session.
async fn wait_till_next_epoch(serai: &Serai, current_epoch: u32) -> Session {
  loop {
    sleep(Duration::from_secs(6)).await;

    // Read the Serai network's session as of the latest finalized block
    let temporal = serai.as_of_latest_finalized_block().await.unwrap();
    let session = temporal.validator_sets().session(NetworkId::Serai).await.unwrap().unwrap();
    println!("current session: {} ", session.0);

    if session.0 > current_epoch {
      return session;
    }
  }
}
/// Fetch `network`'s session as of the given `block` hash.
async fn get_session(serai: &Serai, block: [u8; 32], network: NetworkId) -> Session {
  let temporal = serai.as_of(block);
  let session = temporal.validator_sets().session(network).await;
  // A live network is expected to always have a session at this block
  session.unwrap().unwrap()
}
/// Walk backwards from the latest finalized block, while still within `session`, looking for the
/// block which emitted `NewSet` events; returns those events.
///
/// Panics if the walk leaves `session` (for `network`) without finding any `NewSet` events.
async fn new_set_events(
  serai: &Serai,
  session: Session,
  network: NetworkId,
) -> Vec<ValidatorSetsEvent> {
  let mut current_block = serai.latest_finalized_block().await.unwrap();
  let mut current_session = get_session(serai, current_block.hash(), network).await;
  while current_session == session {
    // `events` is only returned, never mutated, so it doesn't need `mut`
    let events =
      serai.as_of(current_block.hash()).validator_sets().new_set_events().await.unwrap();
    if !events.is_empty() {
      return events;
    }

    // Step to the parent block and re-read the session it was in
    current_block = serai.block(current_block.header.parent_hash.0).await.unwrap().unwrap();
    current_session = get_session(serai, current_block.hash(), network).await;
  }
  panic!("can't find the new set events for session: {} ", session.0);
}
// Drive a validator-set rotation end to end: stake a fifth validator in, verify the coordinator
// observes the resulting NewSet and complete a key gen for the new session, then unstake the
// first validator and repeat for the following session.
// The trailing `&[u8]`/`&Zeroizing<…>` parameters are accepted for call-site symmetry with the
// other test helpers but unused here.
pub async fn rotate(
  processors: &mut Vec<Processor>,
  excluded: Processor,
  _: &[u8],
  _: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
  // accounts
  let pair1 = insecure_pair_from_name("Alice");
  let pair5 = insecure_pair_from_name("Eve");
  let network = NetworkId::Bitcoin;
  let amount = Amount(1_000_000 * 10_u64.pow(8));

  let serai = processors[0].serai().await;

  // add the last participant into the validator set for the BTC network
  let block = allocate_stake(&serai, network, amount, &pair5, 0).await;

  // wait until next session to see the effect on the coordinator
  let current_epoch = get_session(&serai, block, NetworkId::Serai).await;
  let session = wait_till_next_epoch(&serai, current_epoch.0).await;

  // verify that the coordinator received new_set
  let events = new_set_events(&serai, session, network).await;
  assert!(events.contains(&ValidatorSetsEvent::NewSet { set: ValidatorSet { session, network } }));

  // do the keygen, now including the previously-excluded processor
  processors.push(excluded);
  let _ = key_gen::<Secp256k1>(processors, session).await;

  // pop 1 participant (deallocate the first validator's stake)
  let block = deallocate_stake(&serai, network, amount, &pair1, 0).await;

  // wait for this epoch to end
  let current_epoch = get_session(&serai, block, NetworkId::Serai).await;
  let session = wait_till_next_epoch(&serai, current_epoch.0).await;

  // verify that the coordinator received new_set
  let events = new_set_events(&serai, session, network).await;
  assert!(events.contains(&ValidatorSetsEvent::NewSet { set: ValidatorSet { session, network } }));

  // do the keygen without the first processor (presumably the deallocated validator's — verify)
  processors.remove(0);
  let _ = key_gen::<Secp256k1>(processors, session).await;
}
// End-to-end coordinator-side rotation test. Runs with `fast_epoch` enabled (the trailing `true`)
// so epochs roll over quickly enough for the test to observe rotations.
#[tokio::test]
async fn set_rotation_test() {
  new_test(
    |mut processors: Vec<Processor>| async move {
      // exclude the last processor from keygen since it's added back later by `rotate`
      let excluded = processors.pop().unwrap();
      assert_eq!(processors.len(), COORDINATORS);

      // genesis key gen with the remaining 4 processors
      let (processor_is, substrate_key, _) =
        key_gen::<Secp256k1>(&mut processors, Session(0)).await;
      rotate(&mut processors, excluded, &processor_is, &substrate_key).await;
    },
    true,
  )
  .await;
}

View File

@@ -168,8 +168,14 @@ pub async fn sign(
#[tokio::test]
async fn sign_test() {
new_test(|mut processors: Vec<Processor>| async move {
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
new_test(
|mut processors: Vec<Processor>| async move {
// pop the last participant since genesis keygen has only 4 participants.
processors.pop().unwrap();
assert_eq!(processors.len(), COORDINATORS);
let (participant_is, substrate_key, _) =
key_gen::<Secp256k1>(&mut processors, Session(0)).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
@@ -222,7 +228,10 @@ async fn sign_test() {
let serai = serai.as_of(block_included_in_hash);
let serai = serai.coins();
assert_eq!(serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(), Amount(1_000_000_000));
assert_eq!(
serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(),
Amount(1_000_000_000)
);
// Verify the mint occurred as expected
assert_eq!(
@@ -323,6 +332,8 @@ async fn sign_test() {
}
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
},
false,
)
.await;
}

View File

@@ -65,7 +65,7 @@ pub(crate) async fn new_test(test_body: impl TestBody) {
processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]);
let coordinator_composition = coordinator_instance(name, coord_key);
let serai_composition = serai_composition(name);
let serai_composition = serai_composition(name, false);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits