Simultaneously build Docker images used in tests

This commit is contained in:
Luke Parker
2023-11-27 01:10:23 -05:00
parent 571195bfda
commit 292263b21e
23 changed files with 639 additions and 526 deletions

View File

@@ -24,7 +24,20 @@ mod tests;
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
pub fn processor_instance(
fn network_str(network: NetworkId) -> &'static str {
match network {
NetworkId::Serai => panic!("starting a processor for Serai"),
NetworkId::Bitcoin => "bitcoin",
NetworkId::Ethereum => "ethereum",
NetworkId::Monero => "monero",
}
}
pub fn processor_docker_name(network: NetworkId) -> String {
format!("{}-processor", network_str(network))
}
pub async fn processor_instance(
network: NetworkId,
port: u32,
message_queue_key: <Ristretto as Ciphersuite>::F,
@@ -32,17 +45,12 @@ pub fn processor_instance(
let mut entropy = [0; 32];
OsRng.fill_bytes(&mut entropy);
let network_str = match network {
NetworkId::Serai => panic!("starting a processor for Serai"),
NetworkId::Bitcoin => "bitcoin",
NetworkId::Ethereum => "ethereum",
NetworkId::Monero => "monero",
};
let image = format!("{network_str}-processor");
serai_docker_tests::build(image.clone());
let network_str = network_str(network);
serai_docker_tests::build(processor_docker_name(network)).await;
TestBodySpecification::with_image(
Image::with_repository(format!("serai-dev-{image}")).pull_policy(PullPolicy::Never),
Image::with_repository(format!("serai-dev-{}", processor_docker_name(network)))
.pull_policy(PullPolicy::Never),
)
.replace_env(
[
@@ -58,17 +66,23 @@ pub fn processor_instance(
)
}
pub fn docker_names(network: NetworkId) -> Vec<String> {
vec![network_docker_name(network), processor_docker_name(network)]
}
pub type Handles = (String, String, String);
pub fn processor_stack(
pub async fn processor_stack(
network: NetworkId,
) -> (Handles, <Ristretto as Ciphersuite>::F, Vec<TestBodySpecification>) {
let (network_composition, network_rpc_port) = network_instance(network);
serai_docker_tests::build_batch(docker_names(network)).await;
let (network_composition, network_rpc_port) = network_instance(network).await;
let (coord_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();
serai_message_queue_tests::instance().await;
let processor_composition =
processor_instance(network, network_rpc_port, message_queue_keys[&network]);
processor_instance(network, network_rpc_port, message_queue_keys[&network]).await;
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits
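The core of this change is that image building is now asynchronous: `serai_docker_tests::build` is awaited, and the new `serai_docker_tests::build_batch(docker_names(network))` call above lets every image a stack needs be built at the same time. The implementation of `build_batch` isn't part of this diff; the following is only a minimal sketch of the idea, leaning on the `futures` crate's `join_all`, with the `docker build` invocation and Dockerfile path being illustrative assumptions rather than the project's actual layout.

```rust
use tokio::process::Command;

// Hypothetical helper: build a single `serai-dev-{name}` image via the Docker CLI.
// The tag and Dockerfile path are assumptions for illustration only.
async fn build_image(name: String) {
  let status = Command::new("docker")
    .arg("build")
    .arg("-t")
    .arg(format!("serai-dev-{name}"))
    .arg("-f")
    .arg(format!("orchestration/{name}/Dockerfile"))
    .arg(".")
    .status()
    .await
    .expect("failed to spawn docker build");
  assert!(status.success(), "docker build failed for {name}");
}

// Sketch of a concurrent build_batch: deduplicate the requested images, then build
// them all simultaneously instead of one after another.
pub async fn build_batch(mut names: Vec<String>) {
  names.sort();
  names.dedup();
  futures::future::join_all(names.into_iter().map(build_image)).await;
}
```

Under a scheme like this, `processor_stack` only waits for the slowest image in `docker_names(network)` rather than for the sum of every image's build time.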

View File

@@ -21,8 +21,19 @@ pub const RPC_PASS: &str = "seraidex";
pub const BTC_PORT: u32 = 8332;
pub const XMR_PORT: u32 = 18081;
pub fn bitcoin_instance() -> (TestBodySpecification, u32) {
serai_docker_tests::build("bitcoin".to_string());
pub fn network_docker_name(network: NetworkId) -> String {
match network {
NetworkId::Serai => {
panic!("asking for docker name for external network Serai, which isn't external")
}
NetworkId::Bitcoin => "bitcoin".to_string(),
NetworkId::Ethereum => todo!(),
NetworkId::Monero => "monero".to_string(),
}
}
pub async fn bitcoin_instance() -> (TestBodySpecification, u32) {
serai_docker_tests::build(network_docker_name(NetworkId::Bitcoin)).await;
let composition = TestBodySpecification::with_image(
Image::with_repository("serai-dev-bitcoin").pull_policy(PullPolicy::Never),
@@ -41,8 +52,8 @@ pub fn bitcoin_instance() -> (TestBodySpecification, u32) {
(composition, BTC_PORT)
}
pub fn monero_instance() -> (TestBodySpecification, u32) {
serai_docker_tests::build("monero".to_string());
pub async fn monero_instance() -> (TestBodySpecification, u32) {
serai_docker_tests::build(network_docker_name(NetworkId::Monero)).await;
let composition = TestBodySpecification::with_image(
Image::with_repository("serai-dev-monero").pull_policy(PullPolicy::Never),
@@ -63,11 +74,11 @@ pub fn monero_instance() -> (TestBodySpecification, u32) {
(composition, XMR_PORT)
}
pub fn network_instance(network: NetworkId) -> (TestBodySpecification, u32) {
pub async fn network_instance(network: NetworkId) -> (TestBodySpecification, u32) {
match network {
NetworkId::Bitcoin => bitcoin_instance(),
NetworkId::Bitcoin => bitcoin_instance().await,
NetworkId::Ethereum => todo!(),
NetworkId::Monero => monero_instance(),
NetworkId::Monero => monero_instance().await,
NetworkId::Serai => {
panic!("Serai is not a valid network to spawn an instance of for a processor")
}
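None of the following appears in the diff; it's just a sketch (the helper name is invented) of what the now-async builders make possible: both external networks' compositions, and therefore both images, can be prepared concurrently.

```rust
// Illustrative only: prepare the Bitcoin and Monero instances at the same time.
async fn prepare_both_networks() -> (TestBodySpecification, TestBodySpecification) {
  let ((btc_composition, btc_port), (xmr_composition, xmr_port)) =
    tokio::join!(bitcoin_instance(), monero_instance());
  assert_eq!(btc_port, BTC_PORT);
  assert_eq!(xmr_port, XMR_PORT);
  (btc_composition, xmr_composition)
}
```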

View File

@@ -191,164 +191,167 @@ pub(crate) async fn substrate_block(
}
}
#[test]
fn batch_test() {
#[tokio::test]
async fn batch_test() {
for network in [NetworkId::Bitcoin, NetworkId::Monero] {
let (coordinators, test) = new_test(network);
let (coordinators, test) = new_test(network).await;
test.run(|ops| async move {
tokio::time::sleep(Duration::from_secs(1)).await;
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
// Create a wallet before we start generating keys
let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Generate keys
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
// it was confirmed at)
// Mine multiple sets of medians to ensure the median is sufficiently advanced
for _ in 0 .. (10 * confirmations(network)) {
coordinators[0].add_block(&ops).await;
test
.run_async(|ops| async move {
tokio::time::sleep(Duration::from_secs(1)).await;
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Run twice, once with an instruction and once without
let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
for i in 0 .. 2 {
let mut serai_address = [0; 32];
OsRng.fill_bytes(&mut serai_address);
let instruction =
if i == 0 { Some(InInstruction::Transfer(SeraiAddress(serai_address))) } else { None };
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
// Send into the processor's wallet
let (tx, balance_sent) =
wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await;
for coordinator in &mut coordinators {
coordinator.publish_transacton(&ops, &tx).await;
}
// Create a wallet before we start generating keys
let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Put the TX past the confirmation depth
let mut block_with_tx = None;
for _ in 0 .. confirmations(network) {
let (hash, _) = coordinators[0].add_block(&ops).await;
if block_with_tx.is_none() {
block_with_tx = Some(hash);
}
// Generate keys
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
// it was confirmed at)
// Mine multiple sets of medians to ensure the median is sufficiently advanced
for _ in 0 .. (10 * confirmations(network)) {
coordinators[0].add_block(&ops).await;
tokio::time::sleep(Duration::from_secs(1)).await;
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Sleep for 10s
// The scanner works on a 5s interval, so this leaves a few s for any processing/latency
tokio::time::sleep(Duration::from_secs(10)).await;
// Run twice, once with an instruction and once without
let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
for i in 0 .. 2 {
let mut serai_address = [0; 32];
OsRng.fill_bytes(&mut serai_address);
let instruction =
if i == 0 { Some(InInstruction::Transfer(SeraiAddress(serai_address))) } else { None };
let expected_batch = Batch {
network,
id: i,
block: BlockHash(block_with_tx.unwrap()),
instructions: if let Some(instruction) = &instruction {
vec![InInstructionWithBalance {
instruction: instruction.clone(),
balance: Balance {
coin: balance_sent.coin,
amount: Amount(
balance_sent.amount.0 -
(2 * if network == NetworkId::Bitcoin {
Bitcoin::COST_TO_AGGREGATE
} else {
Monero::COST_TO_AGGREGATE
}),
),
},
}]
} else {
// This shouldn't have an instruction as we didn't add any data into the TX we sent
// Empty batches remain valuable as they let us achieve consensus on the block and spend
// contained outputs
vec![]
},
};
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (mut id, mut preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
// It should be able to assert it's perfectly sequential
id.attempt = attempt;
for coordinator in coordinators.iter_mut() {
coordinator
.send_message(messages::coordinator::CoordinatorMessage::BatchReattempt {
id: id.clone(),
})
.await;
// Send into the processor's wallet
let (tx, balance_sent) =
wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await;
for coordinator in &mut coordinators {
coordinator.publish_transacton(&ops, &tx).await;
}
(id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await;
}
// Continue with signing the batch
let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;
// Check it
assert_eq!(batch.batch, expected_batch);
// Fire a SubstrateBlock
let serai_time =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
for coordinator in &mut coordinators {
let plans = substrate_block(
coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
block: substrate_block_num + u64::from(i),
burns: vec![],
batches: vec![batch.batch.id],
},
)
.await;
if instruction.is_some() || (instruction.is_none() && (network == NetworkId::Monero)) {
assert!(plans.is_empty());
} else {
// If no instruction was used, and the processor can presume the origin, it'd have
// created a refund Plan
assert_eq!(plans.len(), 1);
}
}
}
// With the latter InInstruction not existing, we should've triggered a refund if the origin
// was detectable
// Check this is trying to sign a Plan
if network != NetworkId::Monero {
let mut refund_id = None;
for coordinator in &mut coordinators {
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {
id,
..
}) => {
if refund_id.is_none() {
refund_id = Some(id.clone());
}
assert_eq!(refund_id.as_ref().unwrap(), &id);
// Put the TX past the confirmation depth
let mut block_with_tx = None;
for _ in 0 .. confirmations(network) {
let (hash, _) = coordinators[0].add_block(&ops).await;
if block_with_tx.is_none() {
block_with_tx = Some(hash);
}
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Sleep for 10s
// The scanner works on a 5s interval, so this leaves a few s for any processing/latency
tokio::time::sleep(Duration::from_secs(10)).await;
let expected_batch = Batch {
network,
id: i,
block: BlockHash(block_with_tx.unwrap()),
instructions: if let Some(instruction) = &instruction {
vec![InInstructionWithBalance {
instruction: instruction.clone(),
balance: Balance {
coin: balance_sent.coin,
amount: Amount(
balance_sent.amount.0 -
(2 * if network == NetworkId::Bitcoin {
Bitcoin::COST_TO_AGGREGATE
} else {
Monero::COST_TO_AGGREGATE
}),
),
},
}]
} else {
// This shouldn't have an instruction as we didn't add any data into the TX we sent
// Empty batches remain valuable as they let us achieve consensus on the block and
// spend contained outputs
vec![]
},
};
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (mut id, mut preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
// It should be able to assert it's perfectly sequential
id.attempt = attempt;
for coordinator in coordinators.iter_mut() {
coordinator
.send_message(messages::coordinator::CoordinatorMessage::BatchReattempt {
id: id.clone(),
})
.await;
}
(id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt)
.await;
}
// Continue with signing the batch
let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;
// Check it
assert_eq!(batch.batch, expected_batch);
// Fire a SubstrateBlock
let serai_time =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
for coordinator in &mut coordinators {
let plans = substrate_block(
coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
block: substrate_block_num + u64::from(i),
burns: vec![],
batches: vec![batch.batch.id],
},
)
.await;
if instruction.is_some() || (instruction.is_none() && (network == NetworkId::Monero)) {
assert!(plans.is_empty());
} else {
// If no instruction was used, and the processor can presume the origin, it'd have
// created a refund Plan
assert_eq!(plans.len(), 1);
}
_ => panic!("processor didn't send preprocess for expected refund transaction"),
}
}
}
});
// With the latter InInstruction not existing, we should've triggered a refund if the origin
// was detectable
// Check this is trying to sign a Plan
if network != NetworkId::Monero {
let mut refund_id = None;
for coordinator in &mut coordinators {
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Preprocess {
id,
..
}) => {
if refund_id.is_none() {
refund_id = Some(id.clone());
}
assert_eq!(refund_id.as_ref().unwrap(), &id);
}
_ => panic!("processor didn't send preprocess for expected refund transaction"),
}
}
}
})
.await;
}
}
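Since the hunk above interleaves the removed synchronous body with the re-indented asynchronous one, the shape of the conversion is easier to see in isolation. Schematically (body elided), each test in these files now follows this pattern:

```rust
// Skeleton only; the actual test logic is unchanged apart from running inside the
// async closure passed to run_async.
#[tokio::test]
async fn example_test() {
  for network in [NetworkId::Bitcoin, NetworkId::Monero] {
    // new_test is now async because building the processor stack builds Docker images
    let (coordinators, test) = new_test(network).await;
    test
      .run_async(|ops| async move {
        // ... original test body ...
        let _ = (coordinators, ops);
      })
      .await;
  }
}
```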

View File

@@ -142,23 +142,25 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair {
key_pair
}
#[test]
fn key_gen_test() {
#[tokio::test]
async fn key_gen_test() {
for network in [NetworkId::Bitcoin, NetworkId::Monero] {
let (coordinators, test) = new_test(network);
let (coordinators, test) = new_test(network).await;
test.run(|ops| async move {
// Sleep for a second for the message-queue to boot
// It isn't an error to start immediately, it just silences an error
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
test
.run_async(|ops| async move {
// Sleep for a second for the message-queue to boot
// It isn't an error to start immediately, it just silences an error
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
// Connect to the Message Queues as the coordinator
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
// Connect to the Message Queues as the coordinator
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
key_gen(&mut coordinators).await;
});
key_gen(&mut coordinators).await;
})
.await;
}
}

View File

@@ -17,11 +17,13 @@ mod send;
pub(crate) const COORDINATORS: usize = 4;
pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;
fn new_test(network: NetworkId) -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) {
pub(crate) async fn new_test(
network: NetworkId,
) -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) {
let mut coordinators = vec![];
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
for _ in 0 .. COORDINATORS {
let (handles, coord_key, compositions) = processor_stack(network);
let (handles, coord_key, compositions) = processor_stack(network).await;
coordinators.push((handles, coord_key));
for composition in compositions {
test.provide_container(composition);

View File

@@ -142,163 +142,166 @@ pub(crate) async fn sign_tx(
tx.unwrap()
}
#[test]
fn send_test() {
#[tokio::test]
async fn send_test() {
for network in [NetworkId::Bitcoin, NetworkId::Monero] {
let (coordinators, test) = new_test(network);
let (coordinators, test) = new_test(network).await;
test.run(|ops| async move {
tokio::time::sleep(Duration::from_secs(1)).await;
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
// Create a wallet before we start generating keys
let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Generate keys
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
// it was confirmed at)
// Mine multiple sets of medians to ensure the median is sufficiently advanced
for _ in 0 .. (10 * confirmations(network)) {
coordinators[0].add_block(&ops).await;
test
.run_async(|ops| async move {
tokio::time::sleep(Duration::from_secs(1)).await;
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Send into the processor's wallet
let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, None).await;
for coordinator in &mut coordinators {
coordinator.publish_transacton(&ops, &tx).await;
}
let mut coordinators = coordinators
.into_iter()
.map(|(handles, key)| Coordinator::new(network, &ops, handles, key))
.collect::<Vec<_>>();
// Put the TX past the confirmation depth
let mut block_with_tx = None;
for _ in 0 .. confirmations(network) {
let (hash, _) = coordinators[0].add_block(&ops).await;
if block_with_tx.is_none() {
block_with_tx = Some(hash);
// Create a wallet before we start generating keys
let mut wallet = Wallet::new(network, &ops, coordinators[0].network_handle.clone()).await;
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Generate keys
let key_pair = key_gen(&mut coordinators).await;
// Now we have to mine blocks to activate the key
// (the first key is activated when the network's time as of a block exceeds the Serai time
// it was confirmed at)
// Mine multiple sets of medians to ensure the median is sufficiently advanced
for _ in 0 .. (10 * confirmations(network)) {
coordinators[0].add_block(&ops).await;
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// Sleep for 10s
// The scanner works on a 5s interval, so this leaves a few s for any processing/latency
tokio::time::sleep(Duration::from_secs(10)).await;
let expected_batch =
Batch { network, id: 0, block: BlockHash(block_with_tx.unwrap()), instructions: vec![] };
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Continue with signing the batch
let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;
// Check it
assert_eq!(batch.batch, expected_batch);
// Fire a SubstrateBlock with a burn
let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
let serai_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
let mut plans = vec![];
for coordinator in &mut coordinators {
let these_plans = substrate_block(
coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
block: substrate_block_num,
burns: vec![OutInstructionWithBalance {
instruction: OutInstruction { address: wallet.address(), data: None },
balance: balance_sent,
}],
batches: vec![batch.batch.id],
},
)
.await;
if plans.is_empty() {
plans = these_plans;
} else {
assert_eq!(plans, these_plans);
}
}
assert_eq!(plans.len(), 1);
// Start signing the TX
let (mut id, mut preprocesses) =
recv_sign_preprocesses(&mut coordinators, Session(0), 0).await;
assert_eq!(id, SignId { session: Session(0), id: plans[0].id, attempt: 0 });
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
// It should be able to assert it's perfectly sequential
id.attempt = attempt;
for coordinator in coordinators.iter_mut() {
coordinator
.send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
.await;
}
(id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await;
}
let participating = preprocesses.keys().cloned().collect::<Vec<_>>();
let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await;
// Make sure all participating nodes published the TX
let participating =
participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::<HashSet<_>>();
for participant in &participating {
assert!(coordinators[*participant].get_transaction(&ops, &tx_id).await.is_some());
}
// Publish this transaction to the left out nodes
let tx = coordinators[*participating.iter().next().unwrap()]
.get_transaction(&ops, &tx_id)
.await
.unwrap();
for (i, coordinator) in coordinators.iter_mut().enumerate() {
if !participating.contains(&i) {
// Send into the processor's wallet
let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, None).await;
for coordinator in &mut coordinators {
coordinator.publish_transacton(&ops, &tx).await;
// Tell them of it as a completion of the relevant signing nodes
coordinator
.send_message(messages::sign::CoordinatorMessage::Completed {
session: Session(0),
id: id.id,
tx: tx_id.clone(),
})
.await;
// Verify they send Completed back
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
session,
id: this_id,
tx: this_tx,
}) => {
assert_eq!(session, Session(0));
assert_eq!(&this_id, &id.id);
assert_eq!(this_tx, tx_id);
}
_ => panic!("processor didn't send Completed"),
}
// Put the TX past the confirmation depth
let mut block_with_tx = None;
for _ in 0 .. confirmations(network) {
let (hash, _) = coordinators[0].add_block(&ops).await;
if block_with_tx.is_none() {
block_with_tx = Some(hash);
}
}
}
coordinators[0].sync(&ops, &coordinators[1 ..]).await;
// TODO: Test the Eventuality from the blockchain, instead of from the coordinator
// TODO: Test what happens when Completed is sent with a non-existent TX ID
// TODO: Test what happens when Completed is sent with a non-completing TX ID
});
// Sleep for 10s
// The scanner works on a 5s interval, so this leaves a few s for any processing/latency
tokio::time::sleep(Duration::from_secs(10)).await;
let expected_batch =
Batch { network, id: 0, block: BlockHash(block_with_tx.unwrap()), instructions: vec![] };
// Make sure the processors picked it up by checking they're trying to sign a batch for it
let (id, preprocesses) =
recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await;
// Continue with signing the batch
let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await;
// Check it
assert_eq!(batch.batch, expected_batch);
// Fire a SubstrateBlock with a burn
let substrate_block_num = (OsRng.next_u64() % 4_000_000_000u64) + 1;
let serai_time =
SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();
let mut plans = vec![];
for coordinator in &mut coordinators {
let these_plans = substrate_block(
coordinator,
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time,
network_latest_finalized_block: batch.batch.block,
},
block: substrate_block_num,
burns: vec![OutInstructionWithBalance {
instruction: OutInstruction { address: wallet.address(), data: None },
balance: balance_sent,
}],
batches: vec![batch.batch.id],
},
)
.await;
if plans.is_empty() {
plans = these_plans;
} else {
assert_eq!(plans, these_plans);
}
}
assert_eq!(plans.len(), 1);
// Start signing the TX
let (mut id, mut preprocesses) =
recv_sign_preprocesses(&mut coordinators, Session(0), 0).await;
assert_eq!(id, SignId { session: Session(0), id: plans[0].id, attempt: 0 });
// Trigger a random amount of re-attempts
for attempt in 1 ..= u32::try_from(OsRng.next_u64() % 4).unwrap() {
// TODO: Double check how the processor handles this ID field
// It should be able to assert it's perfectly sequential
id.attempt = attempt;
for coordinator in coordinators.iter_mut() {
coordinator
.send_message(messages::sign::CoordinatorMessage::Reattempt { id: id.clone() })
.await;
}
(id, preprocesses) = recv_sign_preprocesses(&mut coordinators, Session(0), attempt).await;
}
let participating = preprocesses.keys().cloned().collect::<Vec<_>>();
let tx_id = sign_tx(&mut coordinators, Session(0), id.clone(), preprocesses).await;
// Make sure all participating nodes published the TX
let participating =
participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::<HashSet<_>>();
for participant in &participating {
assert!(coordinators[*participant].get_transaction(&ops, &tx_id).await.is_some());
}
// Publish this transaction to the left out nodes
let tx = coordinators[*participating.iter().next().unwrap()]
.get_transaction(&ops, &tx_id)
.await
.unwrap();
for (i, coordinator) in coordinators.iter_mut().enumerate() {
if !participating.contains(&i) {
coordinator.publish_transacton(&ops, &tx).await;
// Tell them of it as a completion of the relevant signing nodes
coordinator
.send_message(messages::sign::CoordinatorMessage::Completed {
session: Session(0),
id: id.id,
tx: tx_id.clone(),
})
.await;
// Verify they send Completed back
match coordinator.recv_message().await {
messages::ProcessorMessage::Sign(messages::sign::ProcessorMessage::Completed {
session,
id: this_id,
tx: this_tx,
}) => {
assert_eq!(session, Session(0));
assert_eq!(&this_id, &id.id);
assert_eq!(this_tx, tx_id);
}
_ => panic!("processor didn't send Completed"),
}
}
}
// TODO: Test the Eventuality from the blockchain, instead of from the coordinator
// TODO: Test what happens when Completed is sent with a non-existent TX ID
// TODO: Test what happens when Completed is sent with a non-completing TX ID
})
.await;
}
}