Validator DHT (#494)

* Route validators for any active set through sc-authority-discovery

Additionally adds an RPC route to retrieve their P2P addresses (a client-side usage sketch follows this commit list).

* Have the coordinator get peers from Substrate

* Have the RPC return one address, not up to 3

Prevents the coordinator from believing it has 3 peers when it only has one.

* Add missing feature to serai-client

* Correct network argument in serai-client for p2p_validators call

* Add a test in serai-client to check DHT population, which fails much more quickly than the coordinator tests do

* Update to latest Substrate

Removes the distinction between the BABE and AuthorityDiscovery keys, which causes
sc_authority_discovery to populate the DHT as desired.

* Update to a properly tagged substrate commit

* Add all dialed-to peers to GossipSub (see the GossipSub sketch after this commit list)

* cargo fmt

* Reduce common code in serai-coordinator-tests with a more involved new_test

* Use a recursive async function to spawn `n` DockerTests with the necessary networking configuration

* Merge UNIQUE_ID and ONE_AT_A_TIME

* Tidy up the new recursive code in tests/coordinator

* Use a Mutex in CONTEXT to let it be set multiple times

* Make complementary edits to full-stack tests

* Augment coordinator P2p connection logs

* Drop lock acquisitions before recursing

* Better scope lock acquisitions in full-stack, preventing a deadlock

* Ensure OUTER_OPS is reset across the test boundary

* Add cargo deny allowance for dockertest fork
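
The two sketches below illustrate the client-facing side of these changes. They are illustrative only: the exact serai-client names and signatures (`Serai::new`, `p2p_validators`, and the address type returned) are assumptions inferred from the commit messages above, not the verified API.

```rust
use serai_client::{primitives::NetworkId, Serai};

// Hypothetical sketch: query the new RPC route for validator P2P addresses.
// Per the commits above, the call takes a network argument and returns one
// address per validator in the active set (not up to 3).
async fn print_validator_p2p_addresses(serai_url: &str) {
  let serai = Serai::new(serai_url).await.expect("couldn't connect to the Serai node");
  let addresses =
    serai.p2p_validators(NetworkId::Bitcoin).await.expect("p2p_validators RPC call failed");
  for address in addresses {
    println!("validator P2P address: {address}");
  }
}
```

For "Add all dialed-to peers to GossipSub", rust-libp2p's `add_explicit_peer` keeps a peer in GossipSub's peer set regardless of mesh membership. The helper below is a hypothetical illustration of wiring in a freshly dialed peer, not the coordinator's actual code.

```rust
use libp2p::{gossipsub, PeerId};

// Hypothetical helper: after dialing a peer discovered via the Substrate DHT,
// register it with GossipSub so it is always considered for message propagation.
fn register_dialed_peer(behaviour: &mut gossipsub::Behaviour, peer: &PeerId) {
  behaviour.add_explicit_peer(peer);
}
```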
Author: Luke Parker
Date: 2023-12-22 21:09:18 -05:00
Committed by: GitHub
Parent: 00774c29d7
Commit: b493e3e31f
28 changed files with 1551 additions and 1225 deletions


@@ -1,9 +1,8 @@
#![allow(clippy::needless_pass_by_ref_mut)] // False positives
use std::{
sync::{OnceLock, Arc, Mutex},
sync::{OnceLock, Arc},
time::Duration,
fs,
};
use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex};
@@ -27,16 +26,11 @@ use serai_message_queue::{Service, Metadata, client::MessageQueue};
use serai_client::{primitives::Signature, Serai};
use dockertest::{
PullPolicy, Image, LogAction, LogPolicy, LogSource, LogOptions, StartPolicy,
TestBodySpecification, DockerOperations,
};
use dockertest::{PullPolicy, Image, TestBodySpecification, DockerOperations};
#[cfg(test)]
mod tests;
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
pub fn coordinator_instance(
name: &str,
message_queue_key: <Ristretto as Ciphersuite>::F,
@@ -81,78 +75,6 @@ pub fn serai_composition(name: &str) -> TestBodySpecification {
.set_publish_all_ports(true)
}
pub type Handles = (String, String, String);
pub fn coordinator_stack(
name: &str,
) -> (Handles, <Ristretto as Ciphersuite>::F, Vec<TestBodySpecification>) {
let serai_composition = serai_composition(name);
let (coord_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();
let coordinator_composition = coordinator_instance(name, coord_key);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate a 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let unique_id_mutex = UNIQUE_ID.get_or_init(|| Mutex::new(0));
let mut unique_id_lock = unique_id_mutex.lock().unwrap();
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = [std::env::current_dir().unwrap().to_str().unwrap(), ".test-logs", "coordinator"]
.iter()
.collect::<std::path::PathBuf>();
if first {
let _ = fs::remove_dir_all(&logs_path);
fs::create_dir_all(&logs_path).expect("couldn't create logs directory");
assert!(
fs::read_dir(&logs_path).expect("couldn't read the logs folder").next().is_none(),
"logs folder wasn't empty, despite removing it at the start of the run",
);
}
let logs_path = logs_path.to_str().unwrap().to_string();
let mut compositions = vec![];
let mut handles = vec![];
for (name, composition) in [
("serai_node", serai_composition),
("message_queue", message_queue_composition),
("coordinator", coordinator_composition),
] {
let handle = format!("coordinator-{name}-{unique_id}");
compositions.push(
composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options(
Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
}),
),
);
handles.push(handle);
}
let coordinator_composition = compositions.last_mut().unwrap();
coordinator_composition.inject_container_name(handles[0].clone(), "SERAI_HOSTNAME");
coordinator_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC");
(
(handles[0].clone(), handles[1].clone(), handles[2].clone()),
message_queue_keys[&NetworkId::Bitcoin],
compositions,
)
}
fn is_cosign_message(msg: &CoordinatorMessage) -> bool {
matches!(
msg,
@@ -176,15 +98,19 @@ fn is_cosign_message(msg: &CoordinatorMessage) -> bool {
)
}
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Handles {
pub(crate) serai: String,
pub(crate) message_queue: String,
}
#[derive(Clone)]
pub struct Processor {
network: NetworkId,
serai_rpc: String,
#[allow(unused)]
message_queue_handle: String,
#[allow(unused)]
coordinator_handle: String,
handles: Handles,
queue: Arc<AsyncMutex<(u64, u64, MessageQueue)>>,
abort_handle: Option<Arc<AbortHandle>>,
@@ -205,14 +131,14 @@ impl Processor {
raw_i: u8,
network: NetworkId,
ops: &DockerOperations,
handles: (String, String, String),
handles: Handles,
processor_key: <Ristretto as Ciphersuite>::F,
) -> Processor {
let message_queue_rpc = ops.handle(&handles.1).host_port(2287).unwrap();
let message_queue_rpc = ops.handle(&handles.message_queue).host_port(2287).unwrap();
let message_queue_rpc = format!("{}:{}", message_queue_rpc.0, message_queue_rpc.1);
// Sleep until the Substrate RPC starts
let serai_rpc = ops.handle(&handles.0).host_port(9944).unwrap();
let serai_rpc = ops.handle(&handles.serai).host_port(9944).unwrap();
let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1);
// Bound execution to 60 seconds
for _ in 0 .. 60 {
@@ -231,8 +157,7 @@ impl Processor {
network,
serai_rpc,
message_queue_handle: handles.1,
coordinator_handle: handles.2,
handles,
queue: Arc::new(AsyncMutex::new((
0,


@@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::Duration,
collections::{HashSet, HashMap},
};
@@ -261,43 +260,21 @@ pub async fn batch(
#[tokio::test]
async fn batch_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
let (processor_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
batch(
&mut processors,
&processor_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: BlockHash([0x22; 32]),
instructions: vec![],
},
)
.await;
})
new_test(|mut processors: Vec<Processor>| async move {
let (processor_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
batch(
&mut processors,
&processor_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: BlockHash([0x22; 32]),
instructions: vec![],
},
)
.await;
})
.await;
}


@@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::{Duration, SystemTime},
collections::HashMap,
};
@@ -221,30 +220,8 @@ pub async fn key_gen<C: Ciphersuite>(
#[tokio::test]
async fn key_gen_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
key_gen::<Secp256k1>(&mut processors).await;
})
.await;
new_test(|mut processors: Vec<Processor>| async move {
key_gen::<Secp256k1>(&mut processors).await;
})
.await;
}


@@ -1,8 +1,14 @@
use std::sync::OnceLock;
use core::future::Future;
use std::{sync::OnceLock, collections::HashMap};
use ciphersuite::Ristretto;
use tokio::sync::Mutex;
use dockertest::DockerTest;
use dockertest::{
LogAction, LogPolicy, LogSource, LogOptions, StartPolicy, TestBodySpecification,
DockerOperations, DockerTest,
};
use serai_docker_tests::fresh_logs_folder;
use crate::*;
@@ -19,13 +25,28 @@ pub use sign::sign;
pub(crate) const COORDINATORS: usize = 4;
pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1;
pub(crate) static ONE_AT_A_TIME: OnceLock<Mutex<()>> = OnceLock::new();
// Provides a unique ID and ensures only one invocation occurs at a time.
static UNIQUE_ID: OnceLock<Mutex<u16>> = OnceLock::new();
#[async_trait::async_trait]
pub(crate) trait TestBody: 'static + Send + Sync {
async fn body(&self, processors: Vec<Processor>);
}
#[async_trait::async_trait]
impl<F: Send + Future, TB: 'static + Send + Sync + Fn(Vec<Processor>) -> F> TestBody for TB {
async fn body(&self, processors: Vec<Processor>) {
(self)(processors).await;
}
}
pub(crate) async fn new_test(test_body: impl TestBody) {
let mut unique_id_lock = UNIQUE_ID.get_or_init(|| Mutex::new(0)).lock().await;
pub(crate) fn new_test() -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, DockerTest) {
let mut coordinators = vec![];
let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
let mut coordinator_compositions = vec![];
for i in 0 .. COORDINATORS {
let (handles, coord_key, compositions) = coordinator_stack(match i {
let name = match i {
0 => "Alice",
1 => "Bob",
2 => "Charlie",
@@ -33,13 +54,158 @@ pub(crate) fn new_test() -> (Vec<(Handles, <Ristretto as Ciphersuite>::F)>, Dock
4 => "Eve",
5 => "Ferdie",
_ => panic!("needed a 7th name for a serai node"),
});
coordinators.push((handles, coord_key));
};
let serai_composition = serai_composition(name);
let (processor_key, message_queue_keys, message_queue_composition) =
serai_message_queue_tests::instance();
let coordinator_composition = coordinator_instance(name, processor_key);
// Give every item in this stack a unique ID
// Uses a Mutex as we can't generate an 8-byte random ID without hitting hostname length limits
let (first, unique_id) = {
let first = *unique_id_lock == 0;
let unique_id = *unique_id_lock;
*unique_id_lock += 1;
(first, unique_id)
};
let logs_path = fresh_logs_folder(first, "coordinator");
let mut compositions = vec![];
let mut handles = HashMap::new();
for (name, composition) in [
("serai_node", serai_composition),
("message_queue", message_queue_composition),
("coordinator", coordinator_composition),
] {
let handle = format!("coordinator-{name}-{unique_id}");
compositions.push(
composition
.set_start_policy(StartPolicy::Strict)
.set_handle(handle.clone())
.set_log_options(Some(LogOptions {
action: if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
LogAction::Forward
} else {
LogAction::ForwardToFile { path: logs_path.clone() }
},
policy: LogPolicy::Always,
source: LogSource::Both,
})),
);
handles.insert(name, handle);
}
let processor_key = message_queue_keys[&NetworkId::Bitcoin];
coordinators.push((
Handles {
serai: handles.remove("serai_node").unwrap(),
message_queue: handles.remove("message_queue").unwrap(),
},
processor_key,
));
coordinator_compositions.push(compositions.pop().unwrap());
for composition in compositions {
test.provide_container(composition);
}
}
(coordinators, test)
struct Context {
pending_coordinator_compositions: Mutex<Vec<TestBodySpecification>>,
handles_and_keys: Vec<(Handles, <Ristretto as Ciphersuite>::F)>,
test_body: Box<dyn TestBody>,
}
static CONTEXT: OnceLock<Mutex<Option<Context>>> = OnceLock::new();
*CONTEXT.get_or_init(|| Mutex::new(None)).lock().await = Some(Context {
pending_coordinator_compositions: Mutex::new(coordinator_compositions),
handles_and_keys: coordinators,
test_body: Box::new(test_body),
});
// The DockerOperations from the first invocation, containing the Message Queue servers and the
// Serai nodes.
static OUTER_OPS: OnceLock<Mutex<Option<DockerOperations>>> = OnceLock::new();
// Reset OUTER_OPS
*OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None;
// Spawns a coordinator, if one has yet to be spawned, or else runs the test.
#[async_recursion::async_recursion]
async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) {
// If the outer operations have yet to be set, these *are* the outer operations
let outer_ops = OUTER_OPS.get().unwrap();
if outer_ops.lock().await.is_none() {
*outer_ops.lock().await = Some(inner_ops);
}
let context_lock = CONTEXT.get().unwrap().lock().await;
let Context { pending_coordinator_compositions, handles_and_keys: coordinators, test_body } =
context_lock.as_ref().unwrap();
// Check if there is a coordinator left
let maybe_coordinator = {
let mut remaining = pending_coordinator_compositions.lock().await;
let maybe_coordinator = if !remaining.is_empty() {
let handles = coordinators[coordinators.len() - remaining.len()].0.clone();
let composition = remaining.remove(0);
Some((composition, handles))
} else {
None
};
drop(remaining);
maybe_coordinator
};
if let Some((mut composition, handles)) = maybe_coordinator {
let network = {
let outer_ops = outer_ops.lock().await;
let outer_ops = outer_ops.as_ref().unwrap();
// Spawn it by building another DockerTest which recursively calls this function
// TODO: Spawn this outside of DockerTest so we can remove the recursion
let serai_container = outer_ops.handle(&handles.serai);
composition.modify_env("SERAI_HOSTNAME", serai_container.ip());
let message_queue_container = outer_ops.handle(&handles.message_queue);
composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip());
format!("container:{}", serai_container.name())
};
let mut test = DockerTest::new().with_network(dockertest::Network::External(network));
test.provide_container(composition);
drop(context_lock);
test.run_async(spawn_coordinator_or_run_test).await;
} else {
let outer_ops = outer_ops.lock().await.take().unwrap();
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC we can query
tokio::time::sleep(Duration::from_secs(60)).await;
// Connect to the Message Queues as the processor
let mut processors: Vec<Processor> = vec![];
for (i, (handles, key)) in coordinators.iter().enumerate() {
processors.push(
Processor::new(
i.try_into().unwrap(),
NetworkId::Bitcoin,
&outer_ops,
handles.clone(),
*key,
)
.await,
);
}
test_body.body(processors).await;
}
}
test.run_async(spawn_coordinator_or_run_test).await;
}
// TODO: Don't use a pessimistic sleep


@@ -1,5 +1,4 @@
use std::{
sync::Mutex,
time::Duration,
collections::{HashSet, HashMap},
};
@@ -169,186 +168,161 @@ pub async fn sign(
#[tokio::test]
async fn sign_test() {
let _one_at_a_time = ONE_AT_A_TIME.get_or_init(|| Mutex::new(())).lock();
let (processors, test) = new_test();
new_test(|mut processors: Vec<Processor>| async move {
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
test
.run_async(|ops| async move {
// Wait for the Serai node to boot, and for the Tendermint chain to get past the first block
// TODO: Replace this with a Coordinator RPC
tokio::time::sleep(Duration::from_secs(150)).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
let (serai_pair, serai_addr) = {
let mut name = [0; 4];
OsRng.fill_bytes(&mut name);
let pair = insecure_pair_from_name(&hex::encode(name));
let address = SeraiAddress::from(pair.public());
// Sleep even longer if in the CI due to it being slower than commodity hardware
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(120)).await;
}
// Connect to the Message Queues as the processor
let mut new_processors: Vec<Processor> = vec![];
for (i, (handles, key)) in processors.into_iter().enumerate() {
new_processors.push(
Processor::new(i.try_into().unwrap(), NetworkId::Bitcoin, &ops, handles, key).await,
);
}
let mut processors = new_processors;
let (participant_is, substrate_key, _) = key_gen::<Secp256k1>(&mut processors).await;
// 'Send' external coins into Serai
let serai = processors[0].serai().await;
let (serai_pair, serai_addr) = {
let mut name = [0; 4];
OsRng.fill_bytes(&mut name);
let pair = insecure_pair_from_name(&hex::encode(name));
let address = SeraiAddress::from(pair.public());
// Fund the new account to pay for fees
let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
serai
.publish(&serai.sign(
&insecure_pair_from_name("Ferdie"),
SeraiCoins::transfer(address, balance),
0,
Default::default(),
))
.await
.unwrap();
(pair, address)
};
#[allow(clippy::inconsistent_digit_grouping)]
let amount = Amount(1_000_000_00);
let balance = Balance { coin: Coin::Bitcoin, amount };
let coin_block = BlockHash([0x33; 32]);
let block_included_in = batch(
&mut processors,
&participant_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: coin_block,
instructions: vec![InInstructionWithBalance {
instruction: InInstruction::Transfer(serai_addr),
balance,
}],
},
)
.await;
{
let block_included_in_hash =
serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();
let serai = serai.as_of(block_included_in_hash);
let serai = serai.coins();
assert_eq!(
serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(),
Amount(1_000_000_000)
);
// Verify the mint occurred as expected
assert_eq!(
serai.mint_events().await.unwrap(),
vec![CoinsEvent::Mint { to: serai_addr, balance }]
);
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount);
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount);
}
// Trigger a burn
let out_instruction = OutInstructionWithBalance {
balance,
instruction: OutInstruction {
address: ExternalAddress::new(b"external".to_vec()).unwrap(),
data: None,
},
};
// Fund the new account to pay for fees
let balance = Balance { coin: Coin::Serai, amount: Amount(1_000_000_000) };
serai
.publish(&serai.sign(
&serai_pair,
SeraiCoins::burn_with_instruction(out_instruction.clone()),
&insecure_pair_from_name("Ferdie"),
SeraiCoins::transfer(address, balance),
0,
Default::default(),
))
.await
.unwrap();
// TODO: We *really* need a helper for this pattern
let mut last_serai_block = block_included_in;
'outer: for _ in 0 .. 20 {
tokio::time::sleep(Duration::from_secs(6)).await;
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(6)).await;
}
(pair, address)
};
while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {
let burn_events = serai
.as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())
.coins()
.burn_with_instruction_events()
.await
.unwrap();
#[allow(clippy::inconsistent_digit_grouping)]
let amount = Amount(1_000_000_00);
let balance = Balance { coin: Coin::Bitcoin, amount };
if !burn_events.is_empty() {
assert_eq!(burn_events.len(), 1);
assert_eq!(
burn_events[0],
CoinsEvent::BurnWithInstruction {
from: serai_addr,
instruction: out_instruction.clone()
}
);
break 'outer;
}
last_serai_block += 1;
}
}
let last_serai_block =
serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();
let last_serai_block_hash = last_serai_block.hash();
let serai = serai.as_of(last_serai_block_hash);
let serai = serai.coins();
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0));
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0));
let mut plan_id = [0; 32];
OsRng.fill_bytes(&mut plan_id);
let plan_id = plan_id;
// We should now get a SubstrateBlock
for processor in &mut processors {
assert_eq!(
processor.recv_message().await,
messages::CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time: last_serai_block.time().unwrap() / 1000,
network_latest_finalized_block: coin_block,
},
block: last_serai_block.number(),
burns: vec![out_instruction.clone()],
batches: vec![],
}
)
);
// Send the ACK, claiming there's a plan to sign
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
block: last_serai_block.number(),
plans: vec![PlanMeta { session: Session(0), id: plan_id }],
},
))
.await;
}
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
let coin_block = BlockHash([0x33; 32]);
let block_included_in = batch(
&mut processors,
&participant_is,
Session(0),
&substrate_key,
Batch {
network: NetworkId::Bitcoin,
id: 0,
block: coin_block,
instructions: vec![InInstructionWithBalance {
instruction: InInstruction::Transfer(serai_addr),
balance,
}],
},
)
.await;
{
let block_included_in_hash =
serai.finalized_block_by_number(block_included_in).await.unwrap().unwrap().hash();
let serai = serai.as_of(block_included_in_hash);
let serai = serai.coins();
assert_eq!(serai.coin_balance(Coin::Serai, serai_addr).await.unwrap(), Amount(1_000_000_000));
// Verify the mint occurred as expected
assert_eq!(
serai.mint_events().await.unwrap(),
vec![CoinsEvent::Mint { to: serai_addr, balance }]
);
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), amount);
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), amount);
}
// Trigger a burn
let out_instruction = OutInstructionWithBalance {
balance,
instruction: OutInstruction {
address: ExternalAddress::new(b"external".to_vec()).unwrap(),
data: None,
},
};
serai
.publish(&serai.sign(
&serai_pair,
SeraiCoins::burn_with_instruction(out_instruction.clone()),
0,
Default::default(),
))
.await
.unwrap();
// TODO: We *really* need a helper for this pattern
let mut last_serai_block = block_included_in;
'outer: for _ in 0 .. 20 {
tokio::time::sleep(Duration::from_secs(6)).await;
if std::env::var("GITHUB_CI") == Ok("true".to_string()) {
tokio::time::sleep(Duration::from_secs(6)).await;
}
while last_serai_block <= serai.latest_finalized_block().await.unwrap().number() {
let burn_events = serai
.as_of(serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap().hash())
.coins()
.burn_with_instruction_events()
.await
.unwrap();
if !burn_events.is_empty() {
assert_eq!(burn_events.len(), 1);
assert_eq!(
burn_events[0],
CoinsEvent::BurnWithInstruction {
from: serai_addr,
instruction: out_instruction.clone()
}
);
break 'outer;
}
last_serai_block += 1;
}
}
let last_serai_block =
serai.finalized_block_by_number(last_serai_block).await.unwrap().unwrap();
let last_serai_block_hash = last_serai_block.hash();
let serai = serai.as_of(last_serai_block_hash);
let serai = serai.coins();
assert_eq!(serai.coin_supply(Coin::Bitcoin).await.unwrap(), Amount(0));
assert_eq!(serai.coin_balance(Coin::Bitcoin, serai_addr).await.unwrap(), Amount(0));
let mut plan_id = [0; 32];
OsRng.fill_bytes(&mut plan_id);
let plan_id = plan_id;
// We should now get a SubstrateBlock
for processor in &mut processors {
assert_eq!(
processor.recv_message().await,
messages::CoordinatorMessage::Substrate(
messages::substrate::CoordinatorMessage::SubstrateBlock {
context: SubstrateContext {
serai_time: last_serai_block.time().unwrap() / 1000,
network_latest_finalized_block: coin_block,
},
block: last_serai_block.number(),
burns: vec![out_instruction.clone()],
batches: vec![],
}
)
);
// Send the ACK, claiming there's a plan to sign
processor
.send_message(messages::ProcessorMessage::Coordinator(
messages::coordinator::ProcessorMessage::SubstrateBlockAck {
block: last_serai_block.number(),
plans: vec![PlanMeta { session: Session(0), id: plan_id }],
},
))
.await;
}
sign(&mut processors, &participant_is, Session(0), plan_id).await;
})
.await;
}