mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-08 12:19:24 +00:00
* Update `build-dependencies` CI action
* Update `develop` to `patch-polkadot-sdk`
Allows us to finally remove the old `serai-dex/substrate` repository _and_
should have CI pass without issue on `develop` again.
The changes made here should be trivial and maintain all prior
behavior/functionality. The most notable are to `chain_spec.rs`, in order to
still use a SCALE-encoded `GenesisConfig` (avoiding `serde_json`).
* CI fixes
* Add `/usr/local/opt/llvm/lib` to paths on macOS hosts
* Attempt to use `LD_LIBRARY_PATH` in macOS GitHub CI
* Use `libp2p 0.56` in `serai-node`
* Correct Windows build dependencies
* Correct `llvm/lib` path on macOS
* Correct how macOS 13 and 14 have different homebrew paths
* Use `sw_vers` instead of `uname` on macOS
Yields the macOS version instead of the kernel's version.
* Replace hard-coded path with the intended env variable to fix macOS 13
* Add `libclang-dev` as dependency to the Debian Dockerfile
* Set the `CODE` storage slot
* Update to a version of substrate without `wasmtimer`
Turns out `wasmtimer` is WASM only. This should restore the node's functioning
on non-WASM environments.
* Restore `clang` as a dependency in the Debian Dockerfile, as we require a C++ compiler
* Move from Debian bookworm to trixie
* Restore `chain_getBlockBin` to the RPC
* Always generate a new key for the P2P network
* Mention every account on-chain before they publish a transaction
`CheckNonce` required accounts have a provider in order to even have their
nonce considered. This shims that by claiming every account has a provider at
the start of a block, if it signs a transaction.
The actual execution could presumably diverge between block building (which
sets the provider before each transaction) and execution (which sets the
providers at the start of the block). It doesn't diverge in our current
configuration and it won't be propagated to `next` (which doesn't use
`CheckNonce`).
Also uses explicit indexes for the `serai_abi::{Call, Event}` `enum`s.
* Adopt `patch-polkadot-sdk` with fixed peering
* Manually insert the authority discovery key into the keystore
I did try pulling in `pallet-authority-discovery` for this, updating
`SessionKeys`, but that was insufficient for whatever reason.
* Update to latest `substrate-wasm-builder`
* Fix timeline for incrementing providers
e1671dd71b incremented the providers for every
single transaction's sender before execution, noting the solution was fragile
but it worked for us at this time. It did not work for us at this time.
The new solution replaces `inc_providers` with direct access to the `Account`
`StorageMap` to increment the providers, achieving the desired goal, _without_
emitting an event (which is ordered, and the disparate order between building
and execution was causing mismatches of the state root).
This solution is also fragile and may also be insufficient. None of this code
exists anymore on `next` however. It just has to work sufficiently for now.
* clippy
402 lines
13 KiB
Rust
402 lines
13 KiB
Rust
use rand_core::{RngCore, OsRng};
|
|
|
|
use sp_core::{
|
|
sr25519::{Public, Pair},
|
|
Pair as PairTrait,
|
|
};
|
|
|
|
use serai_client::{
|
|
primitives::{
|
|
NETWORKS, NetworkId, BlockHash, insecure_pair_from_name, FAST_EPOCH_DURATION,
|
|
TARGET_BLOCK_TIME, ExternalNetworkId, Amount,
|
|
},
|
|
validator_sets::{
|
|
primitives::{Session, ValidatorSet, ExternalValidatorSet, KeyPair},
|
|
ValidatorSetsEvent,
|
|
},
|
|
in_instructions::{
|
|
primitives::{Batch, SignedBatch, batch_message},
|
|
SeraiInInstructions,
|
|
},
|
|
Serai,
|
|
};
|
|
|
|
mod common;
|
|
use common::{
|
|
tx::publish_tx,
|
|
validator_sets::{allocate_stake, deallocate_stake, set_keys},
|
|
};
|
|
|
|
fn get_random_key_pair() -> KeyPair {
|
|
let mut ristretto_key = [0; 32];
|
|
OsRng.fill_bytes(&mut ristretto_key);
|
|
let mut external_key = vec![0; 33];
|
|
OsRng.fill_bytes(&mut external_key);
|
|
KeyPair(Public::from(ristretto_key), external_key.try_into().unwrap())
|
|
}
|
|
|
|
/// Return the key pairs for `network`'s current validators, in on-chain order.
///
/// The on-chain ordering of the active validators is what the MuSig signing
/// process expects, so we look the validators up and then map each one back to
/// its pair from `accounts`.
async fn get_ordered_keys(serai: &Serai, network: NetworkId, accounts: &[Pair]) -> Vec<Pair> {
  // Fetch the currently active validators, as of the latest finalized block
  let validators = serai
    .as_of_latest_finalized_block()
    .await
    .unwrap()
    .validator_sets()
    .active_network_validators(network)
    .await
    .unwrap();

  // Map each validator's public key back to its pair, preserving the on-chain order
  validators
    .into_iter()
    .map(|validator| accounts.iter().find(|pair| pair.public() == validator).unwrap().clone())
    .collect()
}
|
|
|
|
serai_test!(
  // Tests that a validator set may set its keys, and that doing so emits the expected
  // `KeyGen` event and stores the keys on-chain.
  set_keys_test: (|serai: Serai| async move {
    let network = ExternalNetworkId::Bitcoin;
    let set = ExternalValidatorSet { session: Session(0), network };

    // "Alice" is the sole genesis validator in this test environment
    let pair = insecure_pair_from_name("Alice");
    let public = pair.public();

    // Neither of these keys are validated
    // The external key is infeasible to validate on-chain, the Ristretto key is feasible
    // TODO: Should the Ristretto key be validated?
    let key_pair = get_random_key_pair();

    // Make sure the genesis is as expected: one `NewSet` event per network, all at Session(0)
    assert_eq!(
      serai
        .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash())
        .validator_sets()
        .new_set_events()
        .await
        .unwrap(),
      NETWORKS
        .iter()
        .copied()
        .map(|network| ValidatorSetsEvent::NewSet {
          set: ValidatorSet { session: Session(0), network }
        })
        .collect::<Vec<_>>(),
    );

    // Verify the genesis participants for this set are solely Alice
    {
      let vs_serai = serai.as_of_latest_finalized_block().await.unwrap();
      let vs_serai = vs_serai.validator_sets();
      let participants = vs_serai.participants(set.network.into()).await
        .unwrap()
        .unwrap()
        .into_iter()
        .map(|(k, _)| k)
        .collect::<Vec<_>>();
      let participants_ref: &[_] = participants.as_ref();
      assert_eq!(participants_ref, [public].as_ref());
    }

    // Publish the keys and get the block they were included in
    let block = set_keys(&serai, set, key_pair.clone(), &[pair]).await;

    // While the set_keys function should handle this, it's beneficial to
    // independently test it
    let serai = serai.as_of(block);
    let serai = serai.validator_sets();
    assert_eq!(
      serai.key_gen_events().await.unwrap(),
      vec![ValidatorSetsEvent::KeyGen { set, key_pair: key_pair.clone() }]
    );
    assert_eq!(serai.keys(set).await.unwrap(), Some(key_pair));
  })
);
|
|
|
|
// Spins up a five-node network in Docker and, for every network, walks through a full set
// rotation: verify the genesis set, add a fifth participant, (for external networks) set keys,
// remove a participant, complete the handover via a batch, and check pending deallocations.
#[tokio::test]
async fn validator_set_rotation() {
  use dockertest::{
    PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,
    TestBodySpecification, DockerTest,
  };
  use std::collections::HashMap;

  serai_docker_tests::build("serai-fast-epoch".to_string());

  let handle = |name| format!("serai_client-serai_node-{name}");
  // Builds the container spec for one named node ("alice", "bob", ...)
  let composition = |name| {
    TestBodySpecification::with_image(
      Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never),
    )
    .replace_cmd(vec![
      "serai-node".to_string(),
      "--unsafe-rpc-external".to_string(),
      "--rpc-cors".to_string(),
      "all".to_string(),
      "--chain".to_string(),
      "local".to_string(),
      // The node flag selecting which dev identity this node runs as
      format!("--{name}"),
    ])
    .replace_env(HashMap::from([
      ("RUST_LOG".to_string(), "runtime=debug".to_string()),
      ("KEY".to_string(), " ".to_string()),
    ]))
    .set_publish_all_ports(true)
    .set_handle(handle(name))
    .set_start_policy(StartPolicy::Strict)
    .set_log_options(Some(LogOptions {
      action: LogAction::Forward,
      policy: LogPolicy::Always,
      source: LogSource::Both,
    }))
  };

  let mut test = DockerTest::new().with_network(dockertest::Network::Isolated);
  test.provide_container(composition("alice"));
  test.provide_container(composition("bob"));
  test.provide_container(composition("charlie"));
  test.provide_container(composition("dave"));
  test.provide_container(composition("eve"));
  test
    .run_async(|ops| async move {
      // Sleep until the Substrate RPC starts
      let alice = handle("alice");
      let alice_rpc = ops.handle(&alice).host_port(9944).unwrap();
      let alice_rpc = format!("http://{}:{}", alice_rpc.0, alice_rpc.1);

      // Sleep for some time to let the node's RPC come up
      tokio::time::sleep(core::time::Duration::from_secs(20)).await;
      let serai = Serai::new(alice_rpc.clone()).await.unwrap();

      // Make sure the genesis is as expected: one NewSet event per network at Session(0)
      assert_eq!(
        serai
          .as_of(serai.finalized_block_by_number(0).await.unwrap().unwrap().hash())
          .validator_sets()
          .new_set_events()
          .await
          .unwrap(),
        NETWORKS
          .iter()
          .copied()
          .map(|network| ValidatorSetsEvent::NewSet {
            set: ValidatorSet { session: Session(0), network }
          })
          .collect::<Vec<_>>(),
      );

      // genesis accounts
      let accounts = vec![
        insecure_pair_from_name("Alice"),
        insecure_pair_from_name("Bob"),
        insecure_pair_from_name("Charlie"),
        insecure_pair_from_name("Dave"),
        insecure_pair_from_name("Eve"),
      ];

      // amounts for single key share per network
      let key_shares = HashMap::from([
        (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))),
        (NetworkId::External(ExternalNetworkId::Bitcoin), Amount(1_000_000 * 10_u64.pow(8))),
        (NetworkId::External(ExternalNetworkId::Monero), Amount(100_000 * 10_u64.pow(8))),
        (NetworkId::External(ExternalNetworkId::Ethereum), Amount(1_000_000 * 10_u64.pow(8))),
      ]);

      // genesis participants per network (the first four accounts; Eve joins later)
      #[allow(clippy::redundant_closure_for_method_calls)]
      let default_participants =
        accounts[.. 4].to_vec().iter().map(|pair| pair.public()).collect::<Vec<_>>();
      let mut participants = HashMap::from([
        (NetworkId::Serai, default_participants.clone()),
        (NetworkId::External(ExternalNetworkId::Bitcoin), default_participants.clone()),
        (NetworkId::External(ExternalNetworkId::Monero), default_participants.clone()),
        (NetworkId::External(ExternalNetworkId::Ethereum), default_participants),
      ]);

      // test the set rotation
      for (i, network) in NETWORKS.into_iter().enumerate() {
        let participants = participants.get_mut(&network).unwrap();

        // we start the chain with 4 default participants that has a single key share each
        participants.sort();
        verify_session_and_active_validators(&serai, network, 0, participants).await;

        // add 1 participant (the fifth account, with exactly one key share of stake)
        let last_participant = accounts[4].clone();
        let hash = allocate_stake(
          &serai,
          network,
          key_shares[&network],
          &last_participant,
          // `i` doubles as the account nonce for this network's transactions
          i.try_into().unwrap(),
        )
        .await;
        participants.push(last_participant.public());
        // the session at which set changes becomes active
        let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;

        // set the keys if it is an external set
        if network != NetworkId::Serai {
          let set =
            ExternalValidatorSet { session: Session(0), network: network.try_into().unwrap() };
          let key_pair = get_random_key_pair();
          let pairs = get_ordered_keys(&serai, network, &accounts).await;
          set_keys(&serai, set, key_pair, &pairs).await;
        }

        // verify the new set (with the added participant) becomes active
        participants.sort();
        verify_session_and_active_validators(&serai, network, activation_session, participants)
          .await;

        // remove 1 participant by deallocating their single key share
        let participant_to_remove = accounts[1].clone();
        let hash = deallocate_stake(
          &serai,
          network,
          key_shares[&network],
          &participant_to_remove,
          i.try_into().unwrap(),
        )
        .await;
        participants.swap_remove(
          participants.iter().position(|k| *k == participant_to_remove.public()).unwrap(),
        );
        let activation_session = get_session_at_which_changes_activate(&serai, network, hash).await;

        if network != NetworkId::Serai {
          // set the keys if it is an external set
          let set =
            ExternalValidatorSet { session: Session(1), network: network.try_into().unwrap() };

          // we need the whole substrate key pair to sign the batch
          let (substrate_pair, key_pair) = {
            let pair = insecure_pair_from_name("session-1-key-pair");
            let public = pair.public();

            // The external half of the key pair is still random/unvalidated
            let mut external_key = vec![0; 33];
            OsRng.fill_bytes(&mut external_key);

            (pair, KeyPair(public, external_key.try_into().unwrap()))
          };
          let pairs = get_ordered_keys(&serai, network, &accounts).await;
          set_keys(&serai, set, key_pair, &pairs).await;

          // provide a batch to complete the handover and retire the previous set
          let mut block_hash = BlockHash([0; 32]);
          OsRng.fill_bytes(&mut block_hash.0);
          let batch = Batch {
            network: network.try_into().unwrap(),
            id: 0,
            block: block_hash,
            instructions: vec![],
          };
          publish_tx(
            &serai,
            &SeraiInInstructions::execute_batch(SignedBatch {
              batch: batch.clone(),
              signature: substrate_pair.sign(&batch_message(&batch)),
            }),
          )
          .await;
        }

        // verify the set without the removed participant becomes active
        participants.sort();
        verify_session_and_active_validators(&serai, network, activation_session, participants)
          .await;

        // check the removed stake shows up as a pending deallocation
        let pending = serai
          .as_of_latest_finalized_block()
          .await
          .unwrap()
          .validator_sets()
          .pending_deallocations(
            network,
            participant_to_remove.public(),
            Session(activation_session + 1),
          )
          .await
          .unwrap();
        assert_eq!(pending, Some(key_shares[&network]));
      }
    })
    .await;
}
|
|
|
|
/// Fetch the session index active for `network` as of the given block hash.
async fn session_for_block(serai: &Serai, block: [u8; 32], network: NetworkId) -> u32 {
  let as_of = serai.as_of(block);
  let session = as_of.validator_sets().session(network).await.unwrap();
  // The session must exist for any block/network queried by these tests
  session.unwrap().0
}
|
|
|
|
/// Assert that, once `session` activates for `network`, its active validator set is exactly
/// `participants` (which must be pre-sorted), and that finalization keeps advancing afterward.
async fn verify_session_and_active_validators(
  serai: &Serai,
  network: NetworkId,
  session: u32,
  participants: &[Public],
) {
  // wait until the active session, bounding the wait to two epochs' worth of blocks
  let block = tokio::time::timeout(
    core::time::Duration::from_secs(FAST_EPOCH_DURATION * TARGET_BLOCK_TIME * 2),
    async move {
      loop {
        let mut block = serai.latest_finalized_block_hash().await.unwrap();
        if session_for_block(serai, block, network).await < session {
          // Sleep a block
          tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await;
          continue;
        }
        // If finalization overshot the target session, walk back parent hashes until we
        // reach a block within the desired session
        while session_for_block(serai, block, network).await > session {
          block = serai.block(block).await.unwrap().unwrap().header.parent_hash.0;
        }
        assert_eq!(session_for_block(serai, block, network).await, session);
        break block;
      }
    },
  )
  .await
  .unwrap();
  let serai_for_block = serai.as_of(block);

  // verify session
  let s = serai_for_block.validator_sets().session(network).await.unwrap().unwrap();
  assert_eq!(s.0, session);

  // verify participants (sorted, since `participants` is expected pre-sorted by callers)
  let mut validators =
    serai_for_block.validator_sets().active_network_validators(network).await.unwrap();
  validators.sort();
  assert_eq!(validators, participants);

  // make sure finalization continues as usual after the changes
  let current_finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
  // Wait (bounded to ten blocks' time) for at least two further blocks to finalize
  tokio::time::timeout(core::time::Duration::from_secs(TARGET_BLOCK_TIME * 10), async move {
    let mut finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
    while finalized_block <= current_finalized_block + 2 {
      tokio::time::sleep(core::time::Duration::from_secs(TARGET_BLOCK_TIME)).await;
      finalized_block = serai.latest_finalized_block().await.unwrap().header.number;
    }
  })
  .await
  .unwrap();

  // TODO: verify key shares as well?
}
|
|
|
|
async fn get_session_at_which_changes_activate(
|
|
serai: &Serai,
|
|
network: NetworkId,
|
|
hash: [u8; 32],
|
|
) -> u32 {
|
|
let session = session_for_block(serai, hash, network).await;
|
|
|
|
// changes should be active in the next session
|
|
if network == NetworkId::Serai {
|
|
// it takes 1 extra session for serai net to make the changes active.
|
|
session + 2
|
|
} else {
|
|
session + 1
|
|
}
|
|
}
|