mirror of
https://github.com/serai-dex/serai.git
synced 2025-12-08 12:19:24 +00:00
Remove Tendermint for GRANDPA
Updates to polkadot-v0.9.40, with a variety of dependency updates accordingly. Substrate thankfully now uses k256 0.13, pathing the way for #256. We couldn't upgrade to polkadot-v0.9.40 without this due to polkadot-v0.9.40 having fundamental changes to syncing. While we could've updated tendermint, it's not worth the continued development effort given its inability to work with multiple validator sets. Purges sc-tendermint. Keeps tendermint-machine for #163. Closes #137, #148, #157, #171. #96 and #99 should be re-scoped/clarified. #134 and #159 also should be clarified. #169 is also no longer a priority since we're only considering temporal deployments of tendermint. #170 also isn't since we're looking at effectively sharded validator sets, so there should be no singular large set needing high performance.
This commit is contained in:
@@ -2,7 +2,7 @@
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use scale::Encode;
|
||||
|
||||
use sp_runtime::RuntimeDebug;
|
||||
|
||||
@@ -12,7 +12,7 @@ pub use in_instructions_primitives as primitives;
|
||||
use primitives::{InInstruction, InInstructionWithBalance, SignedBatch};
|
||||
|
||||
#[derive(Clone, Copy, Encode, RuntimeDebug)]
|
||||
#[cfg_attr(feature = "std", derive(Decode, thiserror::Error))]
|
||||
#[cfg_attr(feature = "std", derive(scale::Decode, thiserror::Error))]
|
||||
pub enum PalletError {
|
||||
#[cfg_attr(feature = "std", error("batch for unrecognized network"))]
|
||||
UnrecognizedNetwork,
|
||||
@@ -42,7 +42,6 @@ pub mod pallet {
|
||||
}
|
||||
|
||||
#[pallet::pallet]
|
||||
#[pallet::generate_store(pub(crate) trait Store)]
|
||||
pub struct Pallet<T>(PhantomData<T>);
|
||||
|
||||
// Latest block number agreed upon for a coin
|
||||
|
||||
@@ -16,16 +16,19 @@ async-trait = "0.1"
|
||||
|
||||
clap = { version = "4", features = ["derive"] }
|
||||
|
||||
futures = "0.3"
|
||||
jsonrpsee = { version = "0.16", features = ["server"] }
|
||||
|
||||
sp-core = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-keyring = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-inherents = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-timestamp = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-blockchain = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-api = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-block-builder = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-consensus = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-consensus-babe = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
frame-benchmarking = { git = "https://github.com/serai-dex/substrate" }
|
||||
frame-benchmarking-cli = { git = "https://github.com/serai-dex/substrate" }
|
||||
@@ -39,9 +42,18 @@ sc-executor = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-service = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-client-db = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-client-api = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-network-common = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-network = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-consensus = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sc-consensus-babe = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-consensus-babe-rpc = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sc-consensus-grandpa = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-consensus-grandpa-rpc = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sc-authority-discovery = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sc-telemetry = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-cli = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
@@ -50,8 +62,6 @@ sc-rpc-api = { git = "https://github.com/serai-dex/substrate" }
|
||||
substrate-frame-rpc-system = { git = "https://github.com/serai-dex/substrate" }
|
||||
pallet-transaction-payment-rpc = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sc-tendermint = { path = "../tendermint/client" }
|
||||
|
||||
[build-dependencies]
|
||||
substrate-build-script-utils = { git = "https://github.com/serai-dex/substrate.git" }
|
||||
|
||||
|
||||
@@ -3,9 +3,9 @@ use sp_core::Pair as PairTrait;
|
||||
use sc_service::ChainType;
|
||||
|
||||
use serai_runtime::{
|
||||
primitives::*, tokens::primitives::ADDRESS as TOKENS_ADDRESS, tendermint::crypto::Public,
|
||||
WASM_BINARY, opaque::SessionKeys, GenesisConfig, SystemConfig, BalancesConfig, AssetsConfig,
|
||||
ValidatorSetsConfig, SessionConfig,
|
||||
primitives::*, tokens::primitives::ADDRESS as TOKENS_ADDRESS, WASM_BINARY, opaque::SessionKeys,
|
||||
BABE_GENESIS_EPOCH_CONFIG, GenesisConfig, SystemConfig, BalancesConfig, AssetsConfig,
|
||||
ValidatorSetsConfig, SessionConfig, BabeConfig, GrandpaConfig, AuthorityDiscoveryConfig,
|
||||
};
|
||||
|
||||
pub type ChainSpec = sc_service::GenericChainSpec<GenesisConfig>;
|
||||
@@ -21,7 +21,11 @@ fn testnet_genesis(
|
||||
) -> GenesisConfig {
|
||||
let session_key = |name| {
|
||||
let key = account_from_name(name);
|
||||
(key, key, SessionKeys { tendermint: Public::from(key) })
|
||||
(
|
||||
key,
|
||||
key,
|
||||
SessionKeys { babe: key.into(), grandpa: key.into(), authority_discovery: key.into() },
|
||||
)
|
||||
};
|
||||
|
||||
GenesisConfig {
|
||||
@@ -47,7 +51,6 @@ fn testnet_genesis(
|
||||
accounts: vec![],
|
||||
},
|
||||
|
||||
session: SessionConfig { keys: validators.iter().map(|name| session_key(*name)).collect() },
|
||||
validator_sets: ValidatorSetsConfig {
|
||||
bond: Amount(1_000_000 * 10_u64.pow(8)),
|
||||
networks: vec![
|
||||
@@ -57,6 +60,11 @@ fn testnet_genesis(
|
||||
],
|
||||
participants: validators.iter().map(|name| account_from_name(name)).collect(),
|
||||
},
|
||||
session: SessionConfig { keys: validators.iter().map(|name| session_key(*name)).collect() },
|
||||
babe: BabeConfig { authorities: vec![], epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG) },
|
||||
grandpa: GrandpaConfig { authorities: vec![] },
|
||||
|
||||
authority_discovery: AuthorityDiscoveryConfig { keys: vec![] },
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use serai_runtime::Block;
|
||||
|
||||
use sc_service::{PruningMode, PartialComponents};
|
||||
@@ -9,7 +11,7 @@ use crate::{
|
||||
chain_spec,
|
||||
cli::{Cli, Subcommand},
|
||||
command_helper::{RemarkBuilder, inherent_benchmark_data},
|
||||
service,
|
||||
service::{self, FullClient},
|
||||
};
|
||||
|
||||
impl SubstrateCli for Cli {
|
||||
@@ -62,23 +64,23 @@ pub fn run() -> sc_cli::Result<()> {
|
||||
|
||||
Some(Subcommand::CheckBlock(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
|
||||
let PartialComponents { client, task_manager, import_queue, .. } =
|
||||
service::new_partial(&config)?.1;
|
||||
service::new_partial(&config)?;
|
||||
Ok((cmd.run(client, import_queue), task_manager))
|
||||
}),
|
||||
|
||||
Some(Subcommand::ExportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
|
||||
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
|
||||
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
|
||||
Ok((cmd.run(client, config.database), task_manager))
|
||||
}),
|
||||
|
||||
Some(Subcommand::ExportState(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
|
||||
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?.1;
|
||||
let PartialComponents { client, task_manager, .. } = service::new_partial(&config)?;
|
||||
Ok((cmd.run(client, config.chain_spec), task_manager))
|
||||
}),
|
||||
|
||||
Some(Subcommand::ImportBlocks(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
|
||||
let PartialComponents { client, task_manager, import_queue, .. } =
|
||||
service::new_partial(&config)?.1;
|
||||
service::new_partial(&config)?;
|
||||
Ok((cmd.run(client, import_queue), task_manager))
|
||||
}),
|
||||
|
||||
@@ -87,15 +89,19 @@ pub fn run() -> sc_cli::Result<()> {
|
||||
}
|
||||
|
||||
Some(Subcommand::Revert(cmd)) => cli.create_runner(cmd)?.async_run(|config| {
|
||||
let PartialComponents { client, task_manager, backend, .. } =
|
||||
service::new_partial(&config)?.1;
|
||||
Ok((cmd.run(client, backend, None), task_manager))
|
||||
let PartialComponents { client, task_manager, backend, .. } = service::new_partial(&config)?;
|
||||
let aux_revert = Box::new(|client: Arc<FullClient>, backend, blocks| {
|
||||
sc_consensus_babe::revert(client.clone(), backend, blocks)?;
|
||||
sc_consensus_grandpa::revert(client, blocks)?;
|
||||
Ok(())
|
||||
});
|
||||
Ok((cmd.run(client, backend, Some(aux_revert)), task_manager))
|
||||
}),
|
||||
|
||||
Some(Subcommand::Benchmark(cmd)) => cli.create_runner(cmd)?.sync_run(|config| match cmd {
|
||||
BenchmarkCmd::Pallet(cmd) => cmd.run::<Block, service::ExecutorDispatch>(config),
|
||||
|
||||
BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.1.client),
|
||||
BenchmarkCmd::Block(cmd) => cmd.run(service::new_partial(&config)?.client),
|
||||
|
||||
#[cfg(not(feature = "runtime-benchmarks"))]
|
||||
BenchmarkCmd::Storage(_) => {
|
||||
@@ -104,12 +110,12 @@ pub fn run() -> sc_cli::Result<()> {
|
||||
|
||||
#[cfg(feature = "runtime-benchmarks")]
|
||||
BenchmarkCmd::Storage(cmd) => {
|
||||
let PartialComponents { client, backend, .. } = service::new_partial(&config)?.1;
|
||||
let PartialComponents { client, backend, .. } = service::new_partial(&config)?;
|
||||
cmd.run(config, client, backend.expose_db(), backend.expose_storage())
|
||||
}
|
||||
|
||||
BenchmarkCmd::Overhead(cmd) => {
|
||||
let client = service::new_partial(&config)?.1.client;
|
||||
let client = service::new_partial(&config)?.client;
|
||||
cmd.run(
|
||||
config,
|
||||
client.clone(),
|
||||
@@ -120,7 +126,7 @@ pub fn run() -> sc_cli::Result<()> {
|
||||
}
|
||||
|
||||
BenchmarkCmd::Extrinsic(cmd) => {
|
||||
let client = service::new_partial(&config)?.1.client;
|
||||
let client = service::new_partial(&config)?.client;
|
||||
cmd.run(
|
||||
client.clone(),
|
||||
inherent_benchmark_data()?,
|
||||
|
||||
@@ -1,42 +1,24 @@
|
||||
use std::{
|
||||
error::Error,
|
||||
boxed::Box,
|
||||
sync::Arc,
|
||||
time::{UNIX_EPOCH, SystemTime, Duration},
|
||||
str::FromStr,
|
||||
};
|
||||
use std::{boxed::Box, sync::Arc};
|
||||
|
||||
use sp_runtime::traits::{Block as BlockTrait};
|
||||
use sp_inherents::CreateInherentDataProviders;
|
||||
use sp_consensus::DisableProofRecording;
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
use futures::stream::StreamExt;
|
||||
|
||||
use sp_timestamp::InherentDataProvider as TimestampInherent;
|
||||
use sp_consensus_babe::{SlotDuration, inherents::InherentDataProvider as BabeInherent};
|
||||
|
||||
use sc_executor::{NativeVersion, NativeExecutionDispatch, NativeElseWasmExecutor};
|
||||
use sc_transaction_pool::FullPool;
|
||||
use sc_network::NetworkService;
|
||||
|
||||
use sc_network_common::sync::warp::WarpSyncParams;
|
||||
use sc_network::{Event, NetworkEventStream};
|
||||
use sc_service::{error::Error as ServiceError, Configuration, TaskManager, TFullClient};
|
||||
|
||||
use sc_client_api::BlockBackend;
|
||||
|
||||
use sc_telemetry::{Telemetry, TelemetryWorker};
|
||||
|
||||
pub(crate) use sc_tendermint::{
|
||||
TendermintClientMinimal, TendermintValidator, TendermintImport, TendermintAuthority,
|
||||
TendermintSelectChain, import_queue,
|
||||
};
|
||||
use serai_runtime::{self as runtime, BLOCK_SIZE, TARGET_BLOCK_TIME, opaque::Block, RuntimeApi};
|
||||
use serai_runtime::{self as runtime, opaque::Block, RuntimeApi};
|
||||
|
||||
type FullBackend = sc_service::TFullBackend<Block>;
|
||||
pub type FullClient = TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
|
||||
|
||||
type PartialComponents = sc_service::PartialComponents<
|
||||
FullClient,
|
||||
FullBackend,
|
||||
TendermintSelectChain<Block, FullBackend>,
|
||||
sc_consensus::DefaultImportQueue<Block, FullClient>,
|
||||
sc_transaction_pool::FullPool<Block, FullClient>,
|
||||
Option<Telemetry>,
|
||||
>;
|
||||
use sc_consensus_babe::{self, SlotProportion};
|
||||
use sc_consensus_grandpa as grandpa;
|
||||
|
||||
pub struct ExecutorDispatch;
|
||||
impl NativeExecutionDispatch for ExecutorDispatch {
|
||||
@@ -54,57 +36,36 @@ impl NativeExecutionDispatch for ExecutorDispatch {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct Cidp;
|
||||
#[async_trait::async_trait]
|
||||
impl CreateInherentDataProviders<Block, ()> for Cidp {
|
||||
type InherentDataProviders = ();
|
||||
async fn create_inherent_data_providers(
|
||||
&self,
|
||||
_: <Block as BlockTrait>::Hash,
|
||||
_: (),
|
||||
) -> Result<Self::InherentDataProviders, Box<dyn Send + Sync + Error>> {
|
||||
Ok(())
|
||||
}
|
||||
type FullBackend = sc_service::TFullBackend<Block>;
|
||||
pub type FullClient = TFullClient<Block, RuntimeApi, NativeElseWasmExecutor<ExecutorDispatch>>;
|
||||
|
||||
type SelectChain = sc_consensus::LongestChain<FullBackend, Block>;
|
||||
type GrandpaBlockImport = grandpa::GrandpaBlockImport<FullBackend, Block, FullClient, SelectChain>;
|
||||
type BabeBlockImport = sc_consensus_babe::BabeBlockImport<Block, FullClient, GrandpaBlockImport>;
|
||||
|
||||
type PartialComponents = sc_service::PartialComponents<
|
||||
FullClient,
|
||||
FullBackend,
|
||||
SelectChain,
|
||||
sc_consensus::DefaultImportQueue<Block, FullClient>,
|
||||
sc_transaction_pool::FullPool<Block, FullClient>,
|
||||
(
|
||||
BabeBlockImport,
|
||||
sc_consensus_babe::BabeLink<Block>,
|
||||
grandpa::LinkHalf<Block, FullClient, SelectChain>,
|
||||
grandpa::SharedVoterState,
|
||||
Option<Telemetry>,
|
||||
),
|
||||
>;
|
||||
|
||||
fn create_inherent_data_providers(
|
||||
slot_duration: SlotDuration,
|
||||
) -> (BabeInherent, TimestampInherent) {
|
||||
let timestamp = TimestampInherent::from_system_time();
|
||||
(BabeInherent::from_timestamp_and_slot_duration(*timestamp, slot_duration), timestamp)
|
||||
}
|
||||
|
||||
pub struct TendermintValidatorFirm;
|
||||
impl TendermintClientMinimal for TendermintValidatorFirm {
|
||||
// TODO: This is passed directly to propose, which warns not to use the hard limit as finalize
|
||||
// may grow the block. We don't use storage proofs and use the Executive finalize_block. Is that
|
||||
// guaranteed not to grow the block?
|
||||
const PROPOSED_BLOCK_SIZE_LIMIT: usize = { BLOCK_SIZE as usize };
|
||||
// 3 seconds
|
||||
const BLOCK_PROCESSING_TIME_IN_SECONDS: u32 = { (TARGET_BLOCK_TIME / 2) as u32 };
|
||||
// 1 second
|
||||
const LATENCY_TIME_IN_SECONDS: u32 = { (TARGET_BLOCK_TIME / 2 / 3) as u32 };
|
||||
|
||||
type Block = Block;
|
||||
type Backend = sc_client_db::Backend<Block>;
|
||||
type Api = <FullClient as ProvideRuntimeApi<Block>>::Api;
|
||||
type Client = FullClient;
|
||||
}
|
||||
|
||||
impl TendermintValidator for TendermintValidatorFirm {
|
||||
type CIDP = Cidp;
|
||||
type Environment = sc_basic_authorship::ProposerFactory<
|
||||
FullPool<Block, FullClient>,
|
||||
Self::Backend,
|
||||
Self::Client,
|
||||
DisableProofRecording,
|
||||
>;
|
||||
|
||||
type Network = Arc<NetworkService<Block, <Block as BlockTrait>::Hash>>;
|
||||
}
|
||||
|
||||
pub fn new_partial(
|
||||
config: &Configuration,
|
||||
) -> Result<(TendermintImport<TendermintValidatorFirm>, PartialComponents), ServiceError> {
|
||||
debug_assert_eq!(TARGET_BLOCK_TIME, 6);
|
||||
|
||||
if config.keystore_remote.is_some() {
|
||||
return Err(ServiceError::Other("Remote Keystores are not supported".to_string()));
|
||||
}
|
||||
|
||||
pub fn new_partial(config: &Configuration) -> Result<PartialComponents, ServiceError> {
|
||||
let telemetry = config
|
||||
.telemetry_endpoints
|
||||
.clone()
|
||||
@@ -136,6 +97,8 @@ pub fn new_partial(
|
||||
telemetry
|
||||
});
|
||||
|
||||
let select_chain = sc_consensus::LongestChain::new(backend.clone());
|
||||
|
||||
let transaction_pool = sc_transaction_pool::BasicPool::new_full(
|
||||
config.transaction_pool.clone(),
|
||||
config.role.is_authority().into(),
|
||||
@@ -144,55 +107,69 @@ pub fn new_partial(
|
||||
client.clone(),
|
||||
);
|
||||
|
||||
let (authority, import_queue) = import_queue(
|
||||
&task_manager.spawn_essential_handle(),
|
||||
let (grandpa_block_import, grandpa_link) = grandpa::block_import(
|
||||
client.clone(),
|
||||
&client,
|
||||
select_chain.clone(),
|
||||
telemetry.as_ref().map(Telemetry::handle),
|
||||
)?;
|
||||
let justification_import = grandpa_block_import.clone();
|
||||
|
||||
let (block_import, babe_link) = sc_consensus_babe::block_import(
|
||||
sc_consensus_babe::configuration(&*client)?,
|
||||
grandpa_block_import,
|
||||
client.clone(),
|
||||
)?;
|
||||
|
||||
let slot_duration = babe_link.config().slot_duration();
|
||||
let import_queue = sc_consensus_babe::import_queue(
|
||||
babe_link.clone(),
|
||||
block_import.clone(),
|
||||
Some(Box::new(justification_import)),
|
||||
client.clone(),
|
||||
select_chain.clone(),
|
||||
move |_, _| async move { Ok(create_inherent_data_providers(slot_duration)) },
|
||||
&task_manager.spawn_essential_handle(),
|
||||
config.prometheus_registry(),
|
||||
);
|
||||
telemetry.as_ref().map(Telemetry::handle),
|
||||
)?;
|
||||
|
||||
let select_chain = TendermintSelectChain::new(backend.clone());
|
||||
|
||||
Ok((
|
||||
authority,
|
||||
sc_service::PartialComponents {
|
||||
client,
|
||||
backend,
|
||||
task_manager,
|
||||
import_queue,
|
||||
keystore_container,
|
||||
select_chain,
|
||||
transaction_pool,
|
||||
other: telemetry,
|
||||
},
|
||||
))
|
||||
Ok(sc_service::PartialComponents {
|
||||
client,
|
||||
backend,
|
||||
task_manager,
|
||||
keystore_container,
|
||||
select_chain,
|
||||
import_queue,
|
||||
transaction_pool,
|
||||
other: (block_import, babe_link, grandpa_link, grandpa::SharedVoterState::empty(), telemetry),
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceError> {
|
||||
let (
|
||||
authority,
|
||||
sc_service::PartialComponents {
|
||||
client,
|
||||
backend,
|
||||
mut task_manager,
|
||||
import_queue,
|
||||
keystore_container,
|
||||
select_chain: _,
|
||||
other: mut telemetry,
|
||||
transaction_pool,
|
||||
},
|
||||
) = new_partial(&config)?;
|
||||
let sc_service::PartialComponents {
|
||||
client,
|
||||
backend,
|
||||
mut task_manager,
|
||||
import_queue,
|
||||
keystore_container,
|
||||
select_chain,
|
||||
transaction_pool,
|
||||
other: (block_import, babe_link, grandpa_link, shared_voter_state, mut telemetry),
|
||||
} = new_partial(&config)?;
|
||||
|
||||
let is_authority = config.role.is_authority();
|
||||
let genesis = client.block_hash(0).unwrap().unwrap();
|
||||
let tendermint_protocol = sc_tendermint::protocol_name(genesis, config.chain_spec.fork_id());
|
||||
if is_authority {
|
||||
config
|
||||
.network
|
||||
.extra_sets
|
||||
.push(sc_tendermint::set_config(tendermint_protocol.clone(), BLOCK_SIZE.into()));
|
||||
}
|
||||
let publish_non_global_ips = config.network.allow_non_globals_in_dht;
|
||||
let grandpa_protocol_name =
|
||||
grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec);
|
||||
|
||||
let (network, system_rpc_tx, tx_handler_controller, network_starter) =
|
||||
config.network.extra_sets.push(grandpa::grandpa_peers_set_config(grandpa_protocol_name.clone()));
|
||||
let warp_sync = Arc::new(grandpa::warp_proof::NetworkProvider::new(
|
||||
backend.clone(),
|
||||
grandpa_link.shared_authority_set().clone(),
|
||||
vec![],
|
||||
));
|
||||
|
||||
let (network, system_rpc_tx, tx_handler_controller, network_starter, sync_service) =
|
||||
sc_service::build_network(sc_service::BuildNetworkParams {
|
||||
config: &config,
|
||||
client: client.clone(),
|
||||
@@ -200,7 +177,7 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceE
|
||||
spawn_handle: task_manager.spawn_handle(),
|
||||
import_queue,
|
||||
block_announce_validator_builder: None,
|
||||
warp_sync_params: None,
|
||||
warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)),
|
||||
})?;
|
||||
|
||||
if config.offchain_worker.enabled {
|
||||
@@ -212,7 +189,7 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceE
|
||||
);
|
||||
}
|
||||
|
||||
let rpc_extensions_builder = {
|
||||
let rpc_builder = {
|
||||
let client = client.clone();
|
||||
let pool = transaction_pool.clone();
|
||||
|
||||
@@ -226,48 +203,113 @@ pub async fn new_full(mut config: Configuration) -> Result<TaskManager, ServiceE
|
||||
})
|
||||
};
|
||||
|
||||
let genesis_time = if config.chain_spec.id() != "devnet" {
|
||||
UNIX_EPOCH + Duration::from_secs(u64::from_str(&std::env::var("GENESIS").unwrap()).unwrap())
|
||||
} else {
|
||||
SystemTime::now()
|
||||
};
|
||||
let enable_grandpa = !config.disable_grandpa;
|
||||
let role = config.role.clone();
|
||||
let force_authoring = config.force_authoring;
|
||||
let name = config.network.node_name.clone();
|
||||
let prometheus_registry = config.prometheus_registry().cloned();
|
||||
|
||||
let keystore = keystore_container.keystore();
|
||||
|
||||
let registry = config.prometheus_registry().cloned();
|
||||
sc_service::spawn_tasks(sc_service::SpawnTasksParams {
|
||||
network: network.clone(),
|
||||
client: client.clone(),
|
||||
keystore: keystore_container.sync_keystore(),
|
||||
task_manager: &mut task_manager,
|
||||
transaction_pool: transaction_pool.clone(),
|
||||
rpc_builder: rpc_extensions_builder,
|
||||
config,
|
||||
backend,
|
||||
client: client.clone(),
|
||||
keystore: keystore.clone(),
|
||||
network: network.clone(),
|
||||
rpc_builder,
|
||||
transaction_pool: transaction_pool.clone(),
|
||||
task_manager: &mut task_manager,
|
||||
system_rpc_tx,
|
||||
tx_handler_controller,
|
||||
config,
|
||||
sync_service: sync_service.clone(),
|
||||
telemetry: telemetry.as_mut(),
|
||||
})?;
|
||||
|
||||
if is_authority {
|
||||
task_manager.spawn_essential_handle().spawn(
|
||||
"tendermint",
|
||||
None,
|
||||
TendermintAuthority::new(
|
||||
genesis_time,
|
||||
tendermint_protocol,
|
||||
authority,
|
||||
keystore_container.keystore(),
|
||||
Cidp,
|
||||
task_manager.spawn_essential_handle(),
|
||||
sc_basic_authorship::ProposerFactory::new(
|
||||
task_manager.spawn_handle(),
|
||||
client,
|
||||
transaction_pool,
|
||||
registry.as_ref(),
|
||||
telemetry.map(|telemtry| telemtry.handle()),
|
||||
),
|
||||
network,
|
||||
None,
|
||||
if let sc_service::config::Role::Authority { .. } = &role {
|
||||
let slot_duration = babe_link.config().slot_duration();
|
||||
let babe_config = sc_consensus_babe::BabeParams {
|
||||
keystore: keystore.clone(),
|
||||
client: client.clone(),
|
||||
select_chain,
|
||||
env: sc_basic_authorship::ProposerFactory::new(
|
||||
task_manager.spawn_handle(),
|
||||
client.clone(),
|
||||
transaction_pool,
|
||||
prometheus_registry.as_ref(),
|
||||
telemetry.as_ref().map(Telemetry::handle),
|
||||
),
|
||||
block_import,
|
||||
sync_oracle: sync_service.clone(),
|
||||
justification_sync_link: sync_service.clone(),
|
||||
create_inherent_data_providers: move |_, _| async move {
|
||||
Ok(create_inherent_data_providers(slot_duration))
|
||||
},
|
||||
force_authoring,
|
||||
backoff_authoring_blocks: None::<()>,
|
||||
babe_link,
|
||||
block_proposal_slot_portion: SlotProportion::new(0.5),
|
||||
max_block_proposal_slot_portion: None,
|
||||
telemetry: telemetry.as_ref().map(Telemetry::handle),
|
||||
};
|
||||
|
||||
task_manager.spawn_essential_handle().spawn_blocking(
|
||||
"babe-proposer",
|
||||
Some("block-authoring"),
|
||||
sc_consensus_babe::start_babe(babe_config)?,
|
||||
);
|
||||
}
|
||||
|
||||
if role.is_authority() {
|
||||
task_manager.spawn_handle().spawn(
|
||||
"authority-discovery-worker",
|
||||
Some("networking"),
|
||||
sc_authority_discovery::new_worker_and_service_with_config(
|
||||
#[allow(clippy::field_reassign_with_default)]
|
||||
{
|
||||
let mut worker = sc_authority_discovery::WorkerConfig::default();
|
||||
worker.publish_non_global_ips = publish_non_global_ips;
|
||||
worker
|
||||
},
|
||||
client,
|
||||
network.clone(),
|
||||
Box::pin(network.event_stream("authority-discovery").filter_map(|e| async move {
|
||||
match e {
|
||||
Event::Dht(e) => Some(e),
|
||||
_ => None,
|
||||
}
|
||||
})),
|
||||
sc_authority_discovery::Role::PublishAndDiscover(keystore.clone()),
|
||||
prometheus_registry.clone(),
|
||||
)
|
||||
.0
|
||||
.run(),
|
||||
);
|
||||
}
|
||||
|
||||
if enable_grandpa {
|
||||
task_manager.spawn_essential_handle().spawn_blocking(
|
||||
"grandpa-voter",
|
||||
None,
|
||||
grandpa::run_grandpa_voter(grandpa::GrandpaParams {
|
||||
config: grandpa::Config {
|
||||
gossip_duration: std::time::Duration::from_millis(333),
|
||||
justification_period: 512,
|
||||
name: Some(name),
|
||||
observer_enabled: false,
|
||||
keystore: if role.is_authority() { Some(keystore) } else { None },
|
||||
local_role: role,
|
||||
telemetry: telemetry.as_ref().map(Telemetry::handle),
|
||||
protocol_name: grandpa_protocol_name,
|
||||
},
|
||||
link: grandpa_link,
|
||||
network,
|
||||
sync: Arc::new(sync_service),
|
||||
telemetry: telemetry.as_ref().map(Telemetry::handle),
|
||||
voting_rule: grandpa::VotingRulesBuilder::default().build(),
|
||||
prometheus_registry,
|
||||
shared_voter_state,
|
||||
})?,
|
||||
);
|
||||
}
|
||||
|
||||
|
||||
@@ -19,17 +19,23 @@ scale-info = { version = "2", default-features = false, features = ["derive"] }
|
||||
|
||||
sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-offchain = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-version = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-inherents = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-offchain = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-session = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-consensus-babe = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-consensus-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-authority-discovery = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-transaction-pool = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-block-builder = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
sp-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
sp-tendermint = { path = "../tendermint/primitives", default-features = false }
|
||||
|
||||
frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
frame-executive = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
@@ -37,6 +43,8 @@ frame-benchmarking = { git = "https://github.com/serai-dex/substrate", default-f
|
||||
|
||||
serai-primitives = { path = "../serai/primitives", default-features = false }
|
||||
|
||||
pallet-timestamp = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
pallet-balances = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
pallet-assets = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
pallet-transaction-payment = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
@@ -46,7 +54,10 @@ in-instructions-pallet = { path = "../in-instructions/pallet", default-features
|
||||
|
||||
validator-sets-pallet = { path = "../validator-sets/pallet", default-features = false }
|
||||
pallet-session = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
pallet-tendermint = { path = "../tendermint/pallet", default-features = false }
|
||||
pallet-babe = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
pallet-grandpa = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
pallet-authority-discovery = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
|
||||
frame-system-rpc-runtime-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
pallet-transaction-payment-rpc-runtime-api = { git = "https://github.com/serai-dex/substrate", default-features = false }
|
||||
@@ -61,23 +72,31 @@ std = [
|
||||
|
||||
"sp-core/std",
|
||||
"sp-std/std",
|
||||
|
||||
"sp-offchain/std",
|
||||
"sp-version/std",
|
||||
"sp-inherents/std",
|
||||
"sp-offchain/std",
|
||||
|
||||
"sp-session/std",
|
||||
"sp-consensus-babe/std",
|
||||
"sp-consensus-grandpa/std",
|
||||
|
||||
"sp-authority-discovery/std",
|
||||
|
||||
"sp-transaction-pool/std",
|
||||
"sp-block-builder/std",
|
||||
|
||||
"sp-runtime/std",
|
||||
"sp-api/std",
|
||||
|
||||
"sp-tendermint/std",
|
||||
|
||||
"frame-system/std",
|
||||
"frame-support/std",
|
||||
"frame-executive/std",
|
||||
|
||||
"serai-primitives/std",
|
||||
|
||||
"pallet-timestamp/std",
|
||||
|
||||
"pallet-balances/std",
|
||||
"pallet-transaction-payment/std",
|
||||
|
||||
@@ -87,7 +106,10 @@ std = [
|
||||
|
||||
"validator-sets-pallet/std",
|
||||
"pallet-session/std",
|
||||
"pallet-tendermint/std",
|
||||
"pallet-babe/std",
|
||||
"pallet-grandpa/std",
|
||||
|
||||
"pallet-authority-discovery/std",
|
||||
|
||||
"frame-system-rpc-runtime-api/std",
|
||||
"pallet-transaction-payment-rpc-runtime-api/std",
|
||||
@@ -102,10 +124,13 @@ runtime-benchmarks = [
|
||||
"frame-support/runtime-benchmarks",
|
||||
"frame-benchmarking/runtime-benchmarks",
|
||||
|
||||
"pallet-timestamp/runtime-benchmarks",
|
||||
|
||||
"pallet-balances/runtime-benchmarks",
|
||||
"pallet-assets/runtime-benchmarks",
|
||||
|
||||
"pallet-tendermint/runtime-benchmarks",
|
||||
"pallet-babe/runtime-benchmarks",
|
||||
"pallet-grandpa/runtime-benchmarks",
|
||||
]
|
||||
|
||||
default = ["std"]
|
||||
|
||||
@@ -12,6 +12,8 @@ pub use serai_primitives as primitives;
|
||||
pub use frame_system as system;
|
||||
pub use frame_support as support;
|
||||
|
||||
pub use pallet_timestamp as timestamp;
|
||||
|
||||
pub use pallet_balances as balances;
|
||||
pub use pallet_transaction_payment as transaction_payment;
|
||||
|
||||
@@ -22,7 +24,10 @@ pub use in_instructions_pallet as in_instructions;
|
||||
pub use validator_sets_pallet as validator_sets;
|
||||
|
||||
pub use pallet_session as session;
|
||||
pub use pallet_tendermint as tendermint;
|
||||
pub use pallet_babe as babe;
|
||||
pub use pallet_grandpa as grandpa;
|
||||
|
||||
pub use pallet_authority_discovery as authority_discovery;
|
||||
|
||||
// Actually used by the runtime
|
||||
use sp_core::OpaqueMetadata;
|
||||
@@ -52,7 +57,9 @@ use support::{
|
||||
|
||||
use transaction_payment::CurrencyAdapter;
|
||||
|
||||
use session::PeriodicSessions;
|
||||
use babe::AuthorityId as BabeId;
|
||||
use grandpa::AuthorityId as GrandpaId;
|
||||
use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId;
|
||||
|
||||
/// An index to a block.
|
||||
pub type BlockNumber = u64;
|
||||
@@ -74,7 +81,9 @@ pub mod opaque {
|
||||
|
||||
impl_opaque_keys! {
|
||||
pub struct SessionKeys {
|
||||
pub tendermint: Tendermint,
|
||||
pub babe: Babe,
|
||||
pub grandpa: Grandpa,
|
||||
pub authority_discovery: AuthorityDiscovery,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -94,6 +103,11 @@ pub const VERSION: RuntimeVersion = RuntimeVersion {
|
||||
state_version: 1,
|
||||
};
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub fn native_version() -> NativeVersion {
|
||||
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
|
||||
}
|
||||
|
||||
// 1 MB
|
||||
pub const BLOCK_SIZE: u32 = 1024 * 1024;
|
||||
// 6 seconds
|
||||
@@ -104,10 +118,13 @@ pub const MINUTES: BlockNumber = 60 / TARGET_BLOCK_TIME;
|
||||
pub const HOURS: BlockNumber = MINUTES * 60;
|
||||
pub const DAYS: BlockNumber = HOURS * 24;
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
pub fn native_version() -> NativeVersion {
|
||||
NativeVersion { runtime_version: VERSION, can_author_with: Default::default() }
|
||||
}
|
||||
pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4);
|
||||
|
||||
pub const BABE_GENESIS_EPOCH_CONFIG: sp_consensus_babe::BabeEpochConfiguration =
|
||||
sp_consensus_babe::BabeEpochConfiguration {
|
||||
c: PRIMARY_PROBABILITY,
|
||||
allowed_slots: sp_consensus_babe::AllowedSlots::PrimaryAndSecondaryPlainSlots,
|
||||
};
|
||||
|
||||
const NORMAL_DISPATCH_RATIO: Perbill = Perbill::from_percent(75);
|
||||
|
||||
@@ -122,14 +139,20 @@ parameter_types! {
|
||||
system::limits::BlockLength::max_with_normal_ratio(BLOCK_SIZE, NORMAL_DISPATCH_RATIO);
|
||||
pub BlockWeights: system::limits::BlockWeights =
|
||||
system::limits::BlockWeights::with_sensible_defaults(
|
||||
Weight::from_ref_time(2u64 * WEIGHT_REF_TIME_PER_SECOND).set_proof_size(u64::MAX),
|
||||
Weight::from_parts(2u64 * WEIGHT_REF_TIME_PER_SECOND, u64::MAX),
|
||||
NORMAL_DISPATCH_RATIO,
|
||||
);
|
||||
|
||||
pub const MaxAuthorities: u32 = 100;
|
||||
}
|
||||
|
||||
pub struct CallFilter;
|
||||
impl Contains<RuntimeCall> for CallFilter {
|
||||
fn contains(call: &RuntimeCall) -> bool {
|
||||
if let RuntimeCall::Timestamp(call) = call {
|
||||
return matches!(call, timestamp::Call::set { .. });
|
||||
}
|
||||
|
||||
if let RuntimeCall::Balances(call) = call {
|
||||
return matches!(call, balances::Call::transfer { .. } | balances::Call::transfer_all { .. });
|
||||
}
|
||||
@@ -188,14 +211,29 @@ impl system::Config for Runtime {
|
||||
type MaxConsumers = support::traits::ConstU32<16>;
|
||||
}
|
||||
|
||||
impl timestamp::Config for Runtime {
|
||||
type Moment = u64;
|
||||
type OnTimestampSet = Babe;
|
||||
type MinimumPeriod = ConstU64<{ (TARGET_BLOCK_TIME * 1000) / 2 }>;
|
||||
type WeightInfo = ();
|
||||
}
|
||||
|
||||
impl balances::Config for Runtime {
|
||||
type MaxLocks = ConstU32<50>;
|
||||
type MaxReserves = ();
|
||||
type ReserveIdentifier = [u8; 8];
|
||||
type Balance = SubstrateAmount;
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
|
||||
type Balance = SubstrateAmount;
|
||||
|
||||
type ReserveIdentifier = ();
|
||||
type HoldIdentifier = ();
|
||||
type FreezeIdentifier = ();
|
||||
|
||||
type MaxLocks = ();
|
||||
type MaxReserves = ();
|
||||
type MaxHolds = ();
|
||||
type MaxFreezes = ();
|
||||
|
||||
type DustRemoval = ();
|
||||
type ExistentialDeposit = ConstU64<500>;
|
||||
type ExistentialDeposit = ConstU64<1>;
|
||||
type AccountStore = System;
|
||||
type WeightInfo = balances::weights::SubstrateWeight<Runtime>;
|
||||
}
|
||||
@@ -248,8 +286,9 @@ impl in_instructions::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
}
|
||||
|
||||
const SESSION_LENGTH: BlockNumber = 5 * DAYS;
|
||||
type Sessions = PeriodicSessions<ConstU64<{ SESSION_LENGTH }>, ConstU64<{ SESSION_LENGTH }>>;
|
||||
impl validator_sets::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
}
|
||||
|
||||
pub struct IdentityValidatorIdOf;
|
||||
impl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {
|
||||
@@ -258,23 +297,49 @@ impl Convert<PublicKey, Option<PublicKey>> for IdentityValidatorIdOf {
|
||||
}
|
||||
}
|
||||
|
||||
impl validator_sets::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
}
|
||||
|
||||
impl session::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
type ValidatorId = PublicKey;
|
||||
type ValidatorIdOf = IdentityValidatorIdOf;
|
||||
type ShouldEndSession = Sessions;
|
||||
type NextSessionRotation = Sessions;
|
||||
type SessionManager = ();
|
||||
type ShouldEndSession = Babe;
|
||||
type NextSessionRotation = Babe;
|
||||
type SessionManager = (); // TODO?
|
||||
type SessionHandler = <SessionKeys as OpaqueKeys>::KeyTypeIdProviders;
|
||||
type Keys = SessionKeys;
|
||||
type WeightInfo = session::weights::SubstrateWeight<Runtime>;
|
||||
}
|
||||
|
||||
impl tendermint::Config for Runtime {}
|
||||
impl babe::Config for Runtime {
|
||||
#[allow(clippy::identity_op)]
|
||||
type EpochDuration = ConstU64<{ 1 * DAYS }>;
|
||||
type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>;
|
||||
type EpochChangeTrigger = pallet_babe::ExternalTrigger;
|
||||
type DisabledValidators = Session;
|
||||
|
||||
type WeightInfo = ();
|
||||
|
||||
type MaxAuthorities = MaxAuthorities;
|
||||
|
||||
// TODO: Handle equivocation reports
|
||||
type KeyOwnerProof = sp_core::Void;
|
||||
type EquivocationReportSystem = ();
|
||||
}
|
||||
|
||||
impl grandpa::Config for Runtime {
|
||||
type RuntimeEvent = RuntimeEvent;
|
||||
|
||||
type WeightInfo = ();
|
||||
type MaxAuthorities = MaxAuthorities;
|
||||
|
||||
// TODO: Handle equivocation reports
|
||||
type MaxSetIdSessionEntries = ConstU64<0>;
|
||||
type KeyOwnerProof = sp_core::Void;
|
||||
type EquivocationReportSystem = ();
|
||||
}
|
||||
|
||||
impl authority_discovery::Config for Runtime {
|
||||
type MaxAuthorities = MaxAuthorities;
|
||||
}
|
||||
|
||||
pub type Header = generic::Header<BlockNumber, BlakeTwo256>;
|
||||
pub type Block = generic::Block<Header, UncheckedExtrinsic>;
|
||||
@@ -307,6 +372,8 @@ construct_runtime!(
|
||||
{
|
||||
System: system,
|
||||
|
||||
Timestamp: timestamp,
|
||||
|
||||
Balances: balances,
|
||||
TransactionPayment: transaction_payment,
|
||||
|
||||
@@ -317,7 +384,10 @@ construct_runtime!(
|
||||
ValidatorSets: validator_sets,
|
||||
|
||||
Session: session,
|
||||
Tendermint: tendermint,
|
||||
Babe: babe,
|
||||
Grandpa: grandpa,
|
||||
|
||||
AuthorityDiscovery: authority_discovery,
|
||||
}
|
||||
);
|
||||
|
||||
@@ -329,8 +399,15 @@ extern crate frame_benchmarking;
|
||||
mod benches {
|
||||
define_benchmarks!(
|
||||
[frame_benchmarking, BaselineBench::<Runtime>]
|
||||
|
||||
[system, SystemBench::<Runtime>]
|
||||
|
||||
[pallet_timestamp, Timestamp]
|
||||
|
||||
[balances, Balances]
|
||||
|
||||
[babe, Babe]
|
||||
[grandpa, Grandpa]
|
||||
);
|
||||
}
|
||||
|
||||
@@ -353,6 +430,14 @@ sp_api::impl_runtime_apis! {
|
||||
fn metadata() -> OpaqueMetadata {
|
||||
OpaqueMetadata::new(Runtime::metadata().into())
|
||||
}
|
||||
|
||||
fn metadata_at_version(version: u32) -> Option<OpaqueMetadata> {
|
||||
Runtime::metadata_at_version(version)
|
||||
}
|
||||
|
||||
fn metadata_versions() -> sp_std::vec::Vec<u32> {
|
||||
Runtime::metadata_versions()
|
||||
}
|
||||
}
|
||||
|
||||
impl sp_block_builder::BlockBuilder<Block> for Runtime {
|
||||
@@ -404,13 +489,69 @@ sp_api::impl_runtime_apis! {
|
||||
}
|
||||
}
|
||||
|
||||
impl sp_tendermint::TendermintApi<Block> for Runtime {
|
||||
fn current_session() -> u32 {
|
||||
Tendermint::session()
|
||||
impl sp_consensus_babe::BabeApi<Block> for Runtime {
|
||||
fn configuration() -> sp_consensus_babe::BabeConfiguration {
|
||||
use support::traits::Get;
|
||||
|
||||
let epoch_config = Babe::epoch_config().unwrap_or(BABE_GENESIS_EPOCH_CONFIG);
|
||||
sp_consensus_babe::BabeConfiguration {
|
||||
slot_duration: Babe::slot_duration(),
|
||||
epoch_length: <Runtime as babe::Config>::EpochDuration::get(),
|
||||
c: epoch_config.c,
|
||||
authorities: Babe::authorities().to_vec(),
|
||||
randomness: Babe::randomness(),
|
||||
allowed_slots: epoch_config.allowed_slots,
|
||||
}
|
||||
}
|
||||
|
||||
fn validators() -> Vec<PublicKey> {
|
||||
Session::validators().drain(..).map(Into::into).collect()
|
||||
fn current_epoch_start() -> sp_consensus_babe::Slot {
|
||||
Babe::current_epoch_start()
|
||||
}
|
||||
|
||||
fn current_epoch() -> sp_consensus_babe::Epoch {
|
||||
Babe::current_epoch()
|
||||
}
|
||||
|
||||
fn next_epoch() -> sp_consensus_babe::Epoch {
|
||||
Babe::next_epoch()
|
||||
}
|
||||
|
||||
fn generate_key_ownership_proof(
|
||||
_: sp_consensus_babe::Slot,
|
||||
_: BabeId,
|
||||
) -> Option<sp_consensus_babe::OpaqueKeyOwnershipProof> {
|
||||
None
|
||||
}
|
||||
|
||||
fn submit_report_equivocation_unsigned_extrinsic(
|
||||
_: sp_consensus_babe::EquivocationProof<<Block as BlockT>::Header>,
|
||||
_: sp_consensus_babe::OpaqueKeyOwnershipProof,
|
||||
) -> Option<()> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
impl sp_consensus_grandpa::GrandpaApi<Block> for Runtime {
|
||||
fn grandpa_authorities() -> sp_consensus_grandpa::AuthorityList {
|
||||
Grandpa::grandpa_authorities()
|
||||
}
|
||||
|
||||
fn current_set_id() -> sp_consensus_grandpa::SetId {
|
||||
Grandpa::current_set_id()
|
||||
}
|
||||
|
||||
fn submit_report_equivocation_unsigned_extrinsic(
|
||||
_: sp_consensus_grandpa::EquivocationProof<<Block as BlockT>::Hash, u64>,
|
||||
_: sp_consensus_grandpa::OpaqueKeyOwnershipProof,
|
||||
) -> Option<()> {
|
||||
None
|
||||
}
|
||||
|
||||
fn generate_key_ownership_proof(
|
||||
_set_id: sp_consensus_grandpa::SetId,
|
||||
_authority_id: GrandpaId,
|
||||
) -> Option<sp_consensus_grandpa::OpaqueKeyOwnershipProof> {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
@@ -446,4 +587,10 @@ sp_api::impl_runtime_apis! {
|
||||
TransactionPayment::length_to_fee(length)
|
||||
}
|
||||
}
|
||||
|
||||
impl sp_authority_discovery::AuthorityDiscoveryApi<Block> for Runtime {
|
||||
fn authorities() -> Vec<AuthorityDiscoveryId> {
|
||||
AuthorityDiscovery::authorities()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,7 @@ use subxt::{
|
||||
error::Error as SubxtError,
|
||||
utils::Encoded,
|
||||
config::{
|
||||
Header as HeaderTrait,
|
||||
substrate::{BlakeTwo256, SubstrateHeader},
|
||||
extrinsic_params::{BaseExtrinsicParams, BaseExtrinsicParamsBuilder},
|
||||
},
|
||||
@@ -66,6 +67,8 @@ pub enum SeraiError {
|
||||
RpcError(SubxtError),
|
||||
#[error("serai-client library was intended for a different runtime version")]
|
||||
InvalidRuntime,
|
||||
#[error("node is faulty")]
|
||||
InvalidNode,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -123,6 +126,44 @@ impl Serai {
|
||||
Ok(self.0.rpc().finalized_head().await.map_err(SeraiError::RpcError)?.into())
|
||||
}
|
||||
|
||||
// There is no provided method for this
|
||||
// TODO: Add one to Serai
|
||||
pub async fn is_finalized(&self, header: &Header) -> Result<Option<bool>, SeraiError> {
|
||||
// Get the latest finalized block
|
||||
let finalized = self.get_latest_block_hash().await?.into();
|
||||
// If the latest finalized block is this block, return true
|
||||
if finalized == header.hash() {
|
||||
return Ok(Some(true));
|
||||
}
|
||||
|
||||
let Some(finalized) =
|
||||
self.0.rpc().header(Some(finalized)).await.map_err(SeraiError::RpcError)? else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// If the finalized block has a lower number, this block can't be finalized
|
||||
if finalized.number() < header.number() {
|
||||
return Ok(Some(false));
|
||||
}
|
||||
|
||||
// This block, if finalized, comes before the finalized block
|
||||
// If we request the hash of this block's number, Substrate will return the hash on the main
|
||||
// chain
|
||||
// If that hash is this hash, this block is finalized
|
||||
let Some(hash) =
|
||||
self
|
||||
.0
|
||||
.rpc()
|
||||
.block_hash(Some(header.number().into()))
|
||||
.await
|
||||
.map_err(SeraiError::RpcError)? else {
|
||||
// This is an error since there is a block at this index
|
||||
return Err(SeraiError::InvalidNode);
|
||||
};
|
||||
|
||||
Ok(Some(header.hash() == hash))
|
||||
}
|
||||
|
||||
pub async fn get_block(&self, hash: [u8; 32]) -> Result<Option<Block>, SeraiError> {
|
||||
let Some(res) =
|
||||
self.0.rpc().block(Some(hash.into())).await.map_err(SeraiError::RpcError)? else {
|
||||
@@ -130,8 +171,7 @@ impl Serai {
|
||||
};
|
||||
|
||||
// Only return finalized blocks
|
||||
let Some(justifications) = res.justifications.as_ref() else { return Ok(None); };
|
||||
if justifications.is_empty() {
|
||||
if self.is_finalized(&res.block.header).await? != Some(true) {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
@@ -140,9 +180,9 @@ impl Serai {
|
||||
|
||||
// Ideally, this would be get_block_hash, not get_block_by_number
|
||||
// Unfortunately, in order to only operate over only finalized data, we have to check the
|
||||
// returned hash is for a finalized block. We can only do that by calling subxt's `block`, which
|
||||
// will return the block and any justifications
|
||||
// If we're already putting in all the work to get the block, we may as well just return it here
|
||||
// returned hash is for a finalized block. We can only do that by calling the extensive
|
||||
// is_finalized method, which at least requires the header
|
||||
// In practice, the block is likely more useful than the header
|
||||
pub async fn get_block_by_number(&self, number: u64) -> Result<Option<Block>, SeraiError> {
|
||||
let Some(hash) =
|
||||
self.0.rpc().block_hash(Some(number.into())).await.map_err(SeraiError::RpcError)? else {
|
||||
|
||||
@@ -25,7 +25,7 @@ pub async fn provide_batch(batch: SignedBatch) -> [u8; 32] {
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.header()
|
||||
.header
|
||||
.number();
|
||||
|
||||
let execution = serai.execute_batch(batch.clone()).unwrap();
|
||||
|
||||
@@ -10,7 +10,6 @@ use serde::{Serialize, Deserialize};
|
||||
use sp_core::H256;
|
||||
|
||||
/// The type used to identify block numbers.
|
||||
// Doesn't re-export tendermint-machine's due to traits.
|
||||
#[derive(
|
||||
Clone, Copy, Default, PartialEq, Eq, Hash, Debug, Encode, Decode, MaxEncodedLen, TypeInfo,
|
||||
)]
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
[package]
|
||||
name = "sc-tendermint"
|
||||
version = "0.1.0"
|
||||
description = "Tendermint client for Substrate"
|
||||
license = "AGPL-3.0-only"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/client"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
publish = false
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
all-features = true
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[dependencies]
|
||||
async-trait = "0.1"
|
||||
|
||||
hex = "0.4"
|
||||
log = "0.4"
|
||||
|
||||
futures = "0.3"
|
||||
tokio = { version = "1", features = ["sync", "rt"] }
|
||||
|
||||
sp-core = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-application-crypto = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-keystore = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-inherents = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-staking = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-blockchain = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-api = { git = "https://github.com/serai-dex/substrate" }
|
||||
sp-consensus = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
sp-tendermint = { path = "../primitives" }
|
||||
|
||||
sc-network-common = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-network = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-network-gossip = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-service = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-client-api = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-block-builder = { git = "https://github.com/serai-dex/substrate" }
|
||||
sc-consensus = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
substrate-prometheus-endpoint = { git = "https://github.com/serai-dex/substrate" }
|
||||
|
||||
tendermint-machine = { path = "../machine", features = ["substrate"] }
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2022-2023 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,67 +0,0 @@
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use sp_core::Decode;
|
||||
use sp_runtime::traits::{Hash, Header, Block};
|
||||
|
||||
use sc_network::PeerId;
|
||||
use sc_network_gossip::{Validator, ValidatorContext, ValidationResult};
|
||||
|
||||
use tendermint_machine::{ext::SignatureScheme, SignedMessage};
|
||||
|
||||
use crate::{TendermintValidator, validators::TendermintValidators};
|
||||
|
||||
#[derive(Clone)]
|
||||
pub(crate) struct TendermintGossip<T: TendermintValidator> {
|
||||
number: Arc<RwLock<u64>>,
|
||||
signature_scheme: TendermintValidators<T>,
|
||||
}
|
||||
|
||||
impl<T: TendermintValidator> TendermintGossip<T> {
|
||||
pub(crate) fn new(number: Arc<RwLock<u64>>, signature_scheme: TendermintValidators<T>) -> Self {
|
||||
TendermintGossip { number, signature_scheme }
|
||||
}
|
||||
|
||||
pub(crate) fn topic(number: u64) -> <T::Block as Block>::Hash {
|
||||
<<<T::Block as Block>::Header as Header>::Hashing as Hash>::hash(
|
||||
&[b"Tendermint Block Topic".as_ref(), &number.to_le_bytes()].concat(),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintValidator> Validator<T::Block> for TendermintGossip<T> {
|
||||
fn validate(
|
||||
&self,
|
||||
_: &mut dyn ValidatorContext<T::Block>,
|
||||
_: &PeerId,
|
||||
data: &[u8],
|
||||
) -> ValidationResult<<T::Block as Block>::Hash> {
|
||||
let msg = match SignedMessage::<
|
||||
u16,
|
||||
T::Block,
|
||||
<TendermintValidators<T> as SignatureScheme>::Signature,
|
||||
>::decode(&mut &*data)
|
||||
{
|
||||
Ok(msg) => msg,
|
||||
Err(_) => return ValidationResult::Discard,
|
||||
};
|
||||
|
||||
if msg.block().0 < *self.number.read().unwrap() {
|
||||
return ValidationResult::Discard;
|
||||
}
|
||||
|
||||
// Verify the signature here so we don't carry invalid messages in our gossip layer
|
||||
// This will cause double verification of the signature, yet that's a minimal cost
|
||||
if !msg.verify_signature(&self.signature_scheme) {
|
||||
return ValidationResult::Discard;
|
||||
}
|
||||
|
||||
ValidationResult::ProcessAndKeep(Self::topic(msg.block().0))
|
||||
}
|
||||
|
||||
fn message_expired<'a>(
|
||||
&'a self,
|
||||
) -> Box<dyn FnMut(<T::Block as Block>::Hash, &[u8]) -> bool + 'a> {
|
||||
let number = self.number.clone();
|
||||
Box::new(move |topic, _| topic != Self::topic(*number.read().unwrap()))
|
||||
}
|
||||
}
|
||||
@@ -1,72 +0,0 @@
|
||||
use std::{
|
||||
pin::Pin,
|
||||
sync::RwLock,
|
||||
task::{Poll, Context},
|
||||
future::Future,
|
||||
};
|
||||
|
||||
use sp_runtime::traits::{Header, Block};
|
||||
|
||||
use sp_consensus::Error;
|
||||
use sc_consensus::{BlockImportStatus, BlockImportError, Link};
|
||||
|
||||
use sc_service::ImportQueue;
|
||||
|
||||
use tendermint_machine::ext::BlockError;
|
||||
|
||||
use crate::TendermintImportQueue;
|
||||
|
||||
// Custom helpers for ImportQueue in order to obtain the result of a block's importing
|
||||
struct ValidateLink<B: Block>(Option<(B::Hash, Result<(), BlockError>)>);
|
||||
impl<B: Block> Link<B> for ValidateLink<B> {
|
||||
fn blocks_processed(
|
||||
&mut self,
|
||||
imported: usize,
|
||||
count: usize,
|
||||
mut results: Vec<(
|
||||
Result<BlockImportStatus<<B::Header as Header>::Number>, BlockImportError>,
|
||||
B::Hash,
|
||||
)>,
|
||||
) {
|
||||
assert!(imported <= 1);
|
||||
assert_eq!(count, 1);
|
||||
self.0 = Some((
|
||||
results[0].1,
|
||||
match results.swap_remove(0).0 {
|
||||
Ok(_) => Ok(()),
|
||||
Err(BlockImportError::Other(Error::Other(err))) => Err(
|
||||
err.downcast::<BlockError>().map(|boxed| *boxed.as_ref()).unwrap_or(BlockError::Fatal),
|
||||
),
|
||||
_ => Err(BlockError::Fatal),
|
||||
},
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) struct ImportFuture<'a, B: Block, T: Send>(
|
||||
B::Hash,
|
||||
RwLock<&'a mut TendermintImportQueue<B, T>>,
|
||||
);
|
||||
impl<'a, B: Block, T: Send> ImportFuture<'a, B, T> {
|
||||
pub(crate) fn new(
|
||||
hash: B::Hash,
|
||||
queue: &'a mut TendermintImportQueue<B, T>,
|
||||
) -> ImportFuture<B, T> {
|
||||
ImportFuture(hash, RwLock::new(queue))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, B: Block, T: Send> Future for ImportFuture<'a, B, T> {
|
||||
type Output = Result<(), BlockError>;
|
||||
|
||||
fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
let mut link = ValidateLink(None);
|
||||
self.1.write().unwrap().poll_actions(ctx, &mut link);
|
||||
if let Some(res) = link.0 {
|
||||
assert_eq!(res.0, self.0);
|
||||
Poll::Ready(res.1)
|
||||
} else {
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,494 +0,0 @@
|
||||
use std::{
|
||||
sync::{Arc, RwLock},
|
||||
time::{UNIX_EPOCH, SystemTime, Duration},
|
||||
collections::HashSet,
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use log::{debug, warn, error};
|
||||
|
||||
use futures::{
|
||||
SinkExt, StreamExt,
|
||||
lock::Mutex,
|
||||
channel::mpsc::{self, UnboundedSender},
|
||||
};
|
||||
|
||||
use sp_core::{Encode, Decode, traits::SpawnEssentialNamed};
|
||||
use sp_keystore::CryptoStore;
|
||||
use sp_runtime::{
|
||||
traits::{Header, Block},
|
||||
Digest,
|
||||
};
|
||||
use sp_blockchain::HeaderBackend;
|
||||
|
||||
use sp_consensus::{Error, BlockOrigin, BlockStatus, Proposer, Environment};
|
||||
use sc_consensus::import_queue::IncomingBlock;
|
||||
|
||||
use sc_service::ImportQueue;
|
||||
use sc_client_api::{BlockBackend, Finalizer, BlockchainEvents};
|
||||
use sc_network::{ProtocolName, NetworkBlock};
|
||||
use sc_network_gossip::GossipEngine;
|
||||
|
||||
use substrate_prometheus_endpoint::Registry;
|
||||
|
||||
use tendermint_machine::{
|
||||
ext::{BlockError, BlockNumber, Commit, SignatureScheme, Network},
|
||||
SignedMessage, TendermintMachine, TendermintHandle,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
CONSENSUS_ID, TendermintValidator,
|
||||
validators::{TendermintSigner, TendermintValidators},
|
||||
tendermint::TendermintImport,
|
||||
};
|
||||
|
||||
mod gossip;
|
||||
use gossip::TendermintGossip;
|
||||
|
||||
mod import_future;
|
||||
use import_future::ImportFuture;
|
||||
|
||||
// Data for an active validator
|
||||
// This is distinct as even when we aren't an authority, we still create stubbed Authority objects
|
||||
// as it's only Authority which implements tendermint_machine::ext::Network. Network has
|
||||
// verify_commit provided, and even non-authorities have to verify commits
|
||||
struct ActiveAuthority<T: TendermintValidator> {
|
||||
signer: TendermintSigner<T>,
|
||||
|
||||
// The number of the Block we're working on producing
|
||||
block_in_progress: Arc<RwLock<u64>>,
|
||||
// Notification channel for when we start on a new block
|
||||
new_block_event: UnboundedSender<()>,
|
||||
// Outgoing message queue, placed here as the GossipEngine itself can't be
|
||||
gossip: UnboundedSender<
|
||||
SignedMessage<u16, T::Block, <TendermintValidators<T> as SignatureScheme>::Signature>,
|
||||
>,
|
||||
|
||||
// Block producer
|
||||
env: Arc<Mutex<T::Environment>>,
|
||||
announce: T::Network,
|
||||
}
|
||||
|
||||
/// Tendermint Authority. Participates in the block proposal and voting process.
|
||||
pub struct TendermintAuthority<T: TendermintValidator> {
|
||||
import: TendermintImport<T>,
|
||||
active: Option<ActiveAuthority<T>>,
|
||||
}
|
||||
|
||||
// Get a block to propose after the specified header
|
||||
// If stub is true, no time will be spent adding transactions to it (beyond what's required),
|
||||
// making it as minimal as possible (a stub)
|
||||
// This is so we can create proposals when syncing, respecting tendermint-machine's API boundaries,
|
||||
// without spending the entire block processing time trying to include transactions (since we know
|
||||
// our proposal is meaningless and we'll just be syncing a new block anyways)
|
||||
async fn get_proposal<T: TendermintValidator>(
|
||||
env: &Arc<Mutex<T::Environment>>,
|
||||
import: &TendermintImport<T>,
|
||||
header: &<T::Block as Block>::Header,
|
||||
) -> T::Block {
|
||||
let proposer =
|
||||
env.lock().await.init(header).await.expect("Failed to create a proposer for the new block");
|
||||
|
||||
proposer
|
||||
.propose(
|
||||
import.inherent_data(*header.parent_hash()).await,
|
||||
Digest::default(),
|
||||
// The first processing time is to build the block
|
||||
// The second is for it to be downloaded (assumes a block won't take longer to download
|
||||
// than it'll take to process)
|
||||
// The third is for it to actually be processed
|
||||
Duration::from_secs((T::BLOCK_PROCESSING_TIME_IN_SECONDS / 3).into()),
|
||||
Some(T::PROPOSED_BLOCK_SIZE_LIMIT),
|
||||
)
|
||||
.await
|
||||
.expect("Failed to crate a new block proposal")
|
||||
.block
|
||||
}
|
||||
|
||||
impl<T: TendermintValidator> TendermintAuthority<T> {
|
||||
// Authority which is capable of verifying commits
|
||||
pub(crate) fn stub(import: TendermintImport<T>) -> Self {
|
||||
Self { import, active: None }
|
||||
}
|
||||
|
||||
async fn get_proposal(&self, header: &<T::Block as Block>::Header) -> T::Block {
|
||||
get_proposal(&self.active.as_ref().unwrap().env, &self.import, header).await
|
||||
}
|
||||
|
||||
/// Create and run a new Tendermint Authority, proposing and voting on blocks.
|
||||
/// This should be spawned on a task as it will not return until the P2P stack shuts down.
|
||||
#[allow(clippy::too_many_arguments, clippy::new_ret_no_self)]
|
||||
pub async fn new(
|
||||
genesis: SystemTime,
|
||||
protocol: ProtocolName,
|
||||
import: TendermintImport<T>,
|
||||
keys: Arc<dyn CryptoStore>,
|
||||
providers: T::CIDP,
|
||||
spawner: impl SpawnEssentialNamed,
|
||||
env: T::Environment,
|
||||
network: T::Network,
|
||||
registry: Option<&Registry>,
|
||||
) {
|
||||
// This should only have a single value, yet a bounded channel with a capacity of 1 would cause
|
||||
// a firm bound. It's not worth having a backlog crash the node since we aren't constrained
|
||||
let (new_block_event_send, mut new_block_event_recv) = mpsc::unbounded();
|
||||
let (msg_send, mut msg_recv) = mpsc::unbounded();
|
||||
|
||||
// Move the env into an Arc
|
||||
let env = Arc::new(Mutex::new(env));
|
||||
|
||||
// Scoped so the temporary variables used here don't leak
|
||||
let (block_in_progress, mut gossip, TendermintHandle { mut step, mut messages, machine }) = {
|
||||
// Get the info necessary to spawn the machine
|
||||
let info = import.client.info();
|
||||
|
||||
// Header::Number: TryInto<u64> doesn't implement Debug and can't be unwrapped
|
||||
let last_block: u64 = match info.finalized_number.try_into() {
|
||||
Ok(best) => best,
|
||||
Err(_) => panic!("BlockNumber exceeded u64"),
|
||||
};
|
||||
let last_hash = info.finalized_hash;
|
||||
|
||||
let last_time = {
|
||||
// Convert into a Unix timestamp
|
||||
let genesis = genesis.duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
|
||||
// Get the last block's time by grabbing its commit and reading the time from that
|
||||
Commit::<TendermintValidators<T>>::decode(
|
||||
&mut import
|
||||
.client
|
||||
.justifications(last_hash)
|
||||
.unwrap()
|
||||
.map(|justifications| justifications.get(CONSENSUS_ID).cloned().unwrap())
|
||||
.unwrap_or_default()
|
||||
.as_ref(),
|
||||
)
|
||||
.map(|commit| commit.end_time)
|
||||
// The commit provides the time its block ended at
|
||||
// The genesis time is when the network starts
|
||||
// Accordingly, the end of the genesis block is a block time after the genesis time
|
||||
.unwrap_or_else(|_| genesis + u64::from(Self::block_time()))
|
||||
};
|
||||
|
||||
let next_block = last_block + 1;
|
||||
// Shared references between us and the Tendermint machine (and its actions via its Network
|
||||
// trait)
|
||||
let block_in_progress = Arc::new(RwLock::new(next_block));
|
||||
|
||||
// Write the providers into the import so it can verify inherents
|
||||
*import.providers.write().await = Some(providers);
|
||||
|
||||
let authority = Self {
|
||||
import: import.clone(),
|
||||
active: Some(ActiveAuthority {
|
||||
signer: TendermintSigner(keys, import.validators.clone()),
|
||||
|
||||
block_in_progress: block_in_progress.clone(),
|
||||
new_block_event: new_block_event_send,
|
||||
gossip: msg_send,
|
||||
|
||||
env: env.clone(),
|
||||
announce: network.clone(),
|
||||
}),
|
||||
};
|
||||
|
||||
// Get our first proposal
|
||||
let proposal =
|
||||
authority.get_proposal(&import.client.header(last_hash).unwrap().unwrap()).await;
|
||||
|
||||
// Create the gossip network
|
||||
// This has to be spawning the machine, else gossip fails for some reason
|
||||
let gossip = GossipEngine::new(
|
||||
network,
|
||||
protocol,
|
||||
Arc::new(TendermintGossip::new(block_in_progress.clone(), import.validators.clone())),
|
||||
registry,
|
||||
);
|
||||
|
||||
(
|
||||
block_in_progress,
|
||||
gossip,
|
||||
TendermintMachine::new(authority, BlockNumber(last_block), last_time, proposal).await,
|
||||
)
|
||||
};
|
||||
spawner.spawn_essential("machine", Some("tendermint"), Box::pin(machine.run()));
|
||||
|
||||
// Start receiving messages about the Tendermint process for this block
|
||||
let mut gossip_recv =
|
||||
gossip.messages_for(TendermintGossip::<T>::topic(*block_in_progress.read().unwrap()));
|
||||
|
||||
// Get finality events from Substrate
|
||||
let mut finality = import.client.finality_notification_stream();
|
||||
|
||||
loop {
|
||||
futures::select_biased! {
|
||||
// GossipEngine closed down
|
||||
_ = gossip => {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"GossipEngine shut down. {}",
|
||||
"Is the node shutting down?"
|
||||
);
|
||||
break;
|
||||
},
|
||||
|
||||
// Synced a block from the network
|
||||
notif = finality.next() => {
|
||||
if let Some(notif) = notif {
|
||||
let number = match (*notif.header.number()).try_into() {
|
||||
Ok(number) => number,
|
||||
Err(_) => panic!("BlockNumber exceeded u64"),
|
||||
};
|
||||
|
||||
// There's a race condition between the machine add_block and this
|
||||
// Both wait for a write lock on this ref and don't release it until after updating it
|
||||
// accordingly
|
||||
{
|
||||
let mut block_in_progress = block_in_progress.write().unwrap();
|
||||
if number < *block_in_progress {
|
||||
continue;
|
||||
}
|
||||
let next_block = number + 1;
|
||||
*block_in_progress = next_block;
|
||||
gossip_recv = gossip.messages_for(TendermintGossip::<T>::topic(next_block));
|
||||
}
|
||||
|
||||
let justifications = import.client.justifications(notif.hash).unwrap().unwrap();
|
||||
step.send((
|
||||
BlockNumber(number),
|
||||
Commit::decode(&mut justifications.get(CONSENSUS_ID).unwrap().as_ref()).unwrap(),
|
||||
// Creating a proposal will fail if syncing occurs radically faster than machine
|
||||
// stepping takes
|
||||
// Don't create proposals when stepping accordingly
|
||||
None
|
||||
)).await.unwrap();
|
||||
} else {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"Finality notification stream closed down. {}",
|
||||
"Is the node shutting down?"
|
||||
);
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
// Machine accomplished a new block
|
||||
new_block = new_block_event_recv.next() => {
|
||||
if new_block.is_some() {
|
||||
gossip_recv = gossip.messages_for(
|
||||
TendermintGossip::<T>::topic(*block_in_progress.read().unwrap())
|
||||
);
|
||||
} else {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"Block notification stream shut down. {}",
|
||||
"Is the node shutting down?"
|
||||
);
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
// Message to broadcast
|
||||
msg = msg_recv.next() => {
|
||||
if let Some(msg) = msg {
|
||||
let topic = TendermintGossip::<T>::topic(msg.block().0);
|
||||
gossip.gossip_message(topic, msg.encode(), false);
|
||||
} else {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"Machine's message channel shut down. {}",
|
||||
"Is the node shutting down?"
|
||||
);
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
// Received a message
|
||||
msg = gossip_recv.next() => {
|
||||
if let Some(msg) = msg {
|
||||
messages.send(
|
||||
match SignedMessage::decode(&mut msg.message.as_ref()) {
|
||||
Ok(msg) => msg,
|
||||
Err(e) => {
|
||||
// This is guaranteed to be valid thanks to to the gossip validator, assuming
|
||||
// that pipeline is correct. This doesn't panic as a hedge
|
||||
error!(target: "tendermint", "Couldn't decode valid message: {}", e);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
).await.unwrap();
|
||||
} else {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"Gossip channel shut down. {}",
|
||||
"Is the node shutting down?"
|
||||
);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl<T: TendermintValidator> Network for TendermintAuthority<T> {
  type ValidatorId = u16;
  type SignatureScheme = TendermintValidators<T>;
  type Weights = TendermintValidators<T>;
  type Block = T::Block;

  const BLOCK_PROCESSING_TIME: u32 = T::BLOCK_PROCESSING_TIME_IN_SECONDS;
  const LATENCY_TIME: u32 = T::LATENCY_TIME_IN_SECONDS;

  // NOTE(review): signer/broadcast unwrap `self.active`, so they assume the authority has been
  // activated (not a stub) — confirm against the authority's construction paths
  fn signer(&self) -> TendermintSigner<T> {
    self.active.as_ref().unwrap().signer.clone()
  }

  fn signature_scheme(&self) -> TendermintValidators<T> {
    self.import.validators.clone()
  }

  fn weights(&self) -> TendermintValidators<T> {
    self.import.validators.clone()
  }

  // Forwards a signed message to the gossip channel for broadcast over the network
  async fn broadcast(
    &mut self,
    msg: SignedMessage<u16, Self::Block, <TendermintValidators<T> as SignatureScheme>::Signature>,
  ) {
    if self.active.as_mut().unwrap().gossip.unbounded_send(msg).is_err() {
      warn!(
        target: "tendermint",
        "Attempted to broadcast a message except the gossip channel is closed. {}",
        "Is the node shutting down?"
      );
    }
  }

  async fn slash(&mut self, validator: u16) {
    // TODO: slashing is unimplemented; this only logs the misbehaving validator
    error!("slashing {}, if this is a local network, this shouldn't happen", validator);
  }

  // The Tendermint machine will call add_block for any block which is committed to, regardless of
  // validity. To determine validity, it expects a validate function, which Substrate doesn't
  // directly offer, and an add function. In order to comply with Serai's modified view of inherent
  // transactions, validate MUST check inherents, yet add_block must not.
  //
  // In order to acquire a validate function, any block proposed by a legitimate proposer is
  // imported. This performs full validation and makes the block available as a tip. While this
  // would be incredibly unsafe thanks to the unchecked inherents, it's defined as a tip with less
  // work, despite being a child of some parent. This means it won't be moved to nor operated on by
  // the node.
  //
  // When Tendermint completes, the block is finalized, setting it as the tip regardless of work.
  async fn validate(&mut self, block: &T::Block) -> Result<(), BlockError> {
    let hash = block.hash();
    let (header, body) = block.clone().deconstruct();
    let parent = *header.parent_hash();
    let number = *header.number();

    // Can happen when we sync a block while also acting as a validator
    if number <= self.import.client.info().best_number {
      debug!(target: "tendermint", "Machine proposed a block for a slot we've already synced");
      Err(BlockError::Temporal)?;
    }

    // Hold the queue write lock for the whole import, and mark which block Tendermint is
    // importing so verify_origin accepts it
    let mut queue_write = self.import.queue.write().await;
    *self.import.importing_block.write().unwrap() = Some(hash);

    queue_write.as_mut().unwrap().service_ref().import_blocks(
      BlockOrigin::ConsensusBroadcast, // TODO: Use BlockOrigin::Own when it's our block
      vec![IncomingBlock {
        hash,
        header: Some(header),
        body: Some(body),
        indexed_body: None,
        justifications: None,
        origin: None, // TODO
        allow_missing_state: false,
        skip_execution: false,
        // Re-import blocks previously rejected for (possibly) temporal inherent issues
        import_existing: self.import.recheck.read().unwrap().contains(&hash),
        state: None,
      }],
    );

    // Wait for the import to resolve, propagating any import error to the machine
    ImportFuture::new(hash, queue_write.as_mut().unwrap()).await?;

    // Sanity checks that a child block can have less work than its parent
    {
      let info = self.import.client.info();
      assert_eq!(info.best_hash, parent);
      assert_eq!(info.finalized_hash, parent);
      assert_eq!(info.best_number, number - 1u8.into());
      assert_eq!(info.finalized_number, number - 1u8.into());
    }

    Ok(())
  }

  // Finalizes a committed block and returns the next proposal to build on top of it.
  async fn add_block(
    &mut self,
    block: T::Block,
    commit: Commit<TendermintValidators<T>>,
  ) -> Option<T::Block> {
    // Prevent import_block from being called while we run
    let _lock = self.import.sync_lock.lock().await;

    // If we didn't import this block already, return
    // If it's a legitimate block, we'll pick it up in the standard sync loop
    if self.import.client.block_status(block.hash()).unwrap() != BlockStatus::InChainWithState {
      return None;
    }

    // Check if we already imported this externally
    if self.import.client.justifications(block.hash()).unwrap().is_some() {
      debug!(target: "tendermint", "Machine produced a commit after we already synced it");
    } else {
      let hash = block.hash();
      // Encode the commit as a Substrate justification under our consensus engine ID
      let justification = (CONSENSUS_ID, commit.encode());
      debug_assert!(self.import.verify_justification(hash, &justification).is_ok());

      let raw_number = *block.header().number();
      let number: u64 = match raw_number.try_into() {
        Ok(number) => number,
        Err(_) => panic!("BlockNumber exceeded u64"),
      };

      let active = self.active.as_mut().unwrap();
      let mut block_in_progress = active.block_in_progress.write().unwrap();
      // This will hold true unless we received, and handled, a notification for the block before
      // its justification was made available
      debug_assert_eq!(number, *block_in_progress);

      // Finalize the block
      self
        .import
        .client
        .finalize_block(hash, Some(justification), true)
        .map_err(|_| Error::InvalidJustification)
        .unwrap();

      // Tell the loop we received a block and to move to the next
      *block_in_progress = number + 1;
      if active.new_block_event.unbounded_send(()).is_err() {
        warn!(
          target: "tendermint",
          "Attempted to send a new number to the gossip handler except it's closed. {}",
          "Is the node shutting down?"
        );
      }

      // Announce the block to the network so new clients can sync properly
      active.announce.announce_block(hash, None);
      active.announce.new_best_block_imported(hash, raw_number);
    }

    // Clear any blocks for the previous slot which we were willing to recheck
    *self.import.recheck.write().unwrap() = HashSet::new();

    Some(self.get_proposal(block.header()).await)
  }
}
|
||||
@@ -1,180 +0,0 @@
|
||||
use std::{marker::PhantomData, sync::Arc, collections::HashMap};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use sp_runtime::traits::{Header, Block};
|
||||
use sp_blockchain::{BlockStatus, HeaderBackend, Backend as BlockchainBackend};
|
||||
use sp_consensus::{Error, CacheKeyId, BlockOrigin, SelectChain};
|
||||
|
||||
use sc_consensus::{BlockCheckParams, BlockImportParams, ImportResult, BlockImport, Verifier};
|
||||
|
||||
use sc_client_api::{Backend, BlockBackend};
|
||||
|
||||
use crate::{TendermintValidator, tendermint::TendermintImport};
|
||||
|
||||
impl<T: TendermintValidator> TendermintImport<T> {
|
||||
fn check_already_in_chain(&self, hash: <T::Block as Block>::Hash) -> bool {
|
||||
// If it's in chain, with justifications, return it's already on chain
|
||||
// If it's in chain, without justifications, continue the block import process to import its
|
||||
// justifications
|
||||
// This can be triggered if the validators add a block, without justifications, yet the p2p
|
||||
// process then broadcasts it with its justifications
|
||||
(self.client.status(hash).unwrap() == BlockStatus::InChain) &&
|
||||
self.client.justifications(hash).unwrap().is_some()
|
||||
}
|
||||
}
|
||||
|
||||
// Wraps the client's own BlockImport, layering Tendermint's ordering and justification checks
// on top before delegating.
#[async_trait]
impl<T: TendermintValidator> BlockImport<T::Block> for TendermintImport<T>
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  type Error = Error;
  type Transaction = T::BackendTransaction;

  // TODO: Is there a DoS where you send a block without justifications, causing it to error,
  // yet adding it to the blacklist in the process preventing further syncing?
  async fn check_block(
    &mut self,
    mut block: BlockCheckParams<T::Block>,
  ) -> Result<ImportResult, Self::Error> {
    if self.check_already_in_chain(block.hash) {
      return Ok(ImportResult::AlreadyInChain);
    }
    // Only accept the direct child of the latest finalized block
    self.verify_order(block.parent_hash, block.number)?;

    // Does not verify origin here as origin only applies to unfinalized blocks
    // We don't have context on if this block has justifications or not

    // Require a fully available, fully connected chain
    block.allow_missing_state = false;
    block.allow_missing_parent = false;

    self.client.check_block(block).await.map_err(Into::into)
  }

  async fn import_block(
    &mut self,
    mut block: BlockImportParams<T::Block, Self::Transaction>,
    new_cache: HashMap<CacheKeyId, Vec<u8>>,
  ) -> Result<ImportResult, Self::Error> {
    // Don't allow multiple blocks to be imported at once
    let _lock = self.sync_lock.lock().await;

    if self.check_already_in_chain(block.header.hash()) {
      return Ok(ImportResult::AlreadyInChain);
    }

    // Runs the full Tendermint validity checks (order, justifications, origin, inherents)
    self.check(&mut block).await?;
    self.client.import_block(block, new_cache).await.map_err(Into::into)
  }
}
|
||||
|
||||
// Verifier run by the import queue. Primarily responsible for re-mapping block origins so
// Substrate's notification behavior matches what the Tendermint machine and Serai's indexing
// require, then running the standard Tendermint checks.
#[async_trait]
impl<T: TendermintValidator> Verifier<T::Block> for TendermintImport<T>
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  async fn verify(
    &mut self,
    mut block: BlockImportParams<T::Block, ()>,
  ) -> Result<(BlockImportParams<T::Block, ()>, Option<Vec<(CacheKeyId, Vec<u8>)>>), String> {
    block.origin = match block.origin {
      BlockOrigin::Genesis => BlockOrigin::Genesis,
      BlockOrigin::NetworkBroadcast => BlockOrigin::NetworkBroadcast,

      // Re-map NetworkInitialSync to NetworkBroadcast so it still triggers notifications
      // Tendermint will listen to the finality stream. If we sync a block we're running a machine
      // for, it'll force the machine to move ahead. We can only do that if there actually are
      // notifications
      //
      // Then Serai also runs data indexing code based on block addition, so ensuring it always
      // emits events ensures we always perform our necessary indexing (albeit with a race
      // condition since Substrate will eventually prune the block's state, potentially before
      // indexing finishes when syncing)
      //
      // The alternative to this would be editing Substrate directly, which would be a lot less
      // fragile, manually triggering the notifications (which may be possible with code intended
      // for testing), writing our own notification system, or implementing lock_import_and_run
      // on our end, letting us directly set the notifications, so we're not beholden to when
      // Substrate decides to call notify_finalized
      //
      // lock_import_and_run unfortunately doesn't allow async code and generally isn't feasible to
      // work with though. We also couldn't use it to prevent Substrate from creating
      // notifications, so it only solves half the problem. We'd *still* have to keep this patch,
      // with all its fragility, unless we edit Substrate or move the entire block import flow here
      BlockOrigin::NetworkInitialSync => BlockOrigin::NetworkBroadcast,
      // Also re-map File so bootstraps also trigger notifications, enabling using bootstraps
      BlockOrigin::File => BlockOrigin::NetworkBroadcast,

      // We do not want this block, which hasn't been confirmed, to be broadcast over the net
      // Substrate will generate notifications unless it's Genesis, which this isn't, InitialSync,
      // which changes telemetry behavior, or File, which is... close enough
      BlockOrigin::ConsensusBroadcast => BlockOrigin::File,
      BlockOrigin::Own => BlockOrigin::File,
    };

    // Blocks already on chain with justifications need no further verification
    if self.check_already_in_chain(block.header.hash()) {
      return Ok((block, None));
    }

    // The Verifier interface reports failures as plain strings
    self.check(&mut block).await.map_err(|e| format!("{e}"))?;
    Ok((block, None))
  }
}
|
||||
|
||||
/// Tendermint's Select Chain, where the best chain is defined as the most recently finalized
/// block.
///
/// leaves panics on call due to not being applicable under Tendermint. Any provided answer would
/// have conflicts best left unraised.
//
// SelectChain, while provided by Substrate and part of PartialComponents, isn't used by Substrate
// It's common between various block-production/finality crates, yet Substrate as a system doesn't
// rely on it, which is good, because its definition is explicitly incompatible with Tendermint
//
// leaves is supposed to return all leaves of the blockchain. While Tendermint maintains that view,
// an honest node will only build on the most recently finalized block, so it is a 'leaf' despite
// having descendants
//
// best_chain will always be this finalized block, yet Substrate explicitly defines it as one of
// the above leaves, which this finalized block is explicitly not included in. Accordingly, we
// can never provide a compatible decision
//
// Since PartialComponents expects it, an implementation which does its best is provided. It panics
// if leaves is called, yet returns the finalized chain tip for best_chain, as that's intended to
// be the header to build upon
//
// Newtype over the backend (PhantomData carries the block type without storing one)
pub struct TendermintSelectChain<B: Block, Be: Backend<B>>(Arc<Be>, PhantomData<B>);
|
||||
|
||||
impl<B: Block, Be: Backend<B>> Clone for TendermintSelectChain<B, Be> {
|
||||
fn clone(&self) -> Self {
|
||||
TendermintSelectChain(self.0.clone(), PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Block, Be: Backend<B>> TendermintSelectChain<B, Be> {
|
||||
pub fn new(backend: Arc<Be>) -> TendermintSelectChain<B, Be> {
|
||||
TendermintSelectChain(backend, PhantomData)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<B: Block, Be: Backend<B>> SelectChain<B> for TendermintSelectChain<B, Be> {
|
||||
async fn leaves(&self) -> Result<Vec<B::Hash>, Error> {
|
||||
panic!("Substrate definition of leaves is incompatible with Tendermint")
|
||||
}
|
||||
|
||||
async fn best_chain(&self) -> Result<B::Header, Error> {
|
||||
Ok(
|
||||
self
|
||||
.0
|
||||
.blockchain()
|
||||
// There should always be a finalized block
|
||||
.header(self.0.blockchain().last_finalized().unwrap())
|
||||
// There should not be an error in retrieving it and since it's finalized, it should exist
|
||||
.unwrap()
|
||||
.unwrap(),
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,163 +0,0 @@
|
||||
use std::sync::Arc;
|
||||
|
||||
use sp_core::crypto::KeyTypeId;
|
||||
use sp_inherents::CreateInherentDataProviders;
|
||||
use sp_runtime::traits::{Header, Block};
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_api::{StateBackend, StateBackendFor, TransactionFor, ApiExt, ProvideRuntimeApi};
|
||||
use sp_consensus::{Error, Environment};
|
||||
|
||||
use sc_client_api::{BlockBackend, Backend, Finalizer, BlockchainEvents};
|
||||
use sc_block_builder::BlockBuilderApi;
|
||||
use sc_consensus::{BlockImport, BasicQueue};
|
||||
|
||||
use sc_network_common::config::NonDefaultSetConfig;
|
||||
use sc_network::{ProtocolName, NetworkBlock};
|
||||
use sc_network_gossip::Network;
|
||||
|
||||
use sp_tendermint::TendermintApi;
|
||||
|
||||
use substrate_prometheus_endpoint::Registry;
|
||||
|
||||
mod validators;
|
||||
|
||||
pub(crate) mod tendermint;
|
||||
pub use tendermint::TendermintImport;
|
||||
|
||||
mod block_import;
|
||||
pub use block_import::TendermintSelectChain;
|
||||
|
||||
pub(crate) mod authority;
|
||||
pub use authority::TendermintAuthority;
|
||||
|
||||
/// Consensus engine ID used to tag Tendermint justifications.
pub const CONSENSUS_ID: [u8; 4] = *b"tend";
/// Key type under which Tendermint session keys live in the keystore.
pub(crate) const KEY_TYPE_ID: KeyTypeId = KeyTypeId(CONSENSUS_ID);

// Suffix of the gossip protocol name; prefixed with the genesis hash (and optional fork) by
// protocol_name below
const PROTOCOL_NAME: &str = "/tendermint/1";
|
||||
|
||||
pub fn protocol_name<Hash: AsRef<[u8]>>(genesis: Hash, fork: Option<&str>) -> ProtocolName {
|
||||
let mut name = format!("/{}", hex::encode(genesis.as_ref()));
|
||||
if let Some(fork) = fork {
|
||||
name += &format!("/{fork}");
|
||||
}
|
||||
name += PROTOCOL_NAME;
|
||||
name.into()
|
||||
}
|
||||
|
||||
/// Builds the non-default network set config for the Tendermint gossip protocol, allowing
/// non-reserved peers and padding the max message size beyond the block size.
pub fn set_config(protocol: ProtocolName, block_size: u64) -> NonDefaultSetConfig {
  // The extra 512 bytes is for the additional data part of Tendermint
  // Even with BLS, that should just be 161 bytes in the worst case, for a perfect messaging scheme
  // While 256 bytes would suffice there, it's unknown if any LibP2P overhead exists nor if
  // anything here will be perfect. Considering this is minuscule compared to the block size, it's
  // better safe than sorry.
  let mut cfg = NonDefaultSetConfig::new(protocol, block_size + 512);
  // Allow up to 25 non-reserved inbound and 25 non-reserved outbound peers
  cfg.allow_non_reserved(25, 25);
  cfg
}
|
||||
|
||||
/// Trait consolidating all generics required by sc_tendermint for processing.
pub trait TendermintClient: Send + Sync + 'static {
  /// Maximum size of a proposed block, in bytes.
  const PROPOSED_BLOCK_SIZE_LIMIT: usize;
  /// Expected time to process a block, used to size Tendermint's rounds.
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32;
  /// Expected network latency, used to size Tendermint's rounds.
  const LATENCY_TIME_IN_SECONDS: u32;

  type Block: Block;
  type Backend: Backend<Self::Block> + 'static;

  /// TransactionFor<Client, Block>
  type BackendTransaction: Send + Sync + 'static;
  /// StateBackendFor<Client, Block>
  type StateBackend: StateBackend<
    <<Self::Block as Block>::Header as Header>::Hashing,
    Transaction = Self::BackendTransaction,
  >;
  // Client::Api — must offer block building and the Tendermint runtime API
  type Api: ApiExt<Self::Block, StateBackend = Self::StateBackend>
    + BlockBuilderApi<Self::Block>
    + TendermintApi<Self::Block>;
  // Full client, capable of import, finalization, and serving the runtime API
  type Client: Send
    + Sync
    + HeaderBackend<Self::Block>
    + BlockBackend<Self::Block>
    + BlockImport<Self::Block, Transaction = Self::BackendTransaction>
    + Finalizer<Self::Block, Self::Backend>
    + BlockchainEvents<Self::Block>
    + ProvideRuntimeApi<Self::Block, Api = Self::Api>
    + 'static;
}
|
||||
|
||||
/// Trait implementable on firm types to automatically provide a full TendermintClient impl.
///
/// Unlike TendermintClient, the transaction/state-backend associated types are derived from the
/// client via the blanket impl below, so implementors only name the block, backend, API, and
/// client types.
pub trait TendermintClientMinimal: Send + Sync + 'static {
  /// Maximum size of a proposed block, in bytes.
  const PROPOSED_BLOCK_SIZE_LIMIT: usize;
  /// Expected time to process a block, used to size Tendermint's rounds.
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32;
  /// Expected network latency, used to size Tendermint's rounds.
  const LATENCY_TIME_IN_SECONDS: u32;

  type Block: Block;
  type Backend: Backend<Self::Block> + 'static;
  type Api: ApiExt<Self::Block> + BlockBuilderApi<Self::Block> + TendermintApi<Self::Block>;
  type Client: Send
    + Sync
    + HeaderBackend<Self::Block>
    + BlockBackend<Self::Block>
    + BlockImport<Self::Block, Transaction = TransactionFor<Self::Client, Self::Block>>
    + Finalizer<Self::Block, Self::Backend>
    + BlockchainEvents<Self::Block>
    + ProvideRuntimeApi<Self::Block, Api = Self::Api>
    + 'static;
}
|
||||
|
||||
// Blanket impl deriving the full TendermintClient from the minimal trait: the transaction and
// state backend types are pulled from the client itself via TransactionFor/StateBackendFor.
impl<T: TendermintClientMinimal> TendermintClient for T
where
  <T::Client as ProvideRuntimeApi<T::Block>>::Api:
    BlockBuilderApi<T::Block> + TendermintApi<T::Block>,
  TransactionFor<T::Client, T::Block>: Send + Sync + 'static,
{
  const PROPOSED_BLOCK_SIZE_LIMIT: usize = T::PROPOSED_BLOCK_SIZE_LIMIT;
  const BLOCK_PROCESSING_TIME_IN_SECONDS: u32 = T::BLOCK_PROCESSING_TIME_IN_SECONDS;
  const LATENCY_TIME_IN_SECONDS: u32 = T::LATENCY_TIME_IN_SECONDS;

  type Block = T::Block;
  type Backend = T::Backend;

  type BackendTransaction = TransactionFor<T::Client, T::Block>;
  type StateBackend = StateBackendFor<T::Client, T::Block>;
  type Api = <T::Client as ProvideRuntimeApi<T::Block>>::Api;
  type Client = T::Client;
}
|
||||
|
||||
/// Trait consolidating additional generics required by sc_tendermint for authoring.
pub trait TendermintValidator: TendermintClient {
  /// Provider of inherent data for proposed blocks.
  type CIDP: CreateInherentDataProviders<Self::Block, ()> + 'static;
  /// Block proposer factory.
  type Environment: Send + Sync + Environment<Self::Block> + 'static;

  /// Network handle, usable for both gossip and block announcement.
  type Network: Clone
    + Send
    + Sync
    + Network<Self::Block>
    + NetworkBlock<<Self::Block as Block>::Hash, <<Self::Block as Block>::Header as Header>::Number>
    + 'static;
}
|
||||
|
||||
/// Import queue used by Tendermint — an alias of Substrate's BasicQueue.
pub type TendermintImportQueue<Block, Transaction> = BasicQueue<Block, Transaction>;
|
||||
|
||||
/// Create an import queue, additionally returning the Tendermint Import object itself, enabling
/// creating an author later as well.
pub fn import_queue<T: TendermintValidator>(
  spawner: &impl sp_core::traits::SpawnEssentialNamed,
  client: Arc<T::Client>,
  registry: Option<&Registry>,
) -> (TendermintImport<T>, TendermintImportQueue<T::Block, T::BackendTransaction>)
where
  Arc<T::Client>: BlockImport<T::Block, Transaction = T::BackendTransaction>,
  <Arc<T::Client> as BlockImport<T::Block>>::Error: Into<Error>,
{
  let import = TendermintImport::<T>::new(client);

  // The TendermintImport serves as block importer and verifier both
  let boxed = Box::new(import.clone());
  // Use None for the justification importer since justifications always come with blocks
  // Therefore, they're never imported after the fact, which is what mandates an importer
  let queue = || BasicQueue::new(import.clone(), boxed.clone(), None, spawner, registry);

  // Two queues are instantiated: one stored inside the import (so the authority can drive
  // imports itself) and one returned to the caller for the service
  // NOTE(review): both share the same underlying import/verifier — confirm intended
  *futures::executor::block_on(import.queue.write()) = Some(queue());
  (import.clone(), queue())
}
|
||||
@@ -1,247 +0,0 @@
|
||||
use std::{
|
||||
sync::{Arc, RwLock},
|
||||
collections::HashSet,
|
||||
};
|
||||
|
||||
use log::{debug, warn};
|
||||
|
||||
use tokio::sync::{Mutex, RwLock as AsyncRwLock};
|
||||
|
||||
use sp_core::Decode;
|
||||
use sp_runtime::{
|
||||
traits::{Header, Block},
|
||||
Justification,
|
||||
};
|
||||
use sp_inherents::{InherentData, InherentDataProvider, CreateInherentDataProviders};
|
||||
use sp_blockchain::HeaderBackend;
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
|
||||
use sp_consensus::Error;
|
||||
use sc_consensus::{ForkChoiceStrategy, BlockImportParams};
|
||||
|
||||
use sc_block_builder::BlockBuilderApi;
|
||||
|
||||
use tendermint_machine::ext::{BlockError, Commit, Network};
|
||||
|
||||
use crate::{
|
||||
CONSENSUS_ID, TendermintClient, TendermintValidator, validators::TendermintValidators,
|
||||
TendermintImportQueue, authority::TendermintAuthority,
|
||||
};
|
||||
|
||||
// TendermintImportQueue instantiated with the Block/BackendTransaction types of a specific
// TendermintClient
type InstantiatedTendermintImportQueue<T> = TendermintImportQueue<
  <T as TendermintClient>::Block,
  <T as TendermintClient>::BackendTransaction,
>;
|
||||
|
||||
/// Tendermint import handler.
///
/// Shared (via internal Arcs) between the import queue, the verifier, and the authority.
pub struct TendermintImport<T: TendermintValidator> {
  // Lock ensuring only one block is imported at a time
  pub(crate) sync_lock: Arc<Mutex<()>>,

  // View of the current validator set
  pub(crate) validators: TendermintValidators<T>,

  // Inherent data providers; set after construction (None until then)
  pub(crate) providers: Arc<AsyncRwLock<Option<T::CIDP>>>,
  // Hash of the block the Tendermint machine is currently importing, used by verify_origin to
  // distinguish machine-proposed blocks from ones arriving over the traditional network
  pub(crate) importing_block: Arc<RwLock<Option<<T::Block as Block>::Hash>>>,

  // A set of blocks which we're willing to recheck
  // We reject blocks with invalid inherents, yet inherents can be fatally flawed or solely
  // perceived as flawed
  // If we solely perceive them as flawed, we mark them as eligible for being checked again. Then,
  // if they're proposed again, we see if our perception has changed
  pub(crate) recheck: Arc<RwLock<HashSet<<T::Block as Block>::Hash>>>,

  pub(crate) client: Arc<T::Client>,
  // Import queue handle; set after construction by import_queue (None until then)
  pub(crate) queue: Arc<AsyncRwLock<Option<InstantiatedTendermintImportQueue<T>>>>,
}
|
||||
|
||||
// Manual Clone impl: a derive would add a `T: Clone` bound which isn't wanted. Every field is a
// shared handle (Arc or, presumably, Arc-backed — confirm for TendermintValidators), so clones
// observe the same state.
impl<T: TendermintValidator> Clone for TendermintImport<T> {
  fn clone(&self) -> Self {
    TendermintImport {
      sync_lock: self.sync_lock.clone(),

      validators: self.validators.clone(),

      providers: self.providers.clone(),
      importing_block: self.importing_block.clone(),
      recheck: self.recheck.clone(),

      client: self.client.clone(),
      queue: self.queue.clone(),
    }
  }
}
|
||||
|
||||
impl<T: TendermintValidator> TendermintImport<T> {
  // Construct a fresh import handler; providers and queue start as None and are set later.
  pub(crate) fn new(client: Arc<T::Client>) -> TendermintImport<T> {
    TendermintImport {
      sync_lock: Arc::new(Mutex::new(())),

      validators: TendermintValidators::new(client.clone()),

      providers: Arc::new(AsyncRwLock::new(None)),
      importing_block: Arc::new(RwLock::new(None)),
      recheck: Arc::new(RwLock::new(HashSet::new())),

      client,
      queue: Arc::new(AsyncRwLock::new(None)),
    }
  }

  // Create inherent data for a block building on `parent`. Failures are logged and degraded to
  // empty inherent data rather than propagated.
  pub(crate) async fn inherent_data(&self, parent: <T::Block as Block>::Hash) -> InherentData {
    match self
      .providers
      .read()
      .await
      .as_ref()
      .unwrap()
      .create_inherent_data_providers(parent, ())
      .await
    {
      Ok(providers) => match providers.create_inherent_data().await {
        Ok(data) => Some(data),
        Err(err) => {
          warn!(target: "tendermint", "Failed to create inherent data: {}", err);
          None
        }
      },
      Err(err) => {
        warn!(target: "tendermint", "Failed to create inherent data providers: {}", err);
        None
      }
    }
    .unwrap_or_else(InherentData::new)
  }

  // Check a proposed block's inherents via the runtime, tracking temporally-flawed blocks in the
  // recheck set so they may be accepted if re-proposed later.
  async fn check_inherents(
    &self,
    hash: <T::Block as Block>::Hash,
    block: T::Block,
  ) -> Result<(), Error> {
    let inherent_data = self.inherent_data(*block.header().parent_hash()).await;
    let err = self
      .client
      .runtime_api()
      .check_inherents(self.client.info().finalized_hash, block, inherent_data)
      .map_err(|_| Error::Other(BlockError::Fatal.into()))?;

    if err.ok() {
      // Inherents passed; this block no longer needs rechecking
      self.recheck.write().unwrap().remove(&hash);
      Ok(())
    } else if err.fatal_error() {
      Err(Error::Other(BlockError::Fatal.into()))
    } else {
      debug!(target: "tendermint", "Proposed block has temporally wrong inherents");
      // Solely perceived as flawed; eligible for rechecking if proposed again
      self.recheck.write().unwrap().insert(hash);
      Err(Error::Other(BlockError::Temporal.into()))
    }
  }

  // Ensure this is part of a sequential import: the block must be the direct child of the
  // latest finalized block
  pub(crate) fn verify_order(
    &self,
    parent: <T::Block as Block>::Hash,
    number: <<T::Block as Block>::Header as Header>::Number,
  ) -> Result<(), Error> {
    let info = self.client.info();
    if (info.finalized_hash != parent) || ((info.finalized_number + 1u16.into()) != number) {
      Err(Error::Other("non-sequential import".into()))?;
    }
    Ok(())
  }

  // Do not allow blocks from the traditional network to be broadcast
  // Only allow blocks from Tendermint
  // Tendermint's propose message could be rewritten as a seal OR Tendermint could produce blocks
  // which this checks the proposer slot for, and then tells the Tendermint machine
  // While those would be more seamless with Substrate, there's no actual benefit to doing so
  fn verify_origin(&self, hash: <T::Block as Block>::Hash) -> Result<(), Error> {
    if let Some(tm_hash) = *self.importing_block.read().unwrap() {
      if hash == tm_hash {
        return Ok(());
      }
    }
    Err(Error::Other("block created outside of tendermint".into()))
  }

  // Errors if the justification isn't valid: wrong engine ID, undecodable commit, or a commit
  // which doesn't verify against the validator set
  pub(crate) fn verify_justification(
    &self,
    hash: <T::Block as Block>::Hash,
    justification: &Justification,
  ) -> Result<(), Error> {
    if justification.0 != CONSENSUS_ID {
      Err(Error::InvalidJustification)?;
    }

    let commit: Commit<TendermintValidators<T>> =
      Commit::decode(&mut justification.1.as_ref()).map_err(|_| Error::InvalidJustification)?;
    // Create a stubbed TendermintAuthority so we can verify the commit
    if !TendermintAuthority::stub(self.clone()).verify_commit(hash, &commit) {
      Err(Error::InvalidJustification)?;
    }
    Ok(())
  }

  // Verifies the justifications aren't malformed, not that the block is justified
  // Errors if justifications is neither empty nor a single Tendermint justification
  // If the block does have a justification, finalized will be set to true
  fn verify_justifications<BT>(
    &self,
    block: &mut BlockImportParams<T::Block, BT>,
  ) -> Result<(), Error> {
    if !block.finalized {
      if let Some(justifications) = &block.justifications {
        let mut iter = justifications.iter();
        let next = iter.next();
        // Require exactly one justification
        if next.is_none() || iter.next().is_some() {
          Err(Error::InvalidJustification)?;
        }
        self.verify_justification(block.header.hash(), next.unwrap())?;
        block.finalized = true;
      }
    }
    Ok(())
  }

  // Full Tendermint validity check on an import: fork choice, order, justifications, and (for
  // unfinalized blocks) origin and inherents
  pub(crate) async fn check<BT>(
    &self,
    block: &mut BlockImportParams<T::Block, BT>,
  ) -> Result<(), Error> {
    if block.finalized {
      if block.fork_choice != Some(ForkChoiceStrategy::Custom(false)) {
        // Since we always set the fork choice, this means something else marked the block as
        // finalized, which shouldn't be possible. Ensuring nothing else is setting blocks as
        // finalized helps ensure our security
        panic!("block was finalized despite not setting the fork choice");
      }
      return Ok(());
    }

    // Set the block as a worse choice
    block.fork_choice = Some(ForkChoiceStrategy::Custom(false));

    self.verify_order(*block.header.parent_hash(), *block.header.number())?;
    self.verify_justifications(block)?;

    // If the block wasn't finalized, verify the origin and validity of its inherents
    if !block.finalized {
      let hash = block.header.hash();
      self.verify_origin(hash)?;
      self
        .check_inherents(hash, T::Block::new(block.header.clone(), block.body.clone().unwrap()))
        .await?;
    }

    // Additionally check these fields are empty
    // They *should* be unused, so requiring their emptiness prevents malleability and ensures
    // nothing slips through
    if !block.post_digests.is_empty() {
      Err(Error::Other("post-digests included".into()))?;
    }
    if !block.auxiliary.is_empty() {
      Err(Error::Other("auxiliary included".into()))?;
    }

    Ok(())
  }
}
|
||||
@@ -1,191 +0,0 @@
|
||||
use core::ops::Deref;
|
||||
use std::sync::{Arc, RwLock};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use sp_core::Decode;
|
||||
use sp_application_crypto::{
|
||||
RuntimePublic as PublicTrait,
|
||||
sr25519::{Public, Signature},
|
||||
};
|
||||
use sp_keystore::CryptoStore;
|
||||
|
||||
use sp_staking::SessionIndex;
|
||||
use sp_api::ProvideRuntimeApi;
|
||||
|
||||
use sc_client_api::HeaderBackend;
|
||||
|
||||
use tendermint_machine::ext::{BlockNumber, RoundNumber, Weights, Signer, SignatureScheme};
|
||||
|
||||
use sp_tendermint::TendermintApi;
|
||||
|
||||
use crate::{KEY_TYPE_ID, TendermintClient};
|
||||
|
||||
struct TendermintValidatorsStruct {
|
||||
session: SessionIndex,
|
||||
|
||||
total_weight: u64,
|
||||
weights: Vec<u64>,
|
||||
|
||||
lookup: Vec<Public>,
|
||||
}
|
||||
|
||||
impl TendermintValidatorsStruct {
|
||||
fn from_module<T: TendermintClient>(client: &Arc<T::Client>) -> Self {
|
||||
let last = client.info().finalized_hash;
|
||||
let api = client.runtime_api();
|
||||
let session = api.current_session(last).unwrap();
|
||||
let validators = api.validators(last).unwrap();
|
||||
|
||||
Self {
|
||||
session,
|
||||
|
||||
// TODO
|
||||
total_weight: validators.len().try_into().unwrap(),
|
||||
weights: vec![1; validators.len()],
|
||||
|
||||
lookup: validators,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Wrap every access of the validators struct in something which forces calling refresh
|
||||
struct Refresh<T: TendermintClient> {
|
||||
client: Arc<T::Client>,
|
||||
_refresh: Arc<RwLock<TendermintValidatorsStruct>>,
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> Refresh<T> {
|
||||
// If the session has changed, re-create the struct with the data on it
|
||||
fn refresh(&self) {
|
||||
let session = self._refresh.read().unwrap().session;
|
||||
if session !=
|
||||
self.client.runtime_api().current_session(self.client.info().finalized_hash).unwrap()
|
||||
{
|
||||
*self._refresh.write().unwrap() = TendermintValidatorsStruct::from_module::<T>(&self.client);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> Deref for Refresh<T> {
|
||||
type Target = RwLock<TendermintValidatorsStruct>;
|
||||
fn deref(&self) -> &RwLock<TendermintValidatorsStruct> {
|
||||
self.refresh();
|
||||
&self._refresh
|
||||
}
|
||||
}
|
||||
|
||||
/// Tendermint validators observer, providing data on the active validators.
|
||||
pub struct TendermintValidators<T: TendermintClient>(Refresh<T>);
|
||||
impl<T: TendermintClient> Clone for TendermintValidators<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(Refresh { _refresh: self.0._refresh.clone(), client: self.0.client.clone() })
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> TendermintValidators<T> {
|
||||
pub(crate) fn new(client: Arc<T::Client>) -> TendermintValidators<T> {
|
||||
TendermintValidators(Refresh {
|
||||
_refresh: Arc::new(RwLock::new(TendermintValidatorsStruct::from_module::<T>(&client))),
|
||||
client,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub struct TendermintSigner<T: TendermintClient>(
|
||||
pub(crate) Arc<dyn CryptoStore>,
|
||||
pub(crate) TendermintValidators<T>,
|
||||
);
|
||||
|
||||
impl<T: TendermintClient> Clone for TendermintSigner<T> {
|
||||
fn clone(&self) -> Self {
|
||||
Self(self.0.clone(), self.1.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> TendermintSigner<T> {
|
||||
async fn get_public_key(&self) -> Public {
|
||||
let pubs = self.0.sr25519_public_keys(KEY_TYPE_ID).await;
|
||||
if pubs.is_empty() {
|
||||
self.0.sr25519_generate_new(KEY_TYPE_ID, None).await.unwrap()
|
||||
} else {
|
||||
pubs[0]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<T: TendermintClient> Signer for TendermintSigner<T> {
|
||||
type ValidatorId = u16;
|
||||
type Signature = Signature;
|
||||
|
||||
async fn validator_id(&self) -> Option<u16> {
|
||||
let key = self.get_public_key().await;
|
||||
for (i, k) in (*self.1 .0).read().unwrap().lookup.iter().enumerate() {
|
||||
if k == &key {
|
||||
return Some(u16::try_from(i).unwrap());
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
async fn sign(&self, msg: &[u8]) -> Signature {
|
||||
Signature::decode(
|
||||
&mut self
|
||||
.0
|
||||
.sign_with(KEY_TYPE_ID, &self.get_public_key().await.into(), msg)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap()
|
||||
.as_ref(),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> SignatureScheme for TendermintValidators<T> {
|
||||
type ValidatorId = u16;
|
||||
type Signature = Signature;
|
||||
type AggregateSignature = Vec<Signature>;
|
||||
type Signer = TendermintSigner<T>;
|
||||
|
||||
fn verify(&self, validator: u16, msg: &[u8], sig: &Signature) -> bool {
|
||||
self.0.read().unwrap().lookup[usize::try_from(validator).unwrap()].verify(&msg, sig)
|
||||
}
|
||||
|
||||
fn aggregate(sigs: &[Signature]) -> Vec<Signature> {
|
||||
sigs.to_vec()
|
||||
}
|
||||
|
||||
fn verify_aggregate(&self, validators: &[u16], msg: &[u8], sigs: &Vec<Signature>) -> bool {
|
||||
if validators.len() != sigs.len() {
|
||||
return false;
|
||||
}
|
||||
for (v, sig) in validators.iter().zip(sigs.iter()) {
|
||||
if !self.verify(*v, msg, sig) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: TendermintClient> Weights for TendermintValidators<T> {
|
||||
type ValidatorId = u16;
|
||||
|
||||
fn total_weight(&self) -> u64 {
|
||||
self.0.read().unwrap().total_weight
|
||||
}
|
||||
|
||||
fn weight(&self, id: u16) -> u64 {
|
||||
self.0.read().unwrap().weights[usize::try_from(id).unwrap()]
|
||||
}
|
||||
|
||||
// TODO: https://github.com/serai-dex/serai/issues/159
|
||||
fn proposer(&self, number: BlockNumber, round: RoundNumber) -> u16 {
|
||||
u16::try_from(
|
||||
(number.0 + u64::from(round.0)) % u64::try_from(self.0.read().unwrap().lookup.len()).unwrap(),
|
||||
)
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
[package]
|
||||
name = "tendermint-machine"
|
||||
version = "0.2.0"
|
||||
description = "An implementation of the Tendermint state machine in Rust"
|
||||
license = "MIT"
|
||||
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/machine"
|
||||
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
|
||||
edition = "2021"
|
||||
|
||||
[dependencies]
|
||||
async-trait = "0.1"
|
||||
thiserror = "1"
|
||||
|
||||
log = "0.4"
|
||||
|
||||
parity-scale-codec = { version = "3", features = ["derive"] }
|
||||
|
||||
futures = "0.3"
|
||||
tokio = { version = "1", features = ["macros", "sync", "time", "rt"] }
|
||||
|
||||
sp-runtime = { git = "https://github.com/serai-dex/substrate", version = "7.0.0", optional = true }
|
||||
|
||||
[features]
|
||||
substrate = ["sp-runtime"]
|
||||
@@ -1,21 +0,0 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2022-2023 Luke Parker
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
@@ -1,61 +0,0 @@
|
||||
# Tendermint
|
||||
|
||||
An implementation of the Tendermint state machine in Rust.
|
||||
|
||||
This is solely the state machine, intended to be mapped to any arbitrary system.
|
||||
It supports an arbitrary signature scheme, weighting, and block definition
|
||||
accordingly. It is not intended to work with the Cosmos SDK, solely to be an
|
||||
implementation of the [academic protocol](https://arxiv.org/pdf/1807.04938.pdf).
|
||||
|
||||
### Caveats
|
||||
|
||||
- Only SCALE serialization is supported currently. Ideally, everything from
|
||||
SCALE to borsh to bincode would be supported. SCALE was chosen due to this
|
||||
being under Serai, which uses Substrate, which uses SCALE. Accordingly, when
|
||||
deciding which of the three (mutually incompatible) options to support...
|
||||
|
||||
- The only supported runtime is tokio due to requiring a `sleep` implementation.
|
||||
Ideally, the runtime choice will be moved to a feature in the future.
|
||||
|
||||
- It is possible for `add_block` to be called on a block which failed (or never
|
||||
went through in the first place) validation. This is a break from the paper
|
||||
which is accepted here. This is for two reasons.
|
||||
|
||||
1) Serai needing this functionality.
|
||||
2) If a block is committed which is invalid, either there's a malicious
|
||||
majority now defining consensus OR the local node is malicious by virtue of
|
||||
being faulty. Considering how either represents a fatal circumstance,
|
||||
except with regards to system like Serai which have their own logic for
|
||||
pseudo-valid blocks, it is accepted as a possible behavior with the caveat
|
||||
any consumers must be aware of it. No machine will vote nor precommit to a
|
||||
block it considers invalid, so for a network with an honest majority, this
|
||||
is a non-issue.
|
||||
|
||||
### Paper
|
||||
|
||||
The [paper](https://arxiv.org/abs/1807.04938) describes the algorithm with
|
||||
pseudocode on page 6. This pseudocode isn't directly implementable, nor does it
|
||||
specify faulty behavior. Instead, it's solely a series of conditions which
|
||||
trigger events in order to successfully achieve consensus.
|
||||
|
||||
The included pseudocode segments can be minimally described as follows:
|
||||
|
||||
```
|
||||
01-09 Init
|
||||
10-10 StartRound(0)
|
||||
11-21 StartRound
|
||||
22-27 Fresh proposal
|
||||
28-33 Proposal building off a valid round with prevotes
|
||||
34-35 2f+1 prevote -> schedule timeout prevote
|
||||
36-43 First proposal with prevotes -> precommit Some
|
||||
44-46 2f+1 nil prevote -> precommit nil
|
||||
47-48 2f+1 precommit -> schedule timeout precommit
|
||||
49-54 First proposal with precommits -> finalize
|
||||
55-56 f+1 round > local round, jump
|
||||
57-60 on timeout propose
|
||||
61-64 on timeout prevote
|
||||
65-67 on timeout precommit
|
||||
```
|
||||
|
||||
The corresponding Rust code implementing these tasks are marked with their
|
||||
related line numbers.
|
||||
@@ -1,139 +0,0 @@
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use crate::{
|
||||
time::CanonicalInstant,
|
||||
ext::{RoundNumber, BlockNumber, Block, Network},
|
||||
round::RoundData,
|
||||
message_log::MessageLog,
|
||||
Step, Data, DataFor, Message, MessageFor,
|
||||
};
|
||||
|
||||
pub(crate) struct BlockData<N: Network> {
|
||||
pub(crate) number: BlockNumber,
|
||||
pub(crate) validator_id: Option<N::ValidatorId>,
|
||||
pub(crate) proposal: Option<N::Block>,
|
||||
|
||||
pub(crate) log: MessageLog<N>,
|
||||
pub(crate) slashes: HashSet<N::ValidatorId>,
|
||||
// We track the end times of each round for two reasons:
|
||||
// 1) Knowing the start time of the next round
|
||||
// 2) Validating precommits, which include the end time of the round which produced it
|
||||
// This HashMap contains the end time of the round we're currently in and every round prior
|
||||
pub(crate) end_time: HashMap<RoundNumber, CanonicalInstant>,
|
||||
|
||||
pub(crate) round: Option<RoundData<N>>,
|
||||
|
||||
pub(crate) locked: Option<(RoundNumber, <N::Block as Block>::Id)>,
|
||||
pub(crate) valid: Option<(RoundNumber, N::Block)>,
|
||||
}
|
||||
|
||||
impl<N: Network> BlockData<N> {
|
||||
pub(crate) fn new(
|
||||
weights: Arc<N::Weights>,
|
||||
number: BlockNumber,
|
||||
validator_id: Option<N::ValidatorId>,
|
||||
proposal: Option<N::Block>,
|
||||
) -> BlockData<N> {
|
||||
BlockData {
|
||||
number,
|
||||
validator_id,
|
||||
proposal,
|
||||
|
||||
log: MessageLog::new(weights),
|
||||
slashes: HashSet::new(),
|
||||
end_time: HashMap::new(),
|
||||
|
||||
// The caller of BlockData::new is expected to be populated after by the caller
|
||||
round: None,
|
||||
|
||||
locked: None,
|
||||
valid: None,
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn round(&self) -> &RoundData<N> {
|
||||
self.round.as_ref().unwrap()
|
||||
}
|
||||
|
||||
pub(crate) fn round_mut(&mut self) -> &mut RoundData<N> {
|
||||
self.round.as_mut().unwrap()
|
||||
}
|
||||
|
||||
// Populate the end time up to the specified round
|
||||
// This is generally used when moving to the next round, where this will only populate one time,
|
||||
// yet is also used when jumping rounds (when 33% of the validators are on a round ahead of us)
|
||||
pub(crate) fn populate_end_time(&mut self, round: RoundNumber) {
|
||||
// Starts from the current round since we only start the current round once we have have all
|
||||
// the prior time data
|
||||
for r in (self.round().number.0 + 1) ..= round.0 {
|
||||
self.end_time.insert(
|
||||
RoundNumber(r),
|
||||
RoundData::<N>::new(RoundNumber(r), self.end_time[&RoundNumber(r - 1)]).end_time(),
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Start a new round. Optionally takes in the time for when this is the first round, and the time
|
||||
// isn't simply the time of the prior round (yet rather the prior block). Returns the proposal
|
||||
// data, if we are the proposer.
|
||||
pub(crate) fn new_round(
|
||||
&mut self,
|
||||
round: RoundNumber,
|
||||
proposer: N::ValidatorId,
|
||||
time: Option<CanonicalInstant>,
|
||||
) -> Option<DataFor<N>> {
|
||||
debug_assert_eq!(round.0 == 0, time.is_some());
|
||||
|
||||
// If this is the first round, we don't have a prior round's end time to use as the start
|
||||
// We use the passed in time instead
|
||||
// If this isn't the first round, ensure we have the prior round's end time by populating the
|
||||
// map with all rounds till this round
|
||||
// This can happen we jump from round x to round x+n, where n != 1
|
||||
// The paper says to do so whenever you observe a sufficient amount of peers on a higher round
|
||||
if round.0 != 0 {
|
||||
self.populate_end_time(round);
|
||||
}
|
||||
|
||||
// 11-13
|
||||
self.round = Some(RoundData::<N>::new(
|
||||
round,
|
||||
time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 - 1)]),
|
||||
));
|
||||
self.end_time.insert(round, self.round().end_time());
|
||||
|
||||
// 14-21
|
||||
if Some(proposer) == self.validator_id {
|
||||
let (round, block) = self.valid.clone().unzip();
|
||||
block.or_else(|| self.proposal.clone()).map(|block| Data::Proposal(round, block))
|
||||
} else {
|
||||
self.round_mut().set_timeout(Step::Propose);
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
// Transform Data into an actual Message, using the contextual data from this block
|
||||
pub(crate) fn message(&mut self, data: DataFor<N>) -> Option<MessageFor<N>> {
|
||||
debug_assert_eq!(
|
||||
self.round().step,
|
||||
match data.step() {
|
||||
Step::Propose | Step::Prevote => Step::Propose,
|
||||
Step::Precommit => Step::Prevote,
|
||||
},
|
||||
);
|
||||
// Tendermint always sets the round's step to whatever it just broadcasted
|
||||
// Consolidate all of those here to ensure they aren't missed by an oversight
|
||||
// 27, 33, 41, 46, 60, 64
|
||||
self.round_mut().step = data.step();
|
||||
|
||||
// Only return a message to if we're actually a current validator
|
||||
self.validator_id.map(|validator_id| Message {
|
||||
sender: validator_id,
|
||||
block: self.number,
|
||||
round: self.round().number,
|
||||
data,
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -1,274 +0,0 @@
|
||||
use core::{hash::Hash, fmt::Debug};
|
||||
use std::{sync::Arc, collections::HashSet};
|
||||
|
||||
use async_trait::async_trait;
|
||||
use thiserror::Error;
|
||||
|
||||
use parity_scale_codec::{Encode, Decode};
|
||||
|
||||
use crate::{SignedMessageFor, commit_msg};
|
||||
|
||||
/// An alias for a series of traits required for a type to be usable as a validator ID,
|
||||
/// automatically implemented for all types satisfying those traits.
|
||||
pub trait ValidatorId:
|
||||
Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
|
||||
{
|
||||
}
|
||||
impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
|
||||
for V
|
||||
{
|
||||
}
|
||||
|
||||
/// An alias for a series of traits required for a type to be usable as a signature,
|
||||
/// automatically implemented for all types satisfying those traits.
|
||||
pub trait Signature: Send + Sync + Clone + PartialEq + Debug + Encode + Decode {}
|
||||
impl<S: Send + Sync + Clone + PartialEq + Debug + Encode + Decode> Signature for S {}
|
||||
|
||||
// Type aliases which are distinct according to the type system
|
||||
|
||||
/// A struct containing a Block Number, wrapped to have a distinct type.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
|
||||
pub struct BlockNumber(pub u64);
|
||||
/// A struct containing a round number, wrapped to have a distinct type.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
|
||||
pub struct RoundNumber(pub u32);
|
||||
|
||||
/// A signer for a validator.
|
||||
#[async_trait]
|
||||
pub trait Signer: Send + Sync {
|
||||
// Type used to identify validators.
|
||||
type ValidatorId: ValidatorId;
|
||||
/// Signature type.
|
||||
type Signature: Signature;
|
||||
|
||||
/// Returns the validator's current ID. Returns None if they aren't a current validator.
|
||||
async fn validator_id(&self) -> Option<Self::ValidatorId>;
|
||||
/// Sign a signature with the current validator's private key.
|
||||
async fn sign(&self, msg: &[u8]) -> Self::Signature;
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl<S: Signer> Signer for Arc<S> {
|
||||
type ValidatorId = S::ValidatorId;
|
||||
type Signature = S::Signature;
|
||||
|
||||
async fn validator_id(&self) -> Option<Self::ValidatorId> {
|
||||
self.as_ref().validator_id().await
|
||||
}
|
||||
|
||||
async fn sign(&self, msg: &[u8]) -> Self::Signature {
|
||||
self.as_ref().sign(msg).await
|
||||
}
|
||||
}
|
||||
|
||||
/// A signature scheme used by validators.
|
||||
pub trait SignatureScheme: Send + Sync {
|
||||
// Type used to identify validators.
|
||||
type ValidatorId: ValidatorId;
|
||||
/// Signature type.
|
||||
type Signature: Signature;
|
||||
/// Type representing an aggregate signature. This would presumably be a BLS signature,
|
||||
/// yet even with Schnorr signatures
|
||||
/// [half-aggregation is possible](https://eprint.iacr.org/2021/350).
|
||||
/// It could even be a threshold signature scheme, though that's currently unexpected.
|
||||
type AggregateSignature: Signature;
|
||||
|
||||
/// Type representing a signer of this scheme.
|
||||
type Signer: Signer<ValidatorId = Self::ValidatorId, Signature = Self::Signature>;
|
||||
|
||||
/// Verify a signature from the validator in question.
|
||||
#[must_use]
|
||||
fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool;
|
||||
|
||||
/// Aggregate signatures.
|
||||
fn aggregate(sigs: &[Self::Signature]) -> Self::AggregateSignature;
|
||||
/// Verify an aggregate signature for the list of signers.
|
||||
#[must_use]
|
||||
fn verify_aggregate(
|
||||
&self,
|
||||
signers: &[Self::ValidatorId],
|
||||
msg: &[u8],
|
||||
sig: &Self::AggregateSignature,
|
||||
) -> bool;
|
||||
}
|
||||
|
||||
impl<S: SignatureScheme> SignatureScheme for Arc<S> {
|
||||
type ValidatorId = S::ValidatorId;
|
||||
type Signature = S::Signature;
|
||||
type AggregateSignature = S::AggregateSignature;
|
||||
type Signer = S::Signer;
|
||||
|
||||
fn verify(&self, validator: Self::ValidatorId, msg: &[u8], sig: &Self::Signature) -> bool {
|
||||
self.as_ref().verify(validator, msg, sig)
|
||||
}
|
||||
|
||||
fn aggregate(sigs: &[Self::Signature]) -> Self::AggregateSignature {
|
||||
S::aggregate(sigs)
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn verify_aggregate(
|
||||
&self,
|
||||
signers: &[Self::ValidatorId],
|
||||
msg: &[u8],
|
||||
sig: &Self::AggregateSignature,
|
||||
) -> bool {
|
||||
self.as_ref().verify_aggregate(signers, msg, sig)
|
||||
}
|
||||
}
|
||||
|
||||
/// A commit for a specific block. The list of validators have weight exceeding the threshold for
|
||||
/// a valid commit.
|
||||
#[derive(Clone, PartialEq, Debug, Encode, Decode)]
|
||||
pub struct Commit<S: SignatureScheme> {
|
||||
/// End time of the round which created this commit, used as the start time of the next block.
|
||||
pub end_time: u64,
|
||||
/// Validators participating in the signature.
|
||||
pub validators: Vec<S::ValidatorId>,
|
||||
/// Aggregate signature.
|
||||
pub signature: S::AggregateSignature,
|
||||
}
|
||||
|
||||
/// Weights for the validators present.
|
||||
pub trait Weights: Send + Sync {
|
||||
type ValidatorId: ValidatorId;
|
||||
|
||||
/// Total weight of all validators.
|
||||
fn total_weight(&self) -> u64;
|
||||
/// Weight for a specific validator.
|
||||
fn weight(&self, validator: Self::ValidatorId) -> u64;
|
||||
/// Threshold needed for BFT consensus.
|
||||
fn threshold(&self) -> u64 {
|
||||
((self.total_weight() * 2) / 3) + 1
|
||||
}
|
||||
/// Threshold preventing BFT consensus.
|
||||
fn fault_thresold(&self) -> u64 {
|
||||
(self.total_weight() - self.threshold()) + 1
|
||||
}
|
||||
|
||||
/// Weighted round robin function.
|
||||
fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId;
|
||||
}
|
||||
|
||||
impl<W: Weights> Weights for Arc<W> {
|
||||
type ValidatorId = W::ValidatorId;
|
||||
|
||||
fn total_weight(&self) -> u64 {
|
||||
self.as_ref().total_weight()
|
||||
}
|
||||
|
||||
fn weight(&self, validator: Self::ValidatorId) -> u64 {
|
||||
self.as_ref().weight(validator)
|
||||
}
|
||||
|
||||
fn proposer(&self, block: BlockNumber, round: RoundNumber) -> Self::ValidatorId {
|
||||
self.as_ref().proposer(block, round)
|
||||
}
|
||||
}
|
||||
|
||||
/// Simplified error enum representing a block's validity.
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
|
||||
pub enum BlockError {
|
||||
/// Malformed block which is wholly invalid.
|
||||
#[error("invalid block")]
|
||||
Fatal,
|
||||
/// Valid block by syntax, with semantics which may or may not be valid yet are locally
|
||||
/// considered invalid. If a block fails to validate with this, a slash will not be triggered.
|
||||
#[error("invalid block under local view")]
|
||||
Temporal,
|
||||
}
|
||||
|
||||
/// Trait representing a Block.
|
||||
pub trait Block: Send + Sync + Clone + PartialEq + Debug + Encode + Decode {
|
||||
// Type used to identify blocks. Presumably a cryptographic hash of the block.
|
||||
type Id: Send + Sync + Copy + Clone + PartialEq + AsRef<[u8]> + Debug + Encode + Decode;
|
||||
|
||||
/// Return the deterministic, unique ID for this block.
|
||||
fn id(&self) -> Self::Id;
|
||||
}
|
||||
|
||||
#[cfg(feature = "substrate")]
|
||||
impl<B: sp_runtime::traits::Block> Block for B {
|
||||
type Id = B::Hash;
|
||||
fn id(&self) -> B::Hash {
|
||||
self.hash()
|
||||
}
|
||||
}
|
||||
|
||||
/// Trait representing the distributed system Tendermint is providing consensus over.
|
||||
#[async_trait]
|
||||
pub trait Network: Send + Sync {
|
||||
// Type used to identify validators.
|
||||
type ValidatorId: ValidatorId;
|
||||
/// Signature scheme used by validators.
|
||||
type SignatureScheme: SignatureScheme<ValidatorId = Self::ValidatorId>;
|
||||
/// Object representing the weights of validators.
|
||||
type Weights: Weights<ValidatorId = Self::ValidatorId>;
|
||||
/// Type used for ordered blocks of information.
|
||||
type Block: Block;
|
||||
|
||||
/// Maximum block processing time in seconds. This should include both the actual processing time
|
||||
/// and the time to download the block.
|
||||
const BLOCK_PROCESSING_TIME: u32;
|
||||
/// Network latency time in seconds.
|
||||
const LATENCY_TIME: u32;
|
||||
|
||||
/// The block time is defined as the processing time plus three times the latency.
|
||||
fn block_time() -> u32 {
|
||||
Self::BLOCK_PROCESSING_TIME + (3 * Self::LATENCY_TIME)
|
||||
}
|
||||
|
||||
/// Return a handle on the signer in use, usable for the entire lifetime of the machine.
|
||||
fn signer(&self) -> <Self::SignatureScheme as SignatureScheme>::Signer;
|
||||
/// Return a handle on the signing scheme in use, usable for the entire lifetime of the machine.
|
||||
fn signature_scheme(&self) -> Self::SignatureScheme;
|
||||
/// Return a handle on the validators' weights, usable for the entire lifetime of the machine.
|
||||
fn weights(&self) -> Self::Weights;
|
||||
|
||||
/// Verify a commit for a given block. Intended for use when syncing or when not an active
|
||||
/// validator.
|
||||
#[must_use]
|
||||
fn verify_commit(
|
||||
&self,
|
||||
id: <Self::Block as Block>::Id,
|
||||
commit: &Commit<Self::SignatureScheme>,
|
||||
) -> bool {
|
||||
if commit.validators.iter().collect::<HashSet<_>>().len() != commit.validators.len() {
|
||||
return false;
|
||||
}
|
||||
|
||||
if !self.signature_scheme().verify_aggregate(
|
||||
&commit.validators,
|
||||
&commit_msg(commit.end_time, id.as_ref()),
|
||||
&commit.signature,
|
||||
) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let weights = self.weights();
|
||||
commit.validators.iter().map(|v| weights.weight(*v)).sum::<u64>() >= weights.threshold()
|
||||
}
|
||||
|
||||
/// Broadcast a message to the other validators. If authenticated channels have already been
|
||||
/// established, this will double-authenticate. Switching to unauthenticated channels in a system
|
||||
/// already providing authenticated channels is not recommended as this is a minor, temporal
|
||||
/// inefficiency while downgrading channels may have wider implications.
|
||||
async fn broadcast(&mut self, msg: SignedMessageFor<Self>);
|
||||
|
||||
/// Trigger a slash for the validator in question who was definitively malicious.
|
||||
/// The exact process of triggering a slash is undefined and left to the network as a whole.
|
||||
async fn slash(&mut self, validator: Self::ValidatorId);
|
||||
|
||||
/// Validate a block.
|
||||
async fn validate(&mut self, block: &Self::Block) -> Result<(), BlockError>;
|
||||
/// Add a block, returning the proposal for the next one. It's possible a block, which was never
|
||||
/// validated or even failed validation, may be passed here if a supermajority of validators did
|
||||
/// consider it valid and created a commit for it. This deviates from the paper which will have a
|
||||
/// local node refuse to decide on a block it considers invalid. This library acknowledges the
|
||||
/// network did decide on it, leaving handling of it to the network, and outside of this scope.
|
||||
async fn add_block(
|
||||
&mut self,
|
||||
block: Self::Block,
|
||||
commit: Commit<Self::SignatureScheme>,
|
||||
) -> Option<Self::Block>;
|
||||
}
|
||||
@@ -1,648 +0,0 @@
|
||||
use core::fmt::Debug;
|
||||
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{SystemTime, Instant, Duration},
|
||||
collections::VecDeque,
|
||||
};
|
||||
|
||||
use log::debug;
|
||||
|
||||
use parity_scale_codec::{Encode, Decode};
|
||||
|
||||
use futures::{
|
||||
FutureExt, StreamExt,
|
||||
future::{self, Fuse},
|
||||
channel::mpsc,
|
||||
};
|
||||
use tokio::time::sleep;
|
||||
|
||||
mod time;
|
||||
use time::{sys_time, CanonicalInstant};
|
||||
|
||||
mod round;
|
||||
|
||||
mod block;
|
||||
use block::BlockData;
|
||||
|
||||
pub(crate) mod message_log;
|
||||
|
||||
/// Traits and types of the external network being integrated with to provide consensus over.
|
||||
pub mod ext;
|
||||
use ext::*;
|
||||
|
||||
pub(crate) fn commit_msg(end_time: u64, id: &[u8]) -> Vec<u8> {
|
||||
[&end_time.to_le_bytes(), id].concat().to_vec()
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
|
||||
enum Step {
|
||||
Propose,
|
||||
Prevote,
|
||||
Precommit,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Encode, Decode)]
|
||||
enum Data<B: Block, S: Signature> {
|
||||
Proposal(Option<RoundNumber>, B),
|
||||
Prevote(Option<B::Id>),
|
||||
Precommit(Option<(B::Id, S)>),
|
||||
}
|
||||
|
||||
impl<B: Block, S: Signature> PartialEq for Data<B, S> {
|
||||
fn eq(&self, other: &Data<B, S>) -> bool {
|
||||
match (self, other) {
|
||||
(Data::Proposal(valid_round, block), Data::Proposal(valid_round2, block2)) => {
|
||||
(valid_round == valid_round2) && (block == block2)
|
||||
}
|
||||
(Data::Prevote(id), Data::Prevote(id2)) => id == id2,
|
||||
(Data::Precommit(None), Data::Precommit(None)) => true,
|
||||
(Data::Precommit(Some((id, _))), Data::Precommit(Some((id2, _)))) => id == id2,
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<B: Block, S: Signature> Data<B, S> {
|
||||
fn step(&self) -> Step {
|
||||
match self {
|
||||
Data::Proposal(..) => Step::Propose,
|
||||
Data::Prevote(..) => Step::Prevote,
|
||||
Data::Precommit(..) => Step::Precommit,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug, Encode, Decode)]
|
||||
struct Message<V: ValidatorId, B: Block, S: Signature> {
|
||||
sender: V,
|
||||
|
||||
block: BlockNumber,
|
||||
round: RoundNumber,
|
||||
|
||||
data: Data<B, S>,
|
||||
}
|
||||
|
||||
/// A signed Tendermint consensus message to be broadcast to the other validators.
|
||||
#[derive(Clone, PartialEq, Debug, Encode, Decode)]
|
||||
pub struct SignedMessage<V: ValidatorId, B: Block, S: Signature> {
|
||||
msg: Message<V, B, S>,
|
||||
sig: S,
|
||||
}
|
||||
|
||||
impl<V: ValidatorId, B: Block, S: Signature> SignedMessage<V, B, S> {
|
||||
/// Number of the block this message is attempting to add to the chain.
|
||||
pub fn block(&self) -> BlockNumber {
|
||||
self.msg.block
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn verify_signature<Scheme: SignatureScheme<ValidatorId = V, Signature = S>>(
|
||||
&self,
|
||||
signer: &Scheme,
|
||||
) -> bool {
|
||||
signer.verify(self.msg.sender, &self.msg.encode(), &self.sig)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
enum TendermintError<V: ValidatorId> {
|
||||
Malicious(V),
|
||||
Temporal,
|
||||
}
|
||||
|
||||
// Type aliases to abstract over generic hell
|
||||
pub(crate) type DataFor<N> =
|
||||
Data<<N as Network>::Block, <<N as Network>::SignatureScheme as SignatureScheme>::Signature>;
|
||||
pub(crate) type MessageFor<N> = Message<
|
||||
<N as Network>::ValidatorId,
|
||||
<N as Network>::Block,
|
||||
<<N as Network>::SignatureScheme as SignatureScheme>::Signature,
|
||||
>;
|
||||
/// Type alias to the SignedMessage type for a given Network
|
||||
pub type SignedMessageFor<N> = SignedMessage<
|
||||
<N as Network>::ValidatorId,
|
||||
<N as Network>::Block,
|
||||
<<N as Network>::SignatureScheme as SignatureScheme>::Signature,
|
||||
>;
|
||||
|
||||
/// A machine executing the Tendermint protocol.
|
||||
pub struct TendermintMachine<N: Network> {
|
||||
network: N,
|
||||
signer: <N::SignatureScheme as SignatureScheme>::Signer,
|
||||
validators: N::SignatureScheme,
|
||||
weights: Arc<N::Weights>,
|
||||
|
||||
queue: VecDeque<MessageFor<N>>,
|
||||
msg_recv: mpsc::UnboundedReceiver<SignedMessageFor<N>>,
|
||||
#[allow(clippy::type_complexity)]
|
||||
step_recv: mpsc::UnboundedReceiver<(BlockNumber, Commit<N::SignatureScheme>, Option<N::Block>)>,
|
||||
|
||||
block: BlockData<N>,
|
||||
}
|
||||
|
||||
pub type StepSender<N> = mpsc::UnboundedSender<(
|
||||
BlockNumber,
|
||||
Commit<<N as Network>::SignatureScheme>,
|
||||
Option<<N as Network>::Block>,
|
||||
)>;
|
||||
|
||||
pub type MessageSender<N> = mpsc::UnboundedSender<SignedMessageFor<N>>;
|
||||
|
||||
/// A Tendermint machine and its channel to receive messages from the gossip layer over.
|
||||
pub struct TendermintHandle<N: Network> {
|
||||
/// Channel to trigger the machine to move to the next block.
|
||||
/// Takes in the the previous block's commit, along with the new proposal.
|
||||
pub step: StepSender<N>,
|
||||
/// Channel to send messages received from the P2P layer.
|
||||
pub messages: MessageSender<N>,
|
||||
/// Tendermint machine to be run on an asynchronous task.
|
||||
pub machine: TendermintMachine<N>,
|
||||
}
|
||||
|
||||
impl<N: Network + 'static> TendermintMachine<N> {
|
||||
// Broadcast the given piece of data
|
||||
// Tendermint messages always specify their block/round, yet Tendermint only ever broadcasts for
|
||||
// the current block/round. Accordingly, instead of manually fetching those at every call-site,
|
||||
// this function can simply pass the data to the block which can contextualize it
|
||||
fn broadcast(&mut self, data: DataFor<N>) {
|
||||
if let Some(msg) = self.block.message(data) {
|
||||
// Push it on to the queue. This is done so we only handle one message at a time, and so we
|
||||
// can handle our own message before broadcasting it. That way, we fail before before
|
||||
// becoming malicious
|
||||
self.queue.push_back(msg);
|
||||
}
|
||||
}
|
||||
|
||||
// Start a new round. Returns true if we were the proposer
|
||||
fn round(&mut self, round: RoundNumber, time: Option<CanonicalInstant>) -> bool {
|
||||
if let Some(data) =
|
||||
self.block.new_round(round, self.weights.proposer(self.block.number, round), time)
|
||||
{
|
||||
self.broadcast(data);
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
// 53-54
|
||||
async fn reset(&mut self, end_round: RoundNumber, proposal: Option<N::Block>) {
|
||||
// Ensure we have the end time data for the last round
|
||||
self.block.populate_end_time(end_round);
|
||||
|
||||
// Sleep until this round ends
|
||||
let round_end = self.block.end_time[&end_round];
|
||||
sleep(round_end.instant().saturating_duration_since(Instant::now())).await;
|
||||
|
||||
// Clear our outbound message queue
|
||||
self.queue = VecDeque::new();
|
||||
|
||||
// Create the new block
|
||||
self.block = BlockData::new(
|
||||
self.weights.clone(),
|
||||
BlockNumber(self.block.number.0 + 1),
|
||||
self.signer.validator_id().await,
|
||||
proposal,
|
||||
);
|
||||
|
||||
// Start the first round
|
||||
self.round(RoundNumber(0), Some(round_end));
|
||||
}
|
||||
|
||||
async fn reset_by_commit(
|
||||
&mut self,
|
||||
commit: Commit<N::SignatureScheme>,
|
||||
proposal: Option<N::Block>,
|
||||
) {
|
||||
let mut round = self.block.round().number;
|
||||
// If this commit is for a round we don't have, jump up to it
|
||||
while self.block.end_time[&round].canonical() < commit.end_time {
|
||||
round.0 += 1;
|
||||
self.block.populate_end_time(round);
|
||||
}
|
||||
// If this commit is for a prior round, find it
|
||||
while self.block.end_time[&round].canonical() > commit.end_time {
|
||||
if round.0 == 0 {
|
||||
panic!("commit isn't for this machine's next block");
|
||||
}
|
||||
round.0 -= 1;
|
||||
}
|
||||
debug_assert_eq!(self.block.end_time[&round].canonical(), commit.end_time);
|
||||
|
||||
self.reset(round, proposal).await;
|
||||
}
|
||||
|
||||
async fn slash(&mut self, validator: N::ValidatorId) {
|
||||
if !self.block.slashes.contains(&validator) {
|
||||
debug!(target: "tendermint", "Slashing validator {:?}", validator);
|
||||
self.block.slashes.insert(validator);
|
||||
self.network.slash(validator).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new Tendermint machine, from the specified point, with the specified block as the
|
||||
/// one to propose next. This will return a channel to send messages from the gossip layer and
|
||||
/// the machine itself. The machine should have `run` called from an asynchronous task.
|
||||
#[allow(clippy::new_ret_no_self)]
|
||||
pub async fn new(
|
||||
network: N,
|
||||
last_block: BlockNumber,
|
||||
last_time: u64,
|
||||
proposal: N::Block,
|
||||
) -> TendermintHandle<N> {
|
||||
let (msg_send, msg_recv) = mpsc::unbounded();
|
||||
let (step_send, step_recv) = mpsc::unbounded();
|
||||
TendermintHandle {
|
||||
step: step_send,
|
||||
messages: msg_send,
|
||||
machine: {
|
||||
let sys_time = sys_time(last_time);
|
||||
// If the last block hasn't ended yet, sleep until it has
|
||||
sleep(sys_time.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO)).await;
|
||||
|
||||
let signer = network.signer();
|
||||
let validators = network.signature_scheme();
|
||||
let weights = Arc::new(network.weights());
|
||||
let validator_id = signer.validator_id().await;
|
||||
// 01-10
|
||||
let mut machine = TendermintMachine {
|
||||
network,
|
||||
signer,
|
||||
validators,
|
||||
weights: weights.clone(),
|
||||
|
||||
queue: VecDeque::new(),
|
||||
msg_recv,
|
||||
step_recv,
|
||||
|
||||
block: BlockData::new(
|
||||
weights,
|
||||
BlockNumber(last_block.0 + 1),
|
||||
validator_id,
|
||||
Some(proposal),
|
||||
),
|
||||
};
|
||||
|
||||
// The end time of the last block is the start time for this one
|
||||
// The Commit explicitly contains the end time, so loading the last commit will provide
|
||||
// this. The only exception is for the genesis block, which doesn't have a commit
|
||||
// Using the genesis time in place will cause this block to be created immediately
|
||||
// after it, without the standard amount of separation (so their times will be
|
||||
// equivalent or minimally offset)
|
||||
// For callers wishing to avoid this, they should pass (0, GENESIS + N::block_time())
|
||||
machine.round(RoundNumber(0), Some(CanonicalInstant::new(last_time)));
|
||||
machine
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run(mut self) {
|
||||
loop {
|
||||
// Also create a future for if the queue has a message
|
||||
// Does not pop_front as if another message has higher priority, its future will be handled
|
||||
// instead in this loop, and the popped value would be dropped with the next iteration
|
||||
// While no other message has a higher priority right now, this is a safer practice
|
||||
let mut queue_future =
|
||||
if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() };
|
||||
|
||||
if let Some((broadcast, msg)) = futures::select_biased! {
|
||||
// Handle a new block occuring externally (an external sync loop)
|
||||
// Has the highest priority as it makes all other futures here irrelevant
|
||||
msg = self.step_recv.next() => {
|
||||
if let Some((block_number, commit, proposal)) = msg {
|
||||
// Commit is for a block we've already moved past
|
||||
if block_number != self.block.number {
|
||||
continue;
|
||||
}
|
||||
self.reset_by_commit(commit, proposal).await;
|
||||
None
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
},
|
||||
|
||||
// Handle our messages
|
||||
_ = queue_future => {
|
||||
Some((true, self.queue.pop_front().unwrap()))
|
||||
},
|
||||
|
||||
// Handle any timeouts
|
||||
step = self.block.round().timeout_future().fuse() => {
|
||||
// Remove the timeout so it doesn't persist, always being the selected future due to bias
|
||||
// While this does enable the timeout to be entered again, the timeout setting code will
|
||||
// never attempt to add a timeout after its timeout has expired
|
||||
self.block.round_mut().timeouts.remove(&step);
|
||||
// Only run if it's still the step in question
|
||||
if self.block.round().step == step {
|
||||
match step {
|
||||
Step::Propose => {
|
||||
// Slash the validator for not proposing when they should've
|
||||
debug!(target: "tendermint", "Validator didn't propose when they should have");
|
||||
self.slash(
|
||||
self.weights.proposer(self.block.number, self.block.round().number)
|
||||
).await;
|
||||
self.broadcast(Data::Prevote(None));
|
||||
},
|
||||
Step::Prevote => self.broadcast(Data::Precommit(None)),
|
||||
Step::Precommit => {
|
||||
self.round(RoundNumber(self.block.round().number.0 + 1), None);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
},
|
||||
|
||||
// Handle any received messages
|
||||
msg = self.msg_recv.next() => {
|
||||
if let Some(msg) = msg {
|
||||
if !msg.verify_signature(&self.validators) {
|
||||
continue;
|
||||
}
|
||||
Some((false, msg.msg))
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
} {
|
||||
let res = self.message(msg.clone()).await;
|
||||
if res.is_err() && broadcast {
|
||||
panic!("honest node had invalid behavior");
|
||||
}
|
||||
|
||||
match res {
|
||||
Ok(None) => (),
|
||||
Ok(Some(block)) => {
|
||||
let mut validators = vec![];
|
||||
let mut sigs = vec![];
|
||||
// Get all precommits for this round
|
||||
for (validator, msgs) in &self.block.log.log[&msg.round] {
|
||||
if let Some(Data::Precommit(Some((id, sig)))) = msgs.get(&Step::Precommit) {
|
||||
// If this precommit was for this block, include it
|
||||
if id == &block.id() {
|
||||
validators.push(*validator);
|
||||
sigs.push(sig.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let commit = Commit {
|
||||
end_time: self.block.end_time[&msg.round].canonical(),
|
||||
validators,
|
||||
signature: N::SignatureScheme::aggregate(&sigs),
|
||||
};
|
||||
debug_assert!(self.network.verify_commit(block.id(), &commit));
|
||||
|
||||
let proposal = self.network.add_block(block, commit).await;
|
||||
self.reset(msg.round, proposal).await;
|
||||
}
|
||||
Err(TendermintError::Malicious(validator)) => self.slash(validator).await,
|
||||
Err(TendermintError::Temporal) => (),
|
||||
}
|
||||
|
||||
if broadcast {
|
||||
let sig = self.signer.sign(&msg.encode()).await;
|
||||
self.network.broadcast(SignedMessage { msg, sig }).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Returns Ok(true) if this was a Precommit which had its signature validated
|
||||
// Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated yet
|
||||
// Returns Err if the signature was invalid
|
||||
fn verify_precommit_signature(
|
||||
&self,
|
||||
sender: N::ValidatorId,
|
||||
round: RoundNumber,
|
||||
data: &DataFor<N>,
|
||||
) -> Result<bool, TendermintError<N::ValidatorId>> {
|
||||
if let Data::Precommit(Some((id, sig))) = data {
|
||||
// Also verify the end_time of the commit
|
||||
// Only perform this verification if we already have the end_time
|
||||
// Else, there's a DoS where we receive a precommit for some round infinitely in the future
|
||||
// which forces us to calculate every end time
|
||||
if let Some(end_time) = self.block.end_time.get(&round) {
|
||||
if !self.validators.verify(sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) {
|
||||
debug!(target: "tendermint", "Validator produced an invalid commit signature");
|
||||
Err(TendermintError::Malicious(sender))?;
|
||||
}
|
||||
return Ok(true);
|
||||
}
|
||||
}
|
||||
Ok(false)
|
||||
}
|
||||
|
||||
async fn message(
|
||||
&mut self,
|
||||
msg: MessageFor<N>,
|
||||
) -> Result<Option<N::Block>, TendermintError<N::ValidatorId>> {
|
||||
if msg.block != self.block.number {
|
||||
Err(TendermintError::Temporal)?;
|
||||
}
|
||||
|
||||
// If this is a precommit, verify its signature
|
||||
self.verify_precommit_signature(msg.sender, msg.round, &msg.data)?;
|
||||
|
||||
// Only let the proposer propose
|
||||
if matches!(msg.data, Data::Proposal(..)) &&
|
||||
(msg.sender != self.weights.proposer(msg.block, msg.round))
|
||||
{
|
||||
debug!(target: "tendermint", "Validator who wasn't the proposer proposed");
|
||||
Err(TendermintError::Malicious(msg.sender))?;
|
||||
};
|
||||
|
||||
if !self.block.log.log(msg.clone())? {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
// All functions, except for the finalizer and the jump, are locked to the current round
|
||||
|
||||
// Run the finalizer to see if it applies
|
||||
// 49-52
|
||||
if matches!(msg.data, Data::Proposal(..)) || matches!(msg.data, Data::Precommit(_)) {
|
||||
let proposer = self.weights.proposer(self.block.number, msg.round);
|
||||
|
||||
// Get the proposal
|
||||
if let Some(Data::Proposal(_, block)) = self.block.log.get(msg.round, proposer, Step::Propose)
|
||||
{
|
||||
// Check if it has gotten a sufficient amount of precommits
|
||||
// Use a junk signature since message equality disregards the signature
|
||||
if self.block.log.has_consensus(
|
||||
msg.round,
|
||||
Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))),
|
||||
) {
|
||||
return Ok(Some(block.clone()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Else, check if we need to jump ahead
|
||||
#[allow(clippy::comparison_chain)]
|
||||
if msg.round.0 < self.block.round().number.0 {
|
||||
// Prior round, disregard if not finalizing
|
||||
return Ok(None);
|
||||
} else if msg.round.0 > self.block.round().number.0 {
|
||||
// 55-56
|
||||
// Jump, enabling processing by the below code
|
||||
if self.block.log.round_participation(msg.round) > self.weights.fault_thresold() {
|
||||
// If this round already has precommit messages, verify their signatures
|
||||
let round_msgs = self.block.log.log[&msg.round].clone();
|
||||
for (validator, msgs) in &round_msgs {
|
||||
if let Some(data) = msgs.get(&Step::Precommit) {
|
||||
if let Ok(res) = self.verify_precommit_signature(*validator, msg.round, data) {
|
||||
// Ensure this actually verified the signature instead of believing it shouldn't yet
|
||||
debug_assert!(res);
|
||||
} else {
|
||||
// Remove the message so it isn't counted towards forming a commit/included in one
|
||||
// This won't remove the fact the precommitted for this block hash in the MessageLog
|
||||
// TODO: Don't even log these in the first place until we jump, preventing needing
|
||||
// to do this in the first place
|
||||
self
|
||||
.block
|
||||
.log
|
||||
.log
|
||||
.get_mut(&msg.round)
|
||||
.unwrap()
|
||||
.get_mut(validator)
|
||||
.unwrap()
|
||||
.remove(&Step::Precommit);
|
||||
self.slash(*validator).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
// If we're the proposer, return now so we re-run processing with our proposal
|
||||
// If we continue now, it'd just be wasted ops
|
||||
if self.round(msg.round, None) {
|
||||
return Ok(None);
|
||||
}
|
||||
} else {
|
||||
// Future round which we aren't ready to jump to, so return for now
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// The paper executes these checks when the step is prevote. Making sure this message warrants
|
||||
// rerunning these checks is a sane optimization since message instances is a full iteration
|
||||
// of the round map
|
||||
if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) {
|
||||
let (participation, weight) =
|
||||
self.block.log.message_instances(self.block.round().number, Data::Prevote(None));
|
||||
// 34-35
|
||||
if participation >= self.weights.threshold() {
|
||||
self.block.round_mut().set_timeout(Step::Prevote);
|
||||
}
|
||||
|
||||
// 44-46
|
||||
if weight >= self.weights.threshold() {
|
||||
self.broadcast(Data::Precommit(None));
|
||||
return Ok(None);
|
||||
}
|
||||
}
|
||||
|
||||
// 47-48
|
||||
if matches!(msg.data, Data::Precommit(_)) &&
|
||||
self.block.log.has_participation(self.block.round().number, Step::Precommit)
|
||||
{
|
||||
self.block.round_mut().set_timeout(Step::Precommit);
|
||||
}
|
||||
|
||||
// All further operations require actually having the proposal in question
|
||||
let proposer = self.weights.proposer(self.block.number, self.block.round().number);
|
||||
let (vr, block) = if let Some(Data::Proposal(vr, block)) =
|
||||
self.block.log.get(self.block.round().number, proposer, Step::Propose)
|
||||
{
|
||||
(vr, block)
|
||||
} else {
|
||||
return Ok(None);
|
||||
};
|
||||
|
||||
// 22-33
|
||||
if self.block.round().step == Step::Propose {
|
||||
// Delay error handling (triggering a slash) until after we vote.
|
||||
let (valid, err) = match self.network.validate(block).await {
|
||||
Ok(_) => (true, Ok(None)),
|
||||
Err(BlockError::Temporal) => (false, Ok(None)),
|
||||
Err(BlockError::Fatal) => (false, {
|
||||
debug!(target: "tendermint", "Validator proposed a fatally invalid block");
|
||||
Err(TendermintError::Malicious(proposer))
|
||||
}),
|
||||
};
|
||||
// Create a raw vote which only requires block validity as a basis for the actual vote.
|
||||
let raw_vote = Some(block.id()).filter(|_| valid);
|
||||
|
||||
// If locked is none, it has a round of -1 according to the protocol. That satisfies
|
||||
// 23 and 29. If it's some, both are satisfied if they're for the same ID. If it's some
|
||||
// with different IDs, the function on 22 rejects yet the function on 28 has one other
|
||||
// condition
|
||||
let locked = self.block.locked.as_ref().map(|(_, id)| id == &block.id()).unwrap_or(true);
|
||||
let mut vote = raw_vote.filter(|_| locked);
|
||||
|
||||
if let Some(vr) = vr {
|
||||
// Malformed message
|
||||
if vr.0 >= self.block.round().number.0 {
|
||||
debug!(target: "tendermint", "Validator claimed a round from the future was valid");
|
||||
Err(TendermintError::Malicious(msg.sender))?;
|
||||
}
|
||||
|
||||
if self.block.log.has_consensus(*vr, Data::Prevote(Some(block.id()))) {
|
||||
// Allow differing locked values if the proposal has a newer valid round
|
||||
// This is the other condition described above
|
||||
if let Some((locked_round, _)) = self.block.locked.as_ref() {
|
||||
vote = vote.or_else(|| raw_vote.filter(|_| locked_round.0 <= vr.0));
|
||||
}
|
||||
|
||||
self.broadcast(Data::Prevote(vote));
|
||||
return err;
|
||||
}
|
||||
} else {
|
||||
self.broadcast(Data::Prevote(vote));
|
||||
return err;
|
||||
}
|
||||
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
if self
|
||||
.block
|
||||
.valid
|
||||
.as_ref()
|
||||
.map(|(round, _)| round != &self.block.round().number)
|
||||
.unwrap_or(true)
|
||||
{
|
||||
// 36-43
|
||||
|
||||
// The run once condition is implemented above. Since valid will always be set by this, it
|
||||
// not being set, or only being set historically, means this has yet to be run
|
||||
|
||||
if self.block.log.has_consensus(self.block.round().number, Data::Prevote(Some(block.id()))) {
|
||||
match self.network.validate(block).await {
|
||||
Ok(_) => (),
|
||||
Err(BlockError::Temporal) => (),
|
||||
Err(BlockError::Fatal) => {
|
||||
debug!(target: "tendermint", "Validator proposed a fatally invalid block");
|
||||
Err(TendermintError::Malicious(proposer))?
|
||||
}
|
||||
};
|
||||
|
||||
self.block.valid = Some((self.block.round().number, block.clone()));
|
||||
if self.block.round().step == Step::Prevote {
|
||||
self.block.locked = Some((self.block.round().number, block.id()));
|
||||
self.broadcast(Data::Precommit(Some((
|
||||
block.id(),
|
||||
self
|
||||
.signer
|
||||
.sign(&commit_msg(
|
||||
self.block.end_time[&self.block.round().number].canonical(),
|
||||
block.id().as_ref(),
|
||||
))
|
||||
.await,
|
||||
))));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
@@ -1,108 +0,0 @@
|
||||
use std::{sync::Arc, collections::HashMap};
|
||||
|
||||
use log::debug;
|
||||
|
||||
use crate::{ext::*, RoundNumber, Step, Data, DataFor, MessageFor, TendermintError};
|
||||
|
||||
type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, DataFor<N>>>;
|
||||
pub(crate) struct MessageLog<N: Network> {
|
||||
weights: Arc<N::Weights>,
|
||||
precommitted: HashMap<N::ValidatorId, <N::Block as Block>::Id>,
|
||||
pub(crate) log: HashMap<RoundNumber, RoundLog<N>>,
|
||||
}
|
||||
|
||||
impl<N: Network> MessageLog<N> {
|
||||
pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {
|
||||
MessageLog { weights, precommitted: HashMap::new(), log: HashMap::new() }
|
||||
}
|
||||
|
||||
// Returns true if it's a new message
|
||||
pub(crate) fn log(
|
||||
&mut self,
|
||||
msg: MessageFor<N>,
|
||||
) -> Result<bool, TendermintError<N::ValidatorId>> {
|
||||
let round = self.log.entry(msg.round).or_insert_with(HashMap::new);
|
||||
let msgs = round.entry(msg.sender).or_insert_with(HashMap::new);
|
||||
|
||||
// Handle message replays without issue. It's only multiple messages which is malicious
|
||||
let step = msg.data.step();
|
||||
if let Some(existing) = msgs.get(&step) {
|
||||
if existing != &msg.data {
|
||||
debug!(
|
||||
target: "tendermint",
|
||||
"Validator sent multiple messages for the same block + round + step"
|
||||
);
|
||||
Err(TendermintError::Malicious(msg.sender))?;
|
||||
}
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// If they already precommitted to a distinct hash, error
|
||||
if let Data::Precommit(Some((hash, _))) = &msg.data {
|
||||
if let Some(prev) = self.precommitted.get(&msg.sender) {
|
||||
if hash != prev {
|
||||
debug!(target: "tendermint", "Validator precommitted to multiple blocks");
|
||||
Err(TendermintError::Malicious(msg.sender))?;
|
||||
}
|
||||
}
|
||||
self.precommitted.insert(msg.sender, *hash);
|
||||
}
|
||||
|
||||
msgs.insert(step, msg.data);
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
// For a given round, return the participating weight for this step, and the weight agreeing with
|
||||
// the data.
|
||||
pub(crate) fn message_instances(&self, round: RoundNumber, data: DataFor<N>) -> (u64, u64) {
|
||||
let mut participating = 0;
|
||||
let mut weight = 0;
|
||||
for (participant, msgs) in &self.log[&round] {
|
||||
if let Some(msg) = msgs.get(&data.step()) {
|
||||
let validator_weight = self.weights.weight(*participant);
|
||||
participating += validator_weight;
|
||||
if &data == msg {
|
||||
weight += validator_weight;
|
||||
}
|
||||
}
|
||||
}
|
||||
(participating, weight)
|
||||
}
|
||||
|
||||
// Get the participation in a given round
|
||||
pub(crate) fn round_participation(&self, round: RoundNumber) -> u64 {
|
||||
let mut weight = 0;
|
||||
if let Some(round) = self.log.get(&round) {
|
||||
for participant in round.keys() {
|
||||
weight += self.weights.weight(*participant);
|
||||
}
|
||||
};
|
||||
weight
|
||||
}
|
||||
|
||||
// Check if a supermajority of nodes have participated on a specific step
|
||||
pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool {
|
||||
let mut participating = 0;
|
||||
for (participant, msgs) in &self.log[&round] {
|
||||
if msgs.get(&step).is_some() {
|
||||
participating += self.weights.weight(*participant);
|
||||
}
|
||||
}
|
||||
participating >= self.weights.threshold()
|
||||
}
|
||||
|
||||
// Check if consensus has been reached on a specific piece of data
|
||||
pub(crate) fn has_consensus(&self, round: RoundNumber, data: DataFor<N>) -> bool {
|
||||
let (_, weight) = self.message_instances(round, data);
|
||||
weight >= self.weights.threshold()
|
||||
}
|
||||
|
||||
pub(crate) fn get(
|
||||
&self,
|
||||
round: RoundNumber,
|
||||
sender: N::ValidatorId,
|
||||
step: Step,
|
||||
) -> Option<&DataFor<N>> {
|
||||
self.log.get(&round).and_then(|round| round.get(&sender).and_then(|msgs| msgs.get(&step)))
|
||||
}
|
||||
}
|
||||
@@ -1,83 +0,0 @@
|
||||
use std::{
|
||||
marker::PhantomData,
|
||||
time::{Duration, Instant},
|
||||
collections::HashMap,
|
||||
};
|
||||
|
||||
use futures::{FutureExt, future};
|
||||
use tokio::time::sleep;
|
||||
|
||||
use crate::{
|
||||
time::CanonicalInstant,
|
||||
Step,
|
||||
ext::{RoundNumber, Network},
|
||||
};
|
||||
|
||||
pub(crate) struct RoundData<N: Network> {
|
||||
_network: PhantomData<N>,
|
||||
pub(crate) number: RoundNumber,
|
||||
pub(crate) start_time: CanonicalInstant,
|
||||
pub(crate) step: Step,
|
||||
pub(crate) timeouts: HashMap<Step, Instant>,
|
||||
}
|
||||
|
||||
impl<N: Network> RoundData<N> {
|
||||
pub(crate) fn new(number: RoundNumber, start_time: CanonicalInstant) -> Self {
|
||||
RoundData {
|
||||
_network: PhantomData,
|
||||
number,
|
||||
start_time,
|
||||
step: Step::Propose,
|
||||
timeouts: HashMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn timeout(&self, step: Step) -> CanonicalInstant {
|
||||
let adjusted_block = N::BLOCK_PROCESSING_TIME * (self.number.0 + 1);
|
||||
let adjusted_latency = N::LATENCY_TIME * (self.number.0 + 1);
|
||||
let offset = Duration::from_secs(
|
||||
(match step {
|
||||
Step::Propose => adjusted_block + adjusted_latency,
|
||||
Step::Prevote => adjusted_block + (2 * adjusted_latency),
|
||||
Step::Precommit => adjusted_block + (3 * adjusted_latency),
|
||||
})
|
||||
.into(),
|
||||
);
|
||||
self.start_time + offset
|
||||
}
|
||||
|
||||
pub(crate) fn end_time(&self) -> CanonicalInstant {
|
||||
self.timeout(Step::Precommit)
|
||||
}
|
||||
|
||||
pub(crate) fn set_timeout(&mut self, step: Step) {
|
||||
let timeout = self.timeout(step).instant();
|
||||
self.timeouts.entry(step).or_insert(timeout);
|
||||
}
|
||||
|
||||
// Poll all set timeouts, returning the Step whose timeout has just expired
|
||||
pub(crate) async fn timeout_future(&self) -> Step {
|
||||
let timeout_future = |step| {
|
||||
let timeout = self.timeouts.get(&step).copied();
|
||||
(async move {
|
||||
if let Some(timeout) = timeout {
|
||||
sleep(timeout.saturating_duration_since(Instant::now())).await;
|
||||
} else {
|
||||
future::pending::<()>().await;
|
||||
}
|
||||
step
|
||||
})
|
||||
.fuse()
|
||||
};
|
||||
let propose_timeout = timeout_future(Step::Propose);
|
||||
let prevote_timeout = timeout_future(Step::Prevote);
|
||||
let precommit_timeout = timeout_future(Step::Precommit);
|
||||
futures::pin_mut!(propose_timeout, prevote_timeout, precommit_timeout);
|
||||
|
||||
futures::select_biased! {
|
||||
step = propose_timeout => step,
|
||||
step = prevote_timeout => step,
|
||||
step = precommit_timeout => step,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,44 +0,0 @@
|
||||
use core::ops::Add;
|
||||
use std::time::{UNIX_EPOCH, SystemTime, Instant, Duration};
|
||||
|
||||
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
|
||||
pub(crate) struct CanonicalInstant {
|
||||
/// Time since the epoch.
|
||||
time: u64,
|
||||
/// An Instant synchronized with the above time.
|
||||
instant: Instant,
|
||||
}
|
||||
|
||||
pub(crate) fn sys_time(time: u64) -> SystemTime {
|
||||
UNIX_EPOCH + Duration::from_secs(time)
|
||||
}
|
||||
|
||||
impl CanonicalInstant {
|
||||
pub(crate) fn new(time: u64) -> CanonicalInstant {
|
||||
// This is imprecise yet should be precise enough, as it'll resolve within a few ms
|
||||
let instant_now = Instant::now();
|
||||
let sys_now = SystemTime::now();
|
||||
|
||||
// If the time is in the future, this will be off by that much time
|
||||
let elapsed = sys_now.duration_since(sys_time(time)).unwrap_or(Duration::ZERO);
|
||||
// Except for the fact this panics here
|
||||
let synced_instant = instant_now.checked_sub(elapsed).unwrap();
|
||||
|
||||
CanonicalInstant { time, instant: synced_instant }
|
||||
}
|
||||
|
||||
pub(crate) fn canonical(&self) -> u64 {
|
||||
self.time
|
||||
}
|
||||
|
||||
pub(crate) fn instant(&self) -> Instant {
|
||||
self.instant
|
||||
}
|
||||
}
|
||||
|
||||
impl Add<Duration> for CanonicalInstant {
|
||||
type Output = CanonicalInstant;
|
||||
fn add(self, duration: Duration) -> CanonicalInstant {
|
||||
CanonicalInstant { time: self.time + duration.as_secs(), instant: self.instant + duration }
|
||||
}
|
||||
}
|
||||
@@ -1,177 +0,0 @@
|
||||
use std::{
|
||||
sync::Arc,
|
||||
time::{UNIX_EPOCH, SystemTime, Duration},
|
||||
};
|
||||
|
||||
use async_trait::async_trait;
|
||||
|
||||
use parity_scale_codec::{Encode, Decode};
|
||||
|
||||
use futures::SinkExt;
|
||||
use tokio::{sync::RwLock, time::sleep};
|
||||
|
||||
use tendermint_machine::{
|
||||
ext::*, SignedMessageFor, StepSender, MessageSender, TendermintMachine, TendermintHandle,
|
||||
};
|
||||
|
||||
type TestValidatorId = u16;
|
||||
type TestBlockId = [u8; 4];
|
||||
|
||||
struct TestSigner(u16);
|
||||
#[async_trait]
|
||||
impl Signer for TestSigner {
|
||||
type ValidatorId = TestValidatorId;
|
||||
type Signature = [u8; 32];
|
||||
|
||||
async fn validator_id(&self) -> Option<TestValidatorId> {
|
||||
Some(self.0)
|
||||
}
|
||||
|
||||
async fn sign(&self, msg: &[u8]) -> [u8; 32] {
|
||||
let mut sig = [0; 32];
|
||||
sig[.. 2].copy_from_slice(&self.0.to_le_bytes());
|
||||
sig[2 .. (2 + 30.min(msg.len()))].copy_from_slice(&msg[.. 30.min(msg.len())]);
|
||||
sig
|
||||
}
|
||||
}
|
||||
|
||||
struct TestSignatureScheme;
|
||||
impl SignatureScheme for TestSignatureScheme {
|
||||
type ValidatorId = TestValidatorId;
|
||||
type Signature = [u8; 32];
|
||||
type AggregateSignature = Vec<[u8; 32]>;
|
||||
type Signer = TestSigner;
|
||||
|
||||
#[must_use]
|
||||
fn verify(&self, validator: u16, msg: &[u8], sig: &[u8; 32]) -> bool {
|
||||
(sig[.. 2] == validator.to_le_bytes()) && (sig[2 ..] == [msg, &[0; 30]].concat()[.. 30])
|
||||
}
|
||||
|
||||
fn aggregate(sigs: &[[u8; 32]]) -> Vec<[u8; 32]> {
|
||||
sigs.to_vec()
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
fn verify_aggregate(
|
||||
&self,
|
||||
signers: &[TestValidatorId],
|
||||
msg: &[u8],
|
||||
sigs: &Vec<[u8; 32]>,
|
||||
) -> bool {
|
||||
assert_eq!(signers.len(), sigs.len());
|
||||
for sig in signers.iter().zip(sigs.iter()) {
|
||||
assert!(self.verify(*sig.0, msg, sig.1));
|
||||
}
|
||||
true
|
||||
}
|
||||
}
|
||||
|
||||
struct TestWeights;
|
||||
impl Weights for TestWeights {
|
||||
type ValidatorId = TestValidatorId;
|
||||
|
||||
fn total_weight(&self) -> u64 {
|
||||
4
|
||||
}
|
||||
fn weight(&self, id: TestValidatorId) -> u64 {
|
||||
[1; 4][usize::try_from(id).unwrap()]
|
||||
}
|
||||
|
||||
fn proposer(&self, number: BlockNumber, round: RoundNumber) -> TestValidatorId {
|
||||
TestValidatorId::try_from((number.0 + u64::from(round.0)) % 4).unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Debug, Encode, Decode)]
|
||||
struct TestBlock {
|
||||
id: TestBlockId,
|
||||
valid: Result<(), BlockError>,
|
||||
}
|
||||
|
||||
impl Block for TestBlock {
|
||||
type Id = TestBlockId;
|
||||
|
||||
fn id(&self) -> TestBlockId {
|
||||
self.id
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::type_complexity)]
|
||||
struct TestNetwork(u16, Arc<RwLock<Vec<(MessageSender<Self>, StepSender<Self>)>>>);
|
||||
|
||||
#[async_trait]
impl Network for TestNetwork {
  type ValidatorId = TestValidatorId;
  type SignatureScheme = TestSignatureScheme;
  type Weights = TestWeights;
  type Block = TestBlock;

  // Accelerated timing for tests; presumably in seconds — TODO confirm against
  // the trait's documentation.
  const BLOCK_PROCESSING_TIME: u32 = 2;
  const LATENCY_TIME: u32 = 1;

  // Each network instance signs as the validator ID it was constructed with.
  fn signer(&self) -> TestSigner {
    TestSigner(self.0)
  }

  fn signature_scheme(&self) -> TestSignatureScheme {
    TestSignatureScheme
  }

  fn weights(&self) -> TestWeights {
    TestWeights
  }

  // Deliver the message to every validator, including ourselves.
  async fn broadcast(&mut self, msg: SignedMessageFor<Self>) {
    for (messages, _) in self.1.write().await.iter_mut() {
      messages.send(msg.clone()).await.unwrap();
    }
  }

  // No validator should misbehave in these tests; fail loudly if one's slashed.
  async fn slash(&mut self, _: TestValidatorId) {
    dbg!("Slash");
    todo!()
  }

  // Validity was decided when the block was constructed; just report it.
  async fn validate(&mut self, block: &TestBlock) -> Result<(), BlockError> {
    block.valid
  }

  // Accept the finalized block (asserting validity and the commit's
  // signatures), then propose the next block with the ID incremented by one.
  async fn add_block(
    &mut self,
    block: TestBlock,
    commit: Commit<TestSignatureScheme>,
  ) -> Option<TestBlock> {
    dbg!("Adding ", &block);
    assert!(block.valid.is_ok());
    assert!(self.verify_commit(block.id(), &commit));
    Some(TestBlock { id: (u32::from_le_bytes(block.id) + 1).to_le_bytes(), valid: Ok(()) })
  }
}
|
||||
|
||||
impl TestNetwork {
  // Spawn `validators` Tendermint machines, all starting at block #1 with a
  // start time of now, and return the shared channel list used to reach them.
  async fn new(validators: usize) -> Arc<RwLock<Vec<(MessageSender<Self>, StepSender<Self>)>>> {
    let arc = Arc::new(RwLock::new(vec![]));
    {
      // Hold the write lock while populating so no machine observes a
      // partially-built validator list via its clone of `arc`.
      let mut write = arc.write().await;
      for i in 0 .. validators {
        let i = u16::try_from(i).unwrap();
        let TendermintHandle { messages, machine, step } = TendermintMachine::new(
          TestNetwork(i, arc.clone()),
          BlockNumber(1),
          SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
          TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) },
        )
        .await;
        // Detached: the machine runs until the test process exits.
        tokio::task::spawn(machine.run());
        write.push((messages, step));
      }
    }
    arc
  }
}
|
||||
|
||||
#[tokio::test]
async fn test() {
  // Soak test: run four validators for 30 seconds. There are no explicit
  // assertions here; any consensus fault panics inside the network callbacks
  // (see `add_block`/`slash`), so surviving the sleep constitutes a pass.
  TestNetwork::new(4).await;
  sleep(Duration::from_secs(30)).await;
}
|
||||
@@ -1,38 +0,0 @@
|
||||
[package]
name = "pallet-tendermint"
version = "0.1.0"
description = "Tendermint pallet for Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/pallet"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[dependencies]
parity-scale-codec = { version = "3", default-features = false, features = ["derive"] }
scale-info = { version = "2", default-features = false, features = ["derive"] }

sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false }

frame-system = { git = "https://github.com/serai-dex/substrate", default-features = false }
frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false }

[features]
# Propagate std to every no_std-capable dependency. Previously only a subset
# was listed, so std builds left codec/scale-info/sp-core/sp-std in no_std mode.
std = [
  "parity-scale-codec/std",
  "scale-info/std",

  "sp-core/std",
  "sp-std/std",
  "sp-application-crypto/std",

  "frame-system/std",
  "frame-support/std",
]

runtime-benchmarks = [
  "frame-system/runtime-benchmarks",
  "frame-support/runtime-benchmarks",
]

default = ["std"]
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2022-2023 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,75 +0,0 @@
|
||||
#![cfg_attr(not(feature = "std"), no_std)]
|
||||
|
||||
#[frame_support::pallet]
|
||||
pub mod pallet {
|
||||
use sp_std::vec::Vec;
|
||||
use sp_core::sr25519::Public;
|
||||
|
||||
use frame_support::pallet_prelude::*;
|
||||
use frame_support::traits::{ConstU32, OneSessionHandler};
|
||||
|
||||
type MaxValidators = ConstU32<{ u16::MAX as u32 }>;
|
||||
|
||||
#[pallet::config]
|
||||
pub trait Config: frame_system::Config {}
|
||||
|
||||
#[pallet::pallet]
|
||||
#[pallet::generate_store(pub(super) trait Store)]
|
||||
pub struct Pallet<T>(PhantomData<T>);
|
||||
|
||||
#[pallet::storage]
|
||||
#[pallet::getter(fn session)]
|
||||
pub type Session<T: Config> = StorageValue<_, u32, ValueQuery>;
|
||||
|
||||
#[pallet::storage]
|
||||
#[pallet::getter(fn validators)]
|
||||
pub type Validators<T: Config> = StorageValue<_, BoundedVec<Public, MaxValidators>, ValueQuery>;
|
||||
|
||||
pub mod crypto {
|
||||
use sp_application_crypto::{KeyTypeId, app_crypto, sr25519};
|
||||
app_crypto!(sr25519, KeyTypeId(*b"tend"));
|
||||
|
||||
impl<T> sp_application_crypto::BoundToRuntimeAppPublic for crate::Pallet<T> {
|
||||
type Public = Public;
|
||||
}
|
||||
|
||||
sp_application_crypto::with_pair! {
|
||||
pub type AuthorityPair = Pair;
|
||||
}
|
||||
pub type AuthoritySignature = Signature;
|
||||
pub type AuthorityId = Public;
|
||||
}
|
||||
|
||||
impl<T: Config, V> OneSessionHandler<V> for Pallet<T> {
|
||||
type Key = crypto::Public;
|
||||
|
||||
// TODO
|
||||
fn on_genesis_session<'a, I: 'a>(_validators: I)
|
||||
where
|
||||
I: Iterator<Item = (&'a V, Self::Key)>,
|
||||
V: 'a,
|
||||
{
|
||||
}
|
||||
|
||||
fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued: I)
|
||||
where
|
||||
I: Iterator<Item = (&'a V, Self::Key)>,
|
||||
V: 'a,
|
||||
{
|
||||
if !changed {
|
||||
return;
|
||||
}
|
||||
|
||||
Session::<T>::put(Self::session() + 1);
|
||||
Validators::<T>::put(
|
||||
BoundedVec::try_from(validators.map(|(_, key)| key.into()).collect::<Vec<Public>>())
|
||||
.unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
// TODO
|
||||
fn on_disabled(_validator_index: u32) {}
|
||||
}
|
||||
}
|
||||
|
||||
pub use pallet::*;
|
||||
@@ -1,21 +0,0 @@
|
||||
[package]
name = "sp-tendermint"
version = "0.1.0"
description = "Tendermint primitives for Substrate"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/substrate/tendermint/primitives"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

# Substrate dependencies come from serai-dex's fork, no_std by default.
[dependencies]
sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false }
sp-api = { git = "https://github.com/serai-dex/substrate", default-features = false }

[features]
std = ["sp-core/std", "sp-std/std", "sp-api/std"]
default = ["std"]
|
||||
@@ -1,15 +0,0 @@
|
||||
AGPL-3.0-only license
|
||||
|
||||
Copyright (c) 2022-2023 Luke Parker
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU Affero General Public License Version 3 as
|
||||
published by the Free Software Foundation.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU Affero General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU Affero General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
@@ -1,16 +0,0 @@
|
||||
#![cfg_attr(not(feature = "std"), no_std)]

//! Runtime APIs letting the Tendermint client read session/validator info.

use sp_core::sr25519::Public;
use sp_std::vec::Vec;

sp_api::decl_runtime_apis! {
  /// TendermintApi trait for runtimes to implement.
  pub trait TendermintApi {
    /// Current session number. A session is NOT a fixed length of blocks, yet rather a continuous
    /// set of validators.
    fn current_session() -> u32;

    /// Current validators.
    fn validators() -> Vec<Public>;
  }
}
|
||||
@@ -34,7 +34,6 @@ pub mod pallet {
|
||||
}
|
||||
|
||||
#[pallet::pallet]
|
||||
#[pallet::generate_store(pub(crate) trait Store)]
|
||||
pub struct Pallet<T>(PhantomData<T>);
|
||||
|
||||
impl<T: Config> Pallet<T> {
|
||||
|
||||
@@ -52,7 +52,6 @@ pub mod pallet {
|
||||
}
|
||||
|
||||
#[pallet::pallet]
|
||||
#[pallet::generate_store(pub(super) trait Store)]
|
||||
pub struct Pallet<T>(PhantomData<T>);
|
||||
|
||||
/// The details of a validator set instance.
|
||||
|
||||
Reference in New Issue
Block a user