9 Commits

Author SHA1 Message Date
Luke Parker
6272c40561 Restore block_hash to Batch
It's not only helpful (to easily check where Serai's view of the external
network is) but it's necessary in case of a non-trivial chain fork to determine
which blockchain Serai considers canonical.
2024-12-31 18:10:47 -05:00
Luke Parker
2240a50a0c Rebroadcast cosigns for the currently evaluated session, not the latest intended
If Substrate has a block 500 with a key gen, and a block 600 with a key gen,
and the session starting on 500 never cosigns everything, everyone up-to-date
will want the cosigns for the session starting on block 500. Everyone
up-to-date will also be rebroadcasting the non-existent cosigns for the session
which has yet to start. This wouldn't cause a stall as eventually, each
individual set would cosign the latest notable block, and then that would be
explicitly synced, but it's still not the intended behavior.

We also won't even intake the cosigns for the latest intended session if it
exceeds the session we're currently evaluating. This does mean those behind on
the cosigning protocol wouldn't have rebroadcasted their historical cosigns,
and now will, but that's valuable as we don't actually know if we're behind or
up-to-date (per above posited issue).
2024-12-31 17:17:12 -05:00
Luke Parker
7e2b31e5da Clean the transaction definitions in the coordinator
Moves to borsh for serialization. No longer includes nonces anywhere in the TX.
2024-12-31 12:14:32 -05:00
Luke Parker
8c9441a1a5 Redo coordinator's Substrate scanner 2024-12-31 10:37:19 -05:00
Luke Parker
5a42f66dc2 alloy 0.9 2024-12-30 11:09:09 -05:00
Luke Parker
b584a2beab Remove old DB entry from the scanner
We read from it but never wrote to it.

It was used to check we didn't flag a block as notable after reporting it, but
it was called by the scan task as it scanned a block. We only batch/report
blocks after the scan task has scanned them, so it was very redundant.
2024-12-30 11:07:05 -05:00
Luke Parker
26ccff25a1 Split reporting Batches to the signer from the Batch test 2024-12-30 11:03:52 -05:00
Luke Parker
f0094b3c7c Rename Report task to Batch task 2024-12-30 10:49:35 -05:00
Luke Parker
458f4fe170 Move where we check if we should delay reporting of Batches 2024-12-30 10:18:38 -05:00
56 changed files with 1575 additions and 1792 deletions

View File

@@ -176,6 +176,7 @@ jobs:
cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml cargo msrv verify --manifest-path coordinator/tributary/tendermint/Cargo.toml
cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
cargo msrv verify --manifest-path coordinator/Cargo.toml cargo msrv verify --manifest-path coordinator/Cargo.toml
msrv-substrate: msrv-substrate:

View File

@@ -62,6 +62,7 @@ jobs:
-p tendermint-machine \ -p tendermint-machine \
-p tributary-chain \ -p tributary-chain \
-p serai-cosign \ -p serai-cosign \
-p serai-coordinator-substrate \
-p serai-coordinator \ -p serai-coordinator \
-p serai-orchestrator \ -p serai-orchestrator \
-p serai-docker-tests -p serai-docker-tests

101
Cargo.lock generated
View File

@@ -112,9 +112,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-consensus" name = "alloy-consensus"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e88e1edea70787c33e11197d3f32ae380f3db19e6e061e539a5bcf8184a6b326" checksum = "db66918860ff33920fb9e6d648d1e8cee275321406ea255ac9320f6562e26fec"
dependencies = [ dependencies = [
"alloy-eips", "alloy-eips",
"alloy-primitives", "alloy-primitives",
@@ -130,9 +130,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-consensus-any" name = "alloy-consensus-any"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "57b1bb53f40c0273cd1975573cd457b39213e68584e36d1401d25fd0398a1d65" checksum = "04519b5157de8a2166bddb07d84a63590100f1d3e2b3682144e787f1c27ccdac"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips", "alloy-eips",
@@ -164,9 +164,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-eip7702" name = "alloy-eip7702"
version = "0.4.2" version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c986539255fb839d1533c128e190e557e52ff652c9ef62939e233a81dd93f7e" checksum = "cabf647eb4650c91a9d38cb6f972bb320009e7e9d61765fb688a86f1563b33e8"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-rlp", "alloy-rlp",
@@ -177,9 +177,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-eips" name = "alloy-eips"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f9fadfe089e9ccc0650473f2d4ef0a28bc015bbca5631d9f0f09e49b557fdb3" checksum = "e56518f46b074d562ac345238343e2231b672a13aca18142d285f95cc055980b"
dependencies = [ dependencies = [
"alloy-eip2930", "alloy-eip2930",
"alloy-eip7702", "alloy-eip7702",
@@ -195,10 +195,11 @@ dependencies = [
[[package]] [[package]]
name = "alloy-genesis" name = "alloy-genesis"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2b2a4cf7b70f3495788e74ce1c765260ffe38820a2a774ff4aacb62e31ea73f9" checksum = "2cf200fd4c28435995e47b26d4761a4cf6e1011a13b81f9a9afaf16a93d9fd09"
dependencies = [ dependencies = [
"alloy-eips",
"alloy-primitives", "alloy-primitives",
"alloy-serde", "alloy-serde",
"alloy-trie", "alloy-trie",
@@ -207,9 +208,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-json-rpc" name = "alloy-json-rpc"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e29040b9d5fe2fb70415531882685b64f8efd08dfbd6cc907120650504821105" checksum = "b17c5ada5faf0f9d2921e8b20971eced68abbc92a272b0502cac8b1d00f56777"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"alloy-sol-types", "alloy-sol-types",
@@ -221,9 +222,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-network" name = "alloy-network"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "510cc00b318db0dfccfdd2d032411cfae64fc144aef9679409e014145d3dacc4" checksum = "24f3117647e3262f6db9e18b371bf67c5810270c0cf915786c30fad3b1739561"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-consensus-any", "alloy-consensus-any",
@@ -246,9 +247,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-network-primitives" name = "alloy-network-primitives"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9081c099e798b8a2bba2145eb82a9a146f01fc7a35e9ab6e7b43305051f97550" checksum = "1535a4577648ec2fd3c446d4644d9b8e9e01e5816be53a5d515dc1624e2227b2"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-eips", "alloy-eips",
@@ -259,9 +260,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-node-bindings" name = "alloy-node-bindings"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aef9849fb8bbb28f69f2cbdb4b0dac2f0e35c04f6078a00dfb8486469aed02de" checksum = "bf741e871fb62c80e0007041e8bc1e81978abfd98aafea8354472f06bfd4d309"
dependencies = [ dependencies = [
"alloy-genesis", "alloy-genesis",
"alloy-primitives", "alloy-primitives",
@@ -304,9 +305,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-provider" name = "alloy-provider"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dc2dfaddd9a30aa870a78a4e1316e3e115ec1e12e552cbc881310456b85c1f24" checksum = "fcfa2db03d4221b5ca14bff7dbed4712689cb87a3e826af522468783ff05ec5d"
dependencies = [ dependencies = [
"alloy-chains", "alloy-chains",
"alloy-consensus", "alloy-consensus",
@@ -360,9 +361,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-client" name = "alloy-rpc-client"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "531137b283547d5b9a5cafc96b006c64ef76810c681d606f28be9781955293b6" checksum = "d2ec6963b08f1c6ef8eacc01dbba20f2c6a1533550403f6b52dbbe0da0360834"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"alloy-primitives", "alloy-primitives",
@@ -381,9 +382,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-types-any" name = "alloy-rpc-types-any"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ed98e1af55a7d856bfa385f30f63d8d56be2513593655c904a8f4a7ec963aa3e" checksum = "c64a83112b09bd293ef522bfa3800fa2d2df4d72f2bcd3a84b08490503b22e55"
dependencies = [ dependencies = [
"alloy-consensus-any", "alloy-consensus-any",
"alloy-rpc-types-eth", "alloy-rpc-types-eth",
@@ -392,9 +393,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-rpc-types-eth" name = "alloy-rpc-types-eth"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8737d7a6e37ca7bba9c23e9495c6534caec6760eb24abc9d5ffbaaba147818e1" checksum = "5fc1892a1ac0d2a49c063f0791aa6bde342f020c5d37aaaec14832b661802cb4"
dependencies = [ dependencies = [
"alloy-consensus", "alloy-consensus",
"alloy-consensus-any", "alloy-consensus-any",
@@ -404,17 +405,17 @@ dependencies = [
"alloy-rlp", "alloy-rlp",
"alloy-serde", "alloy-serde",
"alloy-sol-types", "alloy-sol-types",
"derive_more",
"itertools 0.13.0", "itertools 0.13.0",
"serde", "serde",
"serde_json", "serde_json",
"thiserror 2.0.9",
] ]
[[package]] [[package]]
name = "alloy-serde" name = "alloy-serde"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5851bf8d5ad33014bd0c45153c603303e730acc8a209450a7ae6b4a12c2789e2" checksum = "17939f6bef49268e4494158fce1ab8913cd6164ec3f9a4ada2c677b9b5a77f2f"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"serde", "serde",
@@ -423,9 +424,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-signer" name = "alloy-signer"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7e10ca565da6500cca015ba35ee424d59798f2e1b85bc0dd8f81dafd401f029a" checksum = "77d1f0762a44338f0e05987103bd5919df52170d949080bfebfeb6aaaa867c39"
dependencies = [ dependencies = [
"alloy-primitives", "alloy-primitives",
"async-trait", "async-trait",
@@ -506,9 +507,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport" name = "alloy-transport"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "538a04a37221469cac0ce231b737fd174de2fdfcdd843bdd068cb39ed3e066ad" checksum = "3a3827275a4eed3431ce876a59c76fd19effc2a8c09566b2603e3a3376d38af0"
dependencies = [ dependencies = [
"alloy-json-rpc", "alloy-json-rpc",
"base64 0.22.1", "base64 0.22.1",
@@ -526,9 +527,9 @@ dependencies = [
[[package]] [[package]]
name = "alloy-transport-http" name = "alloy-transport-http"
version = "0.8.3" version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2ed40eb1e1265b2911512f6aa1dcece9702d078f5a646730c45e39e2be00ac1c" checksum = "958417ddf333c55b0627cb7fbee7c6666895061dee79f50404dd6dbdd8e9eba0"
dependencies = [ dependencies = [
"alloy-transport", "alloy-transport",
"url", "url",
@@ -2527,7 +2528,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d"
dependencies = [ dependencies = [
"libc", "libc",
"windows-sys 0.52.0", "windows-sys 0.59.0",
] ]
[[package]] [[package]]
@@ -3547,7 +3548,7 @@ dependencies = [
"httpdate", "httpdate",
"itoa", "itoa",
"pin-project-lite", "pin-project-lite",
"socket2 0.4.10", "socket2 0.5.8",
"tokio", "tokio",
"tower-service", "tower-service",
"tracing", "tracing",
@@ -4111,7 +4112,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34"
dependencies = [ dependencies = [
"cfg-if", "cfg-if",
"windows-targets 0.48.5", "windows-targets 0.52.6",
] ]
[[package]] [[package]]
@@ -6906,7 +6907,7 @@ dependencies = [
"errno", "errno",
"libc", "libc",
"linux-raw-sys", "linux-raw-sys",
"windows-sys 0.52.0", "windows-sys 0.59.0",
] ]
[[package]] [[package]]
@@ -8339,6 +8340,22 @@ dependencies = [
"zeroize", "zeroize",
] ]
[[package]]
name = "serai-coordinator-substrate"
version = "0.1.0"
dependencies = [
"borsh",
"futures",
"log",
"parity-scale-codec",
"serai-client",
"serai-cosign",
"serai-db",
"serai-processor-messages",
"serai-task",
"tokio",
]
[[package]] [[package]]
name = "serai-coordinator-tests" name = "serai-coordinator-tests"
version = "0.1.0" version = "0.1.0"
@@ -8944,6 +8961,7 @@ dependencies = [
name = "serai-processor-scanner" name = "serai-processor-scanner"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"blake2",
"borsh", "borsh",
"group", "group",
"hex", "hex",
@@ -8956,6 +8974,7 @@ dependencies = [
"serai-processor-messages", "serai-processor-messages",
"serai-processor-primitives", "serai-processor-primitives",
"serai-processor-scheduler-primitives", "serai-processor-scheduler-primitives",
"serai-validator-sets-primitives",
"tokio", "tokio",
] ]
@@ -10467,7 +10486,7 @@ dependencies = [
"fastrand", "fastrand",
"once_cell", "once_cell",
"rustix", "rustix",
"windows-sys 0.52.0", "windows-sys 0.59.0",
] ]
[[package]] [[package]]
@@ -11693,7 +11712,7 @@ version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb"
dependencies = [ dependencies = [
"windows-sys 0.48.0", "windows-sys 0.59.0",
] ]
[[package]] [[package]]

View File

@@ -99,6 +99,7 @@ members = [
"coordinator/tributary/tendermint", "coordinator/tributary/tendermint",
"coordinator/tributary", "coordinator/tributary",
"coordinator/cosign", "coordinator/cosign",
"coordinator/substrate",
"coordinator", "coordinator",
"substrate/primitives", "substrate/primitives",

View File

@@ -1,6 +1,6 @@
AGPL-3.0-only license AGPL-3.0-only license
Copyright (c) 2023 Luke Parker Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as it under the terms of the GNU Affero General Public License Version 3 as

View File

@@ -1,7 +1,19 @@
# Coordinator # Coordinator
The Serai coordinator communicates with other coordinators to prepare batches - [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint BFT algorithm.
for Serai and sign transactions.
In order to achieve consensus over gossip, and order certain events, a - [`tributary`](./tributary) is a micro-blockchain framework. Instead of producing a blockchain
micro-blockchain is instantiated. daemon like the Polkadot SDK or Cosmos SDK intend to, `tributary` is solely intended to be an
embedded asynchronous task within an application.
The Serai coordinator spawns a tributary for each validator set it's coordinating. This allows
the participating validators to communicate in a byzantine-fault-tolerant manner (relying on
Tendermint for consensus).
- [`cosign`](./cosign) contains a library to decide which Substrate blocks should be cosigned and
to evaluate cosigns.
- [`substrate`](./substrate) contains a library to index the Substrate blockchain and handle its
events.
- [`src`](./src) contains the source code for the Coordinator binary itself.

View File

@@ -14,9 +14,6 @@ rust-version = "1.81"
all-features = true all-features = true
rustdoc-args = ["--cfg", "docsrs"] rustdoc-args = ["--cfg", "docsrs"]
[package.metadata.cargo-machete]
ignored = ["scale"]
[lints] [lints]
workspace = true workspace = true
@@ -30,7 +27,7 @@ serai-client = { path = "../../substrate/client", default-features = false, feat
log = { version = "0.4", default-features = false, features = ["std"] } log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = [] } tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db" } serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task" } serai-task = { path = "../../common/task", version = "0.1" }

View File

@@ -24,7 +24,7 @@ db_channel!(
); );
// This is a strict function which won't panic, even with a malicious Serai node, so long as: // This is a strict function which won't panic, even with a malicious Serai node, so long as:
// - It's called incrementally // - It's called incrementally (with an increment of 1)
// - It's only called for block numbers we've completed indexing on within the intend task // - It's only called for block numbers we've completed indexing on within the intend task
// - It's only called for block numbers after a global session has started // - It's only called for block numbers after a global session has started
// - The global sessions channel is populated as the block declaring the session is indexed // - The global sessions channel is populated as the block declaring the session is indexed
@@ -69,6 +69,10 @@ fn currently_evaluated_global_session_strict(
res res
} }
pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> {
CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id)
}
/// A task to determine if a block has been cosigned and we should handle it. /// A task to determine if a block has been cosigned and we should handle it.
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> { pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
pub(crate) db: D, pub(crate) db: D,
@@ -87,13 +91,14 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
break; break;
}; };
// Fetch the global session information
let (global_session, global_session_info) =
currently_evaluated_global_session_strict(&mut txn, block_number);
match has_events { match has_events {
// Because this had notable events, we require an explicit cosign for this block by a // Because this had notable events, we require an explicit cosign for this block by a
// supermajority of the prior block's validator sets // supermajority of the prior block's validator sets
HasEvents::Notable => { HasEvents::Notable => {
let (global_session, global_session_info) =
currently_evaluated_global_session_strict(&mut txn, block_number);
let mut weight_cosigned = 0; let mut weight_cosigned = 0;
for set in global_session_info.sets { for set in global_session_info.sets {
// Check if we have the cosign from this set // Check if we have the cosign from this set
@@ -122,6 +127,8 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
"notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly", "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
)); ));
} }
log::info!("marking notable block #{block_number} as cosigned");
} }
// Since this block didn't have any notable events, we simply require a cosign for this // Since this block didn't have any notable events, we simply require a cosign for this
// block or a greater block by the current validator sets // block or a greater block by the current validator sets
@@ -143,10 +150,6 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
is during the latest global session we've evaluated the start of. is during the latest global session we've evaluated the start of.
*/ */
// Get the global session for this block
let (global_session, global_session_info) =
currently_evaluated_global_session_strict(&mut txn, block_number);
let mut weight_cosigned = 0; let mut weight_cosigned = 0;
let mut lowest_common_block: Option<u64> = None; let mut lowest_common_block: Option<u64> = None;
for set in global_session_info.sets { for set in global_session_info.sets {
@@ -194,6 +197,8 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
*/ */
known_cosign = lowest_common_block; known_cosign = lowest_common_block;
} }
log::debug!("marking non-notable block #{block_number} as cosigned");
} }
// If this block has no events necessitating cosigning, we can immediately consider the // If this block has no events necessitating cosigning, we can immediately consider the
// block cosigned (making this block a NOP) // block cosigned (making this block a NOP)
@@ -213,6 +218,10 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
); );
txn.commit(); txn.commit();
if (block_number % 500) == 0 {
log::info!("marking block #{block_number} as cosigned");
}
made_progress = true; made_progress = true;
} }

View File

@@ -7,6 +7,7 @@ use std::collections::HashMap;
use blake2::{Digest, Blake2s256}; use blake2::{Digest, Blake2s256};
use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize}; use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{ use serai_client::{
@@ -63,6 +64,64 @@ impl GlobalSession {
} }
} }
/// If the block has events.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
/// The block had a notable event.
///
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
/// accordingly must be cosigned before we advance past them.
Notable,
/// The block had an non-notable event justifying a cosign.
NonNotable,
/// The block didn't have an event justifying a cosign.
No,
}
/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
/// The global session this cosign is being performed under.
global_session: [u8; 32],
/// The number of the block to cosign.
block_number: u64,
/// The hash of the block to cosign.
block_hash: [u8; 32],
/// If this cosign must be handled before further cosigns are.
notable: bool,
}
/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
/// The actual cosigner.
pub cosigner: NetworkId,
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
/// The cosign.
pub cosign: Cosign,
/// The signature for the cosign.
pub signature: [u8; 64],
}
impl SignedCosign {
fn verify_signature(&self, signer: serai_client::Public) -> bool {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
}
}
create_db! { create_db! {
Cosign { Cosign {
// The following are populated by the intend task and used throughout the library // The following are populated by the intend task and used throughout the library
@@ -97,64 +156,6 @@ create_db! {
} }
} }
/// If the block has events.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
/// The block had a notable event.
///
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
/// accordingly must be cosigned before we advance past them.
Notable,
/// The block had an non-notable event justifying a cosign.
NonNotable,
/// The block didn't have an event justifying a cosign.
No,
}
/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
struct CosignIntent {
/// The global session this cosign is being performed under.
global_session: [u8; 32],
/// The number of the block to cosign.
block_number: u64,
/// The hash of the block to cosign.
block_hash: [u8; 32],
/// If this cosign must be handled before further cosigns are.
notable: bool,
}
/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
/// The global session this cosign is being performed under.
pub global_session: [u8; 32],
/// The number of the block to cosign.
pub block_number: u64,
/// The hash of the block to cosign.
pub block_hash: [u8; 32],
/// The actual cosigner.
pub cosigner: NetworkId,
}
/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
/// The cosign.
pub cosign: Cosign,
/// The signature for the cosign.
pub signature: [u8; 64],
}
impl SignedCosign {
fn verify_signature(&self, signer: serai_client::Public) -> bool {
let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
signer.verify_simple(COSIGN_CONTEXT, &borsh::to_vec(&self.cosign).unwrap(), &signature).is_ok()
}
}
/// Fetch the keys used for cosigning by a specific network. /// Fetch the keys used for cosigning by a specific network.
async fn keys_for_network( async fn keys_for_network(
serai: &TemporalSerai<'_>, serai: &TemporalSerai<'_>,
@@ -219,6 +220,7 @@ pub trait RequestNotableCosigns: 'static + Send {
} }
/// An error used to indicate the cosigning protocol has faulted. /// An error used to indicate the cosigning protocol has faulted.
#[derive(Debug)]
pub struct Faulted; pub struct Faulted;
/// The interface to manage cosigning with. /// The interface to manage cosigning with.
@@ -255,12 +257,23 @@ impl<D: Db> Cosigning<D> {
} }
/// The latest cosigned block number. /// The latest cosigned block number.
pub fn latest_cosigned_block_number(&self) -> Result<u64, Faulted> { pub fn latest_cosigned_block_number(getter: &impl Get) -> Result<u64, Faulted> {
if FaultedSession::get(&self.db).is_some() { if FaultedSession::get(getter).is_some() {
Err(Faulted)?; Err(Faulted)?;
} }
Ok(LatestCosignedBlockNumber::get(&self.db).unwrap_or(0)) Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
}
/// Fetch an cosigned Substrate block by its block number.
pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
if block_number > Self::latest_cosigned_block_number(getter)? {
return Ok(None);
}
Ok(Some(
SubstrateBlocks::get(getter, block_number).expect("cosigned block but didn't index it"),
))
} }
/// Fetch the notable cosigns for a global session in order to respond to requests. /// Fetch the notable cosigns for a global session in order to respond to requests.
@@ -295,14 +308,12 @@ impl<D: Db> Cosigning<D> {
} }
cosigns cosigns
} else { } else {
let Some(latest_global_session) = LatestGlobalSessionIntended::get(&self.db) else { let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
return vec![]; return vec![];
}; };
let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len()); let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
for network in serai_client::primitives::NETWORKS { for network in serai_client::primitives::NETWORKS {
if let Some(cosign) = if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
NetworksLatestCosignedBlock::get(&self.db, latest_global_session, network)
{
cosigns.push(cosign); cosigns.push(cosign);
} }
} }
@@ -422,4 +433,19 @@ impl<D: Db> Cosigning<D> {
txn.commit(); txn.commit();
Ok(true) Ok(true)
} }
/// Receive intended cosigns to produce for this ValidatorSet.
///
/// All cosigns intended, up to and including the next notable cosign, are returned.
///
/// This will drain the internal channel and not re-yield these intentions again.
pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
let mut res: Vec<CosignIntent> = vec![];
// While we have yet to find a notable cosign...
while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break };
res.push(intent);
}
res
}
} }

View File

@@ -1,32 +0,0 @@
use serai_client::primitives::NetworkId;
pub use serai_db::*;
mod inner_db {
use super::*;
create_db!(
SubstrateDb {
NextBlock: () -> u64,
HandledEvent: (block: [u8; 32]) -> u32,
BatchInstructionsHashDb: (network: NetworkId, id: u32) -> [u8; 32]
}
);
}
pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};
pub struct HandledEvent;
impl HandledEvent {
fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)
}
pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
let next = Self::next_to_handle_event(getter, block);
assert!(next >= event_id);
next == event_id
}
pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
assert!(Self::next_to_handle_event(txn, block) == index);
inner_db::HandledEvent::set(txn, block, &index);
}
}

View File

@@ -1,583 +0,0 @@
use core::{ops::Deref, time::Duration};
use std::{
sync::Arc,
collections::{HashSet, HashMap},
};
use zeroize::Zeroizing;
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use serai_client::{
SeraiError, Block, Serai, TemporalSerai,
primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId},
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
in_instructions::InInstructionsEvent,
coins::CoinsEvent,
};
use serai_db::DbTxn;
use processor_messages::SubstrateContext;
use tokio::{sync::mpsc, time::sleep};
use crate::{
Db,
processors::Processors,
tributary::{TributarySpec, SeraiDkgCompleted},
};
mod db;
pub use db::*;
mod cosign;
pub use cosign::*;
async fn in_set(
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
serai: &TemporalSerai<'_>,
set: ValidatorSet,
) -> Result<Option<bool>, SeraiError> {
let Some(participants) = serai.validator_sets().participants(set.network).await? else {
return Ok(None);
};
let key = (Ristretto::generator() * key.deref()).to_bytes();
Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key)))
}
/// Handle a `NewSet` event from the validator-sets pallet.
///
/// If we're a participant in the new set, builds its `TributarySpec`, persists it, and sends it
/// over `new_tributary_spec` so a Tributary is spawned for it. If we're not a participant, this
/// is a no-op (beyond logging).
async fn handle_new_set<D: Db>(
  txn: &mut D::Transaction<'_>,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
  serai: &Serai,
  block: &Block,
  set: ValidatorSet,
) -> Result<(), SeraiError> {
  if in_set(key, &serai.as_of(block.hash()), set)
    .await?
    .expect("NewSet for set which doesn't exist")
  {
    log::info!("present in set {:?}", set);
    let validators;
    let mut evrf_public_keys = vec![];
    {
      let serai = serai.as_of(block.hash());
      let serai = serai.validator_sets();
      let set_participants =
        serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");
      // Collect each participant's (Ristretto key, weight) pair
      validators = set_participants
        .iter()
        .map(|(k, w)| {
          (
            <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut k.0.as_ref())
              .expect("invalid key registered as participant"),
            u16::try_from(*w).unwrap(),
          )
        })
        .collect::<Vec<_>>();
      // Also collect each participant's embedded-elliptic-curve keys for the eVRF DKG
      for (validator, _) in set_participants {
        // This is only run for external networks which always do a DKG for Serai
        let substrate = serai
          .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519)
          .await?
          .expect("Serai called NewSet on a validator without an Embedwards25519 key");
        // `embedded_elliptic_curves` is documented to have the second entry be the
        // network-specific curve (if it exists and is distinct from Embedwards25519)
        let network =
          if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) {
            serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect(
              "Serai called NewSet on a validator without the embedded key required for the network",
            )
          } else {
            substrate.clone()
          };
        evrf_public_keys.push((
          <[u8; 32]>::try_from(substrate)
            .expect("validator-sets pallet accepted a key of an invalid length"),
          network,
        ));
      }
    };
    // Determine the start time for the Tributary from the block which declared the set
    let time = if let Ok(time) = block.time() {
      time
    } else {
      // Only the genesis block lacks a time
      assert_eq!(block.number(), 0);
      // Use the next block's time
      loop {
        let Ok(Some(res)) = serai.finalized_block_by_number(1).await else {
          sleep(Duration::from_secs(5)).await;
          continue;
        };
        break res.time().unwrap();
      }
    };
    // The block time is in milliseconds yet the Tributary is in seconds
    let time = time / 1000;
    // Since this block is in the past, and Tendermint doesn't play nice with starting chains after
    // their start time (though it does eventually work), delay the start time by 120 seconds
    // This is meant to handle ~20 blocks of lack of finalization for this first block
    const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
    let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;
    let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys);
    log::info!("creating new tributary for {:?}", spec.set());
    // Save it to the database now, not on the channel receiver's side, so this is safe against
    // reboots
    // If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries
    // If this txn doesn't finish, this will be re-fired
    // If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the
    // prior fired event may have not been received yet
    crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec);
    new_tributary_spec.send(spec).unwrap();
  } else {
    log::info!("not present in new set {:?}", set);
  }
  Ok(())
}
/// Collect this block's `Batch` and `Burn` events and forward them to the relevant processors,
/// one `SubstrateBlock` message per network which had an event.
async fn handle_batch_and_burns<Pro: Processors>(
  txn: &mut impl DbTxn,
  processors: &Pro,
  serai: &Serai,
  block: &Block,
) -> Result<(), SeraiError> {
  // Track which networks had events with a Vec in order to preserve the insertion order
  // While that shouldn't be needed, ensuring order never hurts, and may enable design choices
  // with regards to Processor <-> Coordinator message passing
  let mut networks_with_event = vec![];
  let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| {
    // Don't insert this network multiple times
    // A Vec is still used in order to maintain the insertion order
    if !networks_with_event.contains(&network) {
      networks_with_event.push(network);
      burns.insert(network, vec![]);
      batches.insert(network, vec![]);
    }
  };
  let mut batch_block = HashMap::new();
  let mut batches = HashMap::<NetworkId, Vec<u32>>::new();
  let mut burns = HashMap::new();
  let serai = serai.as_of(block.hash());
  for batch in serai.in_instructions().batch_events().await? {
    if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } =
      batch
    {
      network_had_event(&mut burns, &mut batches, network);
      // Persist the on-chain instructions hash so it can later be verified against our own
      BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);
      // Make sure this is the only Batch event for this network in this Block
      assert!(batch_block.insert(network, network_block).is_none());
      // Add the batch included by this block
      batches.get_mut(&network).unwrap().push(id);
    } else {
      panic!("Batch event wasn't Batch: {batch:?}");
    }
  }
  for burn in serai.coins().burn_with_instruction_events().await? {
    if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn {
      let network = instruction.balance.coin.network();
      network_had_event(&mut burns, &mut batches, network);
      // network_had_event should register an entry in burns
      burns.get_mut(&network).unwrap().push(instruction);
    } else {
      panic!("Burn event wasn't Burn: {burn:?}");
    }
  }
  // Sanity check the insertion-ordered Vec contains no duplicate networks
  assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());
  for network in networks_with_event {
    let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
      block
    } else {
      // If it's had a batch or a burn, it must have had a block acknowledged
      serai
        .in_instructions()
        .latest_block_for_network(network)
        .await?
        .expect("network had a batch/burn yet never set a latest block")
    };
    processors
      .send(
        network,
        processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
          context: SubstrateContext {
            serai_time: block.time().unwrap() / 1000,
            network_latest_finalized_block,
          },
          block: block.number(),
          burns: burns.remove(&network).unwrap(),
          batches: batches.remove(&network).unwrap(),
        },
      )
      .await;
  }
  Ok(())
}
// Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding)
//
// Events are processed in a fixed order (NewSet, KeyGen, AcceptedHandover, SetRetired, then
// batches/burns), with a monotonically incrementing `event_id` keyed against `HandledEvent` so
// each is handled exactly once across reboots.
#[allow(clippy::too_many_arguments)]
async fn handle_block<D: Db, Pro: Processors>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
  perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
  tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
  processors: &Pro,
  serai: &Serai,
  block: Block,
) -> Result<(), SeraiError> {
  let hash = block.hash();
  // Define an indexed event ID.
  let mut event_id = 0;
  // If a new validator set was activated, create tributary/inform processor to do a DKG
  for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
    // Individually mark each event as handled so on reboot, we minimize duplicates
    // Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
    // events will successfully be incrementally handled
    // (though the Serai connection should be stable, making this unnecessary)
    let ValidatorSetsEvent::NewSet { set } = new_set else {
      panic!("NewSet event wasn't NewSet: {new_set:?}");
    };
    // If this is Serai, do nothing
    // We only coordinate/process external networks
    if set.network == NetworkId::Serai {
      continue;
    }
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh new set event {:?}", new_set);
      let mut txn = db.txn();
      handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }
  // If a key pair was confirmed, inform the processor
  for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh key gen event {:?}", key_gen);
      let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
        panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
      };
      let substrate_key = key_pair.0 .0;
      processors
        .send(
          set.network,
          processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
            context: SubstrateContext {
              serai_time: block.time().unwrap() / 1000,
              network_latest_finalized_block: serai
                .as_of(block.hash())
                .in_instructions()
                .latest_block_for_network(set.network)
                .await?
                // The processor treats this as a magic value which will cause it to find a network
                // block which has a time greater than or equal to the Serai time
                .unwrap_or(BlockHash([0; 32])),
            },
            session: set.session,
            key_pair,
          },
        )
        .await;
      // TODO: If we were in the set, yet were removed, drop the tributary
      let mut txn = db.txn();
      SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }
  // If a handover was accepted, have the retiring set produce its slash report
  for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
    let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
      panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
    };
    if set.network == NetworkId::Serai {
      continue;
    }
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh accepted handover event {:?}", accepted_handover);
      // TODO: This isn't atomic with the event handling
      // Send a oneshot receiver so we can await the response?
      perform_slash_report.send(set).unwrap();
      let mut txn = db.txn();
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }
  // If a set was retired, retire its tributary
  for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
    let ValidatorSetsEvent::SetRetired { set } = retired_set else {
      panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
    };
    if set.network == NetworkId::Serai {
      continue;
    }
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh set retired event {:?}", retired_set);
      let mut txn = db.txn();
      crate::ActiveTributaryDb::retire_tributary(&mut txn, set);
      tributary_retired.send(set).unwrap();
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }
  // Finally, tell the processor of acknowledged blocks/burns
  // This uses a single event as unlike prior events which individually executed code, all
  // following events share data collection
  if HandledEvent::is_unhandled(db, hash, event_id) {
    let mut txn = db.txn();
    handle_batch_and_burns(&mut txn, processors, serai, &block).await?;
    HandledEvent::handle_event(&mut txn, hash, event_id);
    txn.commit();
  }
  Ok(())
}
/// Handle all newly finalized (and cosigned) Substrate blocks, advancing `next_block` as each is
/// fully handled.
#[allow(clippy::too_many_arguments)]
async fn handle_new_blocks<D: Db, Pro: Processors>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
  perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
  tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
  processors: &Pro,
  serai: &Serai,
  next_block: &mut u64,
) -> Result<(), SeraiError> {
  // Check if there's been a new Substrate block
  let latest_number = serai.latest_finalized_block().await?.number();
  // Advance the cosigning protocol
  advance_cosign_protocol(db, key, serai, latest_number).await?;
  // Reduce to the latest cosigned block
  let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));
  if latest_number < *next_block {
    return Ok(());
  }
  for b in *next_block ..= latest_number {
    let block = serai
      .finalized_block_by_number(b)
      .await?
      .expect("couldn't get block before the latest finalized block");
    log::info!("handling substrate block {b}");
    handle_block(
      db,
      key,
      new_tributary_spec,
      perform_slash_report,
      tributary_retired,
      processors,
      serai,
      block,
    )
    .await?;
    *next_block += 1;
    // Persist our progress so this block isn't re-handled after a reboot
    let mut txn = db.txn();
    NextBlock::set(&mut txn, next_block);
    txn.commit();
    log::info!("handled substrate block {b}");
  }
  Ok(())
}
/// The Substrate scan task.
///
/// Polls for newly finalized Substrate blocks and handles them, resuming from the last handled
/// block on reboot. Runs forever; errors communicating with the Serai node are logged and
/// retried after a delay.
pub async fn scan_task<D: Db, Pro: Processors>(
  mut db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  processors: Pro,
  serai: Arc<Serai>,
  new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
  perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
  tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
) {
  log::info!("scanning substrate");
  // Resume from the block after the last one handled (0 on a fresh database)
  let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();
  /*
  let new_substrate_block_notifier = {
    let serai = &serai;
    move || async move {
      loop {
        match serai.newly_finalized_block().await {
          Ok(sub) => return sub,
          Err(e) => {
            log::error!("couldn't communicate with serai node: {e}");
            sleep(Duration::from_secs(5)).await;
          }
        }
      }
    }
  };
  */
  // TODO: Restore the above subscription-based system
  // That would require moving serai-client from HTTP to websockets
  // Polling-based notifier which resolves once the finalized tip reaches the requested number
  let new_substrate_block_notifier = {
    let serai = &serai;
    move |next_substrate_block| async move {
      loop {
        match serai.latest_finalized_block().await {
          Ok(latest) => {
            if latest.header.number >= next_substrate_block {
              return latest;
            }
            sleep(Duration::from_secs(3)).await;
          }
          Err(e) => {
            log::error!("couldn't communicate with serai node: {e}");
            sleep(Duration::from_secs(5)).await;
          }
        }
      }
    }
  };
  loop {
    // await the next block, yet if our notifier had an error, re-create it
    {
      let Ok(_) = tokio::time::timeout(
        Duration::from_secs(60),
        new_substrate_block_notifier(next_substrate_block),
      )
      .await
      else {
        // Timed out, which may be because Serai isn't finalizing or may be some issue with the
        // notifier
        if serai.latest_finalized_block().await.map(|block| block.number()).ok() ==
          Some(next_substrate_block.saturating_sub(1))
        {
          log::info!("serai hasn't finalized a block in the last 60s...");
        }
        continue;
      };
      /*
      // next_block is a Option<Result>
      if next_block.and_then(Result::ok).is_none() {
        substrate_block_notifier = new_substrate_block_notifier(next_substrate_block);
        continue;
      }
      */
    }
    match handle_new_blocks(
      &mut db,
      &key,
      &new_tributary_spec,
      &perform_slash_report,
      &tributary_retired,
      &processors,
      &serai,
      &mut next_substrate_block,
    )
    .await
    {
      Ok(()) => {}
      Err(e) => {
        // Log and back off; the next loop iteration will retry from `next_substrate_block`
        log::error!("couldn't communicate with serai node: {e}");
        sleep(Duration::from_secs(5)).await;
      }
    }
  }
}
/// Gets the expected ID for the next Batch.
///
/// Will log an error and apply a slight sleep on error, letting the caller simply immediately
/// retry.
pub(crate) async fn expected_next_batch(
  serai: &Serai,
  network: NetworkId,
) -> Result<u32, SeraiError> {
  // The lookup itself, without the error handling
  async fn next_batch(serai: &Serai, network: NetworkId) -> Result<u32, SeraiError> {
    let serai = serai.as_of_latest_finalized_block().await?;
    // The next Batch succeeds the last one, or is 0 if there hasn't been one
    Ok(serai.in_instructions().last_batch_for_network(network).await?.map_or(0, |last| last + 1))
  }
  let res = next_batch(serai, network).await;
  if res.is_err() {
    if let Err(e) = &res {
      log::error!("couldn't get the expected next batch from substrate: {e:?}");
    }
    sleep(Duration::from_millis(100)).await;
  }
  res
}
/// Verifies `Batch`s which have already been indexed from Substrate.
///
/// Spins if a distinct `Batch` is detected on-chain.
///
/// This has a slight malleability in that doesn't verify *who* published a `Batch` is as expected.
/// This is deemed fine.
pub(crate) async fn verify_published_batches<D: Db>(
  txn: &mut D::Transaction<'_>,
  network: NetworkId,
  optimistic_up_to: u32,
) -> Option<u32> {
  // TODO: Localize from MainDb to SubstrateDb
  // Resume from the Batch after the last one verified (or from 0 if none have been)
  let start = crate::LastVerifiedBatchDb::get(txn, network).map_or(0, |last| last + 1);
  for batch_id in start ..= optimistic_up_to {
    let on_chain = match BatchInstructionsHashDb::get(txn, network, batch_id) {
      Some(hash) => hash,
      // This Batch hasn't been indexed on-chain yet, so neither it nor any after it can be
      // verified at this time
      None => break,
    };
    let off_chain = crate::ExpectedBatchDb::get(txn, network, batch_id).unwrap();
    if off_chain != on_chain {
      // Halt operations on this network and spin, as this is a critical fault
      loop {
        log::error!(
          "{}! network: {:?} id: {} off-chain: {} on-chain: {}",
          "on-chain batch doesn't match off-chain",
          network,
          batch_id,
          hex::encode(off_chain),
          hex::encode(on_chain),
        );
        sleep(Duration::from_secs(60)).await;
      }
    }
    crate::LastVerifiedBatchDb::set(txn, network, &batch_id);
  }
  crate::LastVerifiedBatchDb::get(txn, network)
}

View File

@@ -4,9 +4,7 @@ use std::io;
use zeroize::Zeroizing; use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng}; use rand_core::{RngCore, CryptoRng};
use blake2::{Digest, Blake2s256}; use blake2::{digest::typenum::U32, Digest, Blake2b};
use transcript::{Transcript, RecommendedTranscript};
use ciphersuite::{ use ciphersuite::{
group::{ff::Field, GroupEncoding}, group::{ff::Field, GroupEncoding},
Ciphersuite, Ristretto, Ciphersuite, Ristretto,
@@ -14,22 +12,30 @@ use ciphersuite::{
use schnorr::SchnorrSignature; use schnorr::SchnorrSignature;
use scale::{Encode, Decode}; use scale::{Encode, Decode};
use processor_messages::coordinator::SubstrateSignableId; use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::primitives::PublicKey;
use processor_messages::sign::VariantSignId;
use tributary::{ use tributary::{
TRANSACTION_SIZE_LIMIT, ReadWrite, ReadWrite,
transaction::{Signed, TransactionError, TransactionKind, Transaction as TransactionTrait}, transaction::{
Signed as TributarySigned, TransactionError, TransactionKind, Transaction as TransactionTrait,
},
}; };
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)] /// The label for data from a signing protocol.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum Label { pub enum Label {
/// A preprocess.
Preprocess, Preprocess,
/// A signature share.
Share, Share,
} }
impl Label { impl Label {
// TODO: Should nonces be u8 thanks to our use of topics? fn nonce(&self) -> u32 {
pub fn nonce(&self) -> u32 {
match self { match self {
Label::Preprocess => 0, Label::Preprocess => 0,
Label::Share => 1, Label::Share => 1,
@@ -37,474 +43,202 @@ impl Label {
} }
} }
#[derive(Clone, PartialEq, Eq)] fn borsh_serialize_public<W: io::Write>(
pub struct SignData<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> { public: &PublicKey,
pub plan: Id, writer: &mut W,
pub attempt: u32, ) -> Result<(), io::Error> {
pub label: Label, // This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
writer.write_all(&public.encode())
pub data: Vec<Vec<u8>>, }
fn borsh_deserialize_public<R: io::Read>(reader: &mut R) -> Result<PublicKey, io::Error> {
pub signed: Signed, Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
} }
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> Debug for SignData<Id> { /// `tributary::Signed` without the nonce.
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { ///
fmt /// All of our nonces are deterministic to the type of transaction and fields within.
.debug_struct("SignData") #[derive(Clone, Copy, PartialEq, Eq, Debug)]
.field("id", &hex::encode(self.plan.encode())) pub struct Signed {
.field("attempt", &self.attempt) pub signer: <Ristretto as Ciphersuite>::G,
.field("label", &self.label) pub signature: SchnorrSignature<Ristretto>,
.field("signer", &hex::encode(self.signed.signer.to_bytes())) }
.finish_non_exhaustive()
impl BorshSerialize for Signed {
fn serialize<W: io::Write>(&self, writer: &mut W) -> Result<(), io::Error> {
writer.write_all(self.signer.to_bytes().as_ref())?;
self.signature.write(writer)
}
}
impl BorshDeserialize for Signed {
fn deserialize_reader<R: io::Read>(reader: &mut R) -> Result<Self, io::Error> {
let signer = Ristretto::read_G(reader)?;
let signature = SchnorrSignature::read(reader)?;
Ok(Self { signer, signature })
} }
} }
impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> { impl Signed {
pub(crate) fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> { /// Provide a nonce to convert a `Signed` into a `tributary::Signed`.
let plan = Id::decode(&mut scale::IoReader(&mut *reader)) fn nonce(&self, nonce: u32) -> TributarySigned {
.map_err(|_| io::Error::other("invalid plan in SignData"))?; TributarySigned { signer: self.signer, nonce, signature: self.signature }
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt);
let mut label = [0; 1];
reader.read_exact(&mut label)?;
let label = match label[0] {
0 => Label::Preprocess,
1 => Label::Share,
_ => Err(io::Error::other("invalid label in SignData"))?,
};
let data = {
let mut data_pieces = [0];
reader.read_exact(&mut data_pieces)?;
if data_pieces[0] == 0 {
Err(io::Error::other("zero pieces of data in SignData"))?;
}
let mut all_data = vec![];
for _ in 0 .. data_pieces[0] {
let mut data_len = [0; 2];
reader.read_exact(&mut data_len)?;
let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
reader.read_exact(&mut data)?;
all_data.push(data);
}
all_data
};
let signed = Signed::read_without_nonce(reader, label.nonce())?;
Ok(SignData { plan, attempt, label, data, signed })
}
pub(crate) fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
writer.write_all(&self.plan.encode())?;
writer.write_all(&self.attempt.to_le_bytes())?;
writer.write_all(&[match self.label {
Label::Preprocess => 0,
Label::Share => 1,
}])?;
writer.write_all(&[u8::try_from(self.data.len()).unwrap()])?;
for data in &self.data {
if data.len() > u16::MAX.into() {
// Currently, the largest individual preprocess is a Monero transaction
// It provides 4 commitments per input (128 bytes), a 64-byte proof for them, along with a
// key image and proof (96 bytes)
// Even with all of that, we could support 227 inputs in a single TX
// Monero is limited to ~120 inputs per TX
//
// Bitcoin has a much higher input count of 520, yet it only uses 64 bytes per preprocess
Err(io::Error::other("signing data exceeded 65535 bytes"))?;
}
writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
writer.write_all(data)?;
}
self.signed.write_without_nonce(writer)
} }
} }
#[derive(Clone, PartialEq, Eq)] /// The Tributary transaction definition used by Serai
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub enum Transaction { pub enum Transaction {
/// A vote to remove a participant for invalid behavior
RemoveParticipant { RemoveParticipant {
participant: <Ristretto as Ciphersuite>::G, /// The participant to remove
#[borsh(
serialize_with = "borsh_serialize_public",
deserialize_with = "borsh_deserialize_public"
)]
participant: PublicKey,
/// The transaction's signer and signature
signed: Signed, signed: Signed,
}, },
/// A participation in the DKG
DkgParticipation { DkgParticipation {
participation: Vec<u8>, participation: Vec<u8>,
/// The transaction's signer and signature
signed: Signed, signed: Signed,
}, },
DkgConfirmationNonces { /// The preprocess to confirm the DKG results on-chain
// The confirmation attempt DkgConfirmationPreprocess {
/// The attempt number of this signing protocol
attempt: u32, attempt: u32,
// The nonces for DKG confirmation attempt #attempt // The preprocess
confirmation_nonces: [u8; 64], preprocess: [u8; 64],
/// The transaction's signer and signature
signed: Signed, signed: Signed,
}, },
/// The signature share to confirm the DKG results on-chain
DkgConfirmationShare { DkgConfirmationShare {
// The confirmation attempt /// The attempt number of this signing protocol
attempt: u32, attempt: u32,
// The share for DKG confirmation attempt #attempt // The signature share
confirmation_share: [u8; 32], confirmation_share: [u8; 32],
/// The transaction's signer and signature
signed: Signed, signed: Signed,
}, },
// Co-sign a Substrate block. /// Intend to co-sign a finalized Substrate block
CosignSubstrateBlock([u8; 32]), ///
/// When the time comes to start a new co-signing protocol, the most recent Substrate block will
/// be the one selected to be cosigned.
CosignSubstrateBlock {
/// THe hash of the Substrate block to sign
hash: [u8; 32],
},
// When we have synchrony on a batch, we can allow signing it /// Acknowledge a Substrate block
// TODO (never?): This is less efficient compared to an ExternalBlock provided transaction, ///
// which would be binding over the block hash and automatically achieve synchrony on all /// This is provided after the block has been cosigned.
// relevant batches. ExternalBlock was removed for this due to complexity around the pipeline ///
// with the current processor, yet it would still be an improvement. /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
/// resulting from its handling.
SubstrateBlock {
/// The hash of the Substrate block
hash: [u8; 32],
},
/// Acknowledge a Batch
///
/// Once everyone has acknowledged the Batch, we can begin signing it.
Batch { Batch {
block: [u8; 32], /// The hash of the Batch's serialization.
batch: u32, ///
}, /// Generally, we refer to a Batch by its ID/the hash of its instructions. Here, we want to
// When a Serai block is finalized, with the contained batches, we can allow the associated plan /// ensure consensus on the Batch, and achieving consensus on its hash is the most effective
// IDs /// way to do that.
SubstrateBlock(u64), hash: [u8; 32],
SubstrateSign(SignData<SubstrateSignableId>),
Sign(SignData<[u8; 32]>),
// This is defined as an Unsigned transaction in order to de-duplicate SignCompleted amongst
// reporters (who should all report the same thing)
// We do still track the signer in order to prevent a single signer from publishing arbitrarily
// many TXs without penalty
// Here, they're denoted as the first_signer, as only the signer of the first TX to be included
// with this pairing will be remembered on-chain
SignCompleted {
plan: [u8; 32],
tx_hash: Vec<u8>,
first_signer: <Ristretto as Ciphersuite>::G,
signature: SchnorrSignature<Ristretto>,
}, },
SlashReport(Vec<u32>, Signed), /// The local view of slashes observed by the transaction's sender
} SlashReport {
/// The slash points accrued by each validator
slash_points: Vec<u32>,
/// The transaction's signer and signature
signed: Signed,
},
impl Debug for Transaction { Sign {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { /// The ID of the object being signed
match self { id: VariantSignId,
Transaction::RemoveParticipant { participant, signed } => fmt /// The attempt number of this signing protocol
.debug_struct("Transaction::RemoveParticipant") attempt: u32,
.field("participant", &hex::encode(participant.to_bytes())) /// The label for this data within the signing protocol
.field("signer", &hex::encode(signed.signer.to_bytes())) label: Label,
.finish_non_exhaustive(), /// The data itself
Transaction::DkgParticipation { signed, .. } => fmt ///
.debug_struct("Transaction::DkgParticipation") /// There will be `n` blobs of data where `n` is the amount of key shares the validator sending
.field("signer", &hex::encode(signed.signer.to_bytes())) /// this transaction has.
.finish_non_exhaustive(), data: Vec<Vec<u8>>,
Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt /// The transaction's signer and signature
.debug_struct("Transaction::DkgConfirmationNonces") signed: Signed,
.field("attempt", attempt) },
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt
.debug_struct("Transaction::DkgConfirmationShare")
.field("attempt", attempt)
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::CosignSubstrateBlock(block) => fmt
.debug_struct("Transaction::CosignSubstrateBlock")
.field("block", &hex::encode(block))
.finish(),
Transaction::Batch { block, batch } => fmt
.debug_struct("Transaction::Batch")
.field("block", &hex::encode(block))
.field("batch", &batch)
.finish(),
Transaction::SubstrateBlock(block) => {
fmt.debug_struct("Transaction::SubstrateBlock").field("block", block).finish()
}
Transaction::SubstrateSign(sign_data) => {
fmt.debug_struct("Transaction::SubstrateSign").field("sign_data", sign_data).finish()
}
Transaction::Sign(sign_data) => {
fmt.debug_struct("Transaction::Sign").field("sign_data", sign_data).finish()
}
Transaction::SignCompleted { plan, tx_hash, .. } => fmt
.debug_struct("Transaction::SignCompleted")
.field("plan", &hex::encode(plan))
.field("tx_hash", &hex::encode(tx_hash))
.finish_non_exhaustive(),
Transaction::SlashReport(points, signed) => fmt
.debug_struct("Transaction::SignCompleted")
.field("points", points)
.field("signed", signed)
.finish(),
}
}
} }
impl ReadWrite for Transaction { impl ReadWrite for Transaction {
fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> { fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
let mut kind = [0]; borsh::from_reader(reader)
reader.read_exact(&mut kind)?;
match kind[0] {
0 => Ok(Transaction::RemoveParticipant {
participant: Ristretto::read_G(reader)?,
signed: Signed::read_without_nonce(reader, 0)?,
}),
1 => {
let participation = {
let mut participation_len = [0; 4];
reader.read_exact(&mut participation_len)?;
let participation_len = u32::from_le_bytes(participation_len);
if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
Err(io::Error::other(
"participation present in transaction exceeded transaction size limit",
))?;
}
let participation_len = usize::try_from(participation_len).unwrap();
let mut participation = vec![0; participation_len];
reader.read_exact(&mut participation)?;
participation
};
let signed = Signed::read_without_nonce(reader, 0)?;
Ok(Transaction::DkgParticipation { participation, signed })
}
2 => {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt);
let mut confirmation_nonces = [0; 64];
reader.read_exact(&mut confirmation_nonces)?;
let signed = Signed::read_without_nonce(reader, 0)?;
Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed })
}
3 => {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
let attempt = u32::from_le_bytes(attempt);
let mut confirmation_share = [0; 32];
reader.read_exact(&mut confirmation_share)?;
let signed = Signed::read_without_nonce(reader, 1)?;
Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
}
4 => {
let mut block = [0; 32];
reader.read_exact(&mut block)?;
Ok(Transaction::CosignSubstrateBlock(block))
}
5 => {
let mut block = [0; 32];
reader.read_exact(&mut block)?;
let mut batch = [0; 4];
reader.read_exact(&mut batch)?;
Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
}
6 => {
let mut block = [0; 8];
reader.read_exact(&mut block)?;
Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
}
7 => SignData::read(reader).map(Transaction::SubstrateSign),
8 => SignData::read(reader).map(Transaction::Sign),
9 => {
let mut plan = [0; 32];
reader.read_exact(&mut plan)?;
let mut tx_hash_len = [0];
reader.read_exact(&mut tx_hash_len)?;
let mut tx_hash = vec![0; usize::from(tx_hash_len[0])];
reader.read_exact(&mut tx_hash)?;
let first_signer = Ristretto::read_G(reader)?;
let signature = SchnorrSignature::<Ristretto>::read(reader)?;
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
}
10 => {
let mut len = [0];
reader.read_exact(&mut len)?;
let len = len[0];
// If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct
// validators (the amount of validators reported on) will be at most
// `MAX_KEY_SHARES_PER_SET - 1`
if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) {
Err(io::Error::other("more points reported than allowed validator"))?;
}
let mut points = vec![0u32; len.into()];
for points in &mut points {
let mut these_points = [0; 4];
reader.read_exact(&mut these_points)?;
*points = u32::from_le_bytes(these_points);
}
Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?))
}
_ => Err(io::Error::other("invalid transaction type")),
}
} }
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> { fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self { borsh::to_writer(writer, self)
Transaction::RemoveParticipant { participant, signed } => {
writer.write_all(&[0])?;
writer.write_all(&participant.to_bytes())?;
signed.write_without_nonce(writer)
}
Transaction::DkgParticipation { participation, signed } => {
writer.write_all(&[1])?;
writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
writer.write_all(participation)?;
signed.write_without_nonce(writer)
}
Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
writer.write_all(&[2])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(confirmation_nonces)?;
signed.write_without_nonce(writer)
}
Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
writer.write_all(&[3])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(confirmation_share)?;
signed.write_without_nonce(writer)
}
Transaction::CosignSubstrateBlock(block) => {
writer.write_all(&[4])?;
writer.write_all(block)
}
Transaction::Batch { block, batch } => {
writer.write_all(&[5])?;
writer.write_all(block)?;
writer.write_all(&batch.to_le_bytes())
}
Transaction::SubstrateBlock(block) => {
writer.write_all(&[6])?;
writer.write_all(&block.to_le_bytes())
}
Transaction::SubstrateSign(data) => {
writer.write_all(&[7])?;
data.write(writer)
}
Transaction::Sign(data) => {
writer.write_all(&[8])?;
data.write(writer)
}
Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
writer.write_all(&[9])?;
writer.write_all(plan)?;
writer
.write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
writer.write_all(tx_hash)?;
writer.write_all(&first_signer.to_bytes())?;
signature.write(writer)
}
Transaction::SlashReport(points, signed) => {
writer.write_all(&[10])?;
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
for points in points {
writer.write_all(&points.to_le_bytes())?;
}
signed.write_without_nonce(writer)
}
}
} }
} }
impl TransactionTrait for Transaction { impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
match self { match self {
Transaction::RemoveParticipant { participant, signed } => { Transaction::RemoveParticipant { participant, signed } => {
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed) TransactionKind::Signed((b"RemoveParticipant", participant).encode(), signed.nonce(0))
} }
Transaction::DkgParticipation { signed, .. } => { Transaction::DkgParticipation { signed, .. } => {
TransactionKind::Signed(b"dkg".to_vec(), signed) TransactionKind::Signed(b"DkgParticipation".encode(), signed.nonce(0))
}
Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => {
TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(0))
} }
Transaction::DkgConfirmationNonces { attempt, signed, .. } |
Transaction::DkgConfirmationShare { attempt, signed, .. } => { Transaction::DkgConfirmationShare { attempt, signed, .. } => {
TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed) TransactionKind::Signed((b"DkgConfirmation", attempt).encode(), signed.nonce(1))
} }
Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"), Transaction::CosignSubstrateBlock { .. } => TransactionKind::Provided("CosignSubstrateBlock"),
Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
Transaction::Batch { .. } => TransactionKind::Provided("batch"), Transaction::Sign { id, attempt, label, signed, .. } => {
Transaction::SubstrateBlock(_) => TransactionKind::Provided("serai"), TransactionKind::Signed((b"Sign", id, attempt).encode(), signed.nonce(label.nonce()))
Transaction::SubstrateSign(data) => {
TransactionKind::Signed((b"substrate", data.plan, data.attempt).encode(), &data.signed)
} }
Transaction::Sign(data) => {
TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
}
Transaction::SignCompleted { .. } => TransactionKind::Unsigned,
Transaction::SlashReport(_, signed) => { Transaction::SlashReport { signed, .. } => {
TransactionKind::Signed(b"slash_report".to_vec(), signed) TransactionKind::Signed(b"SlashReport".encode(), signed.nonce(0))
} }
} }
} }
fn hash(&self) -> [u8; 32] { fn hash(&self) -> [u8; 32] {
let mut tx = self.serialize(); let mut tx = ReadWrite::serialize(self);
if let TransactionKind::Signed(_, signed) = self.kind() { if let TransactionKind::Signed(_, signed) = self.kind() {
// Make sure the part we're cutting off is the signature // Make sure the part we're cutting off is the signature
assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize()); assert_eq!(tx.drain((tx.len() - 64) ..).collect::<Vec<_>>(), signed.signature.serialize());
} }
Blake2s256::digest([b"Coordinator Tributary Transaction".as_slice(), &tx].concat()).into() Blake2b::<U32>::digest(&tx).into()
} }
// We don't have any verification logic embedded into the transaction. We just slash anyone who
// publishes an invalid transaction.
fn verify(&self) -> Result<(), TransactionError> { fn verify(&self) -> Result<(), TransactionError> {
// TODO: Check SubstrateSign's lengths here
if let Transaction::SignCompleted { first_signer, signature, .. } = self {
if !signature.verify(*first_signer, self.sign_completed_challenge()) {
Err(TransactionError::InvalidContent)?;
}
}
Ok(()) Ok(())
} }
} }
impl Transaction { impl Transaction {
// Used to initially construct transactions so we can then get sig hashes and perform signing
pub fn empty_signed() -> Signed {
Signed {
signer: Ristretto::generator(),
nonce: 0,
signature: SchnorrSignature::<Ristretto> {
R: Ristretto::generator(),
s: <Ristretto as Ciphersuite>::F::ZERO,
},
}
}
// Sign a transaction // Sign a transaction
pub fn sign<R: RngCore + CryptoRng>( pub fn sign<R: RngCore + CryptoRng>(
&mut self, &mut self,
@@ -512,76 +246,38 @@ impl Transaction {
genesis: [u8; 32], genesis: [u8; 32],
key: &Zeroizing<<Ristretto as Ciphersuite>::F>, key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) { ) {
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) { fn signed(tx: &mut Transaction) -> &mut Signed {
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
let nonce = match tx { match tx {
Transaction::RemoveParticipant { .. } => 0, Transaction::RemoveParticipant { ref mut signed, .. } |
Transaction::DkgParticipation { ref mut signed, .. } |
Transaction::DkgParticipation { .. } => 0, Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
// Uses a nonce of 0 as it has an internal attempt counter we distinguish by Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
Transaction::DkgConfirmationNonces { .. } => 0,
// Uses a nonce of 1 due to internal attempt counter and due to following
// DkgConfirmationNonces
Transaction::DkgConfirmationShare { .. } => 1,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
Transaction::CosignSubstrateBlock { .. } => panic!("signing CosignSubstrateBlock"),
Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
Transaction::Batch { .. } => panic!("signing Batch"), Transaction::Batch { .. } => panic!("signing Batch"),
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
Transaction::SubstrateSign(data) => data.label.nonce(), Transaction::Sign { ref mut signed, .. } => signed,
Transaction::Sign(data) => data.label.nonce(),
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"), Transaction::SlashReport { ref mut signed, .. } => signed,
}
Transaction::SlashReport(_, _) => 0,
};
(
nonce,
#[allow(clippy::match_same_arms)]
match tx {
Transaction::RemoveParticipant { ref mut signed, .. } |
Transaction::DkgParticipation { ref mut signed, .. } |
Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed,
Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,
Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
Transaction::Batch { .. } => panic!("signing Batch"),
Transaction::SubstrateBlock(_) => panic!("signing SubstrateBlock"),
Transaction::SubstrateSign(ref mut data) => &mut data.signed,
Transaction::Sign(ref mut data) => &mut data.signed,
Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),
Transaction::SlashReport(_, ref mut signed) => signed,
},
)
} }
let (nonce, signed_ref) = signed(self); // Decide the nonce to sign with
signed_ref.signer = Ristretto::generator() * key.deref();
signed_ref.nonce = nonce;
let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng)); let sig_nonce = Zeroizing::new(<Ristretto as Ciphersuite>::F::random(rng));
signed(self).1.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
let sig_hash = self.sig_hash(genesis);
signed(self).1.signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
}
pub fn sign_completed_challenge(&self) -> <Ristretto as Ciphersuite>::F { {
if let Transaction::SignCompleted { plan, tx_hash, first_signer, signature } = self { // Set the signer and the nonce
let mut transcript = let signed = signed(self);
RecommendedTranscript::new(b"Coordinator Tributary Transaction SignCompleted"); signed.signer = Ristretto::generator() * key.deref();
transcript.append_message(b"plan", plan); signed.signature.R = <Ristretto as Ciphersuite>::generator() * sig_nonce.deref();
transcript.append_message(b"tx_hash", tx_hash);
transcript.append_message(b"signer", first_signer.to_bytes());
transcript.append_message(b"nonce", signature.R.to_bytes());
Ristretto::hash_to_F(b"SignCompleted signature", &transcript.challenge(b"challenge"))
} else {
panic!("sign_completed_challenge called on transaction which wasn't SignCompleted")
} }
// Get the signature hash (which now includes `R || A` making it valid as the challenge)
let sig_hash = self.sig_hash(genesis);
// Sign the signature
signed(self).signature = SchnorrSignature::<Ristretto>::sign(key, sig_nonce, sig_hash);
} }
} }

View File

@@ -0,0 +1,35 @@
[package]
name = "serai-coordinator-substrate"
version = "0.1.0"
description = "Serai Coordinator's Substrate Scanner"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/substrate"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"
[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]
[lints]
workspace = true
[dependencies]
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
log = { version = "0.4", default-features = false, features = ["std"] }
futures = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false }
serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }
serai-cosign = { path = "../cosign", version = "0.1" }
messages = { package = "serai-processor-messages", version = "0.1", path = "../../processor/messages" }

View File

@@ -0,0 +1,15 @@
AGPL-3.0-only license
Copyright (c) 2023-2024 Luke Parker
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

View File

@@ -0,0 +1,14 @@
# Serai Coordinator Substrate Scanner
This is the scanner of the Serai blockchain for the purposes of Serai's coordinator.
Two event streams are defined:
- Canonical events, which must be handled by every validator, regardless of the sets they're present
in. These are represented by `serai_processor_messages::substrate::CoordinatorMessage`.
- Ephemeral events, which only need to be handled by the validators present within the sets they
relate to. These are represented by two channels, `NewSet` and `SignSlashReport`.
The canonical event stream is available without provision of a validator's public key. The ephemeral
event stream requires provision of a validator's public key. Both are ordered within themselves, yet
there are no ordering guarantees across the two.

View File

@@ -0,0 +1,218 @@
use std::future::Future;
use futures::stream::{StreamExt, FuturesOrdered};
use serai_client::Serai;
use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};
use serai_db::*;
use serai_task::ContinuallyRan;
use serai_cosign::Cosigning;
create_db!(
  CoordinatorSubstrateCanonical {
    // The number of the next Serai block to scan for canonical events
    NextBlock: () -> u64,
  }
);
/// The event stream for canonical events.
///
/// Canonical events are those which every validator must process, regardless of which sets
/// they're present in. They're produced by scanning cosigned Serai blocks.
pub struct CanonicalEventStream<D: Db> {
  // Database used to track scan progress and to publish messages onto the canonical channels.
  db: D,
  // Client for the Serai node the blocks/events are fetched from.
  serai: Serai,
}
impl<D: Db> CanonicalEventStream<D> {
/// Create a new canonical event stream.
///
/// Only one of these may exist over the provided database.
pub fn new(db: D, serai: Serai) -> Self {
Self { db, serai }
}
}
impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let next_block = NextBlock::get(&self.db).unwrap_or(0);
      let latest_finalized_block =
        Cosigning::<D>::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;

      // These are all the events which generate canonical messages
      struct CanonicalEvents {
        time: u64,
        key_gen_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
        set_retired_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
        batch_events: Vec<serai_client::in_instructions::InInstructionsEvent>,
        burn_events: Vec<serai_client::coins::CoinsEvent>,
      }

      // For a cosigned block, fetch all relevant events
      let scan = {
        let db = self.db.clone();
        let serai = &self.serai;
        move |block_number| {
          let block_hash = Cosigning::<D>::cosigned_block(&db, block_number);
          async move {
            let block_hash = match block_hash {
              Ok(Some(block_hash)) => block_hash,
              Ok(None) => {
                panic!("iterating to latest cosigned block but couldn't get cosigned block")
              }
              Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
            };

            let temporal_serai = serai.as_of(block_hash);
            let temporal_serai_validators = temporal_serai.validator_sets();
            let temporal_serai_instructions = temporal_serai.in_instructions();
            let temporal_serai_coins = temporal_serai.coins();
            let (block, key_gen_events, set_retired_events, batch_events, burn_events) =
              tokio::try_join!(
                serai.block(block_hash),
                temporal_serai_validators.key_gen_events(),
                temporal_serai_validators.set_retired_events(),
                temporal_serai_instructions.batch_events(),
                temporal_serai_coins.burn_with_instruction_events(),
              )
              .map_err(|e| format!("{e:?}"))?;
            let Some(block) = block else {
              Err(format!("Serai node didn't have cosigned block #{block_number}"))?
            };
            let time = if block_number == 0 {
              block.time().unwrap_or(0)
            } else {
              // Serai's block time is in milliseconds
              block
                .time()
                .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
                1000
            };

            Ok((
              block_number,
              CanonicalEvents {
                time,
                key_gen_events,
                set_retired_events,
                batch_events,
                burn_events,
              },
            ))
          }
        }
      };

      // Sync the next set of upcoming blocks all at once to minimize latency
      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 10;
      let mut set = FuturesOrdered::new();
      // This initial fill queues blocks `next_block ..= next_block + BLOCKS_TO_SYNC_AT_ONCE`
      // (clamped to the latest cosigned block)
      for block_number in
        next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
      {
        set.push_back(scan(block_number));
      }

      for block_number in next_block ..= latest_finalized_block {
        // Get the next block in our queue
        let (popped_block_number, block) = set.next().await.unwrap()?;
        assert_eq!(block_number, popped_block_number);
        // Re-populate the queue. Everything up to and including
        // `block_number + BLOCKS_TO_SYNC_AT_ONCE` was already queued (by the initial fill or by
        // prior iterations), so the next block to queue is the one after that. Queueing
        // `block_number + BLOCKS_TO_SYNC_AT_ONCE` itself would push a duplicate of an
        // already-queued block, tripping the above `assert_eq!` once it was popped.
        let next_to_queue = block_number + BLOCKS_TO_SYNC_AT_ONCE + 1;
        if next_to_queue <= latest_finalized_block {
          set.push_back(scan(next_to_queue));
        }

        let mut txn = self.db.txn();
        for key_gen in block.key_gen_events {
          let serai_client::validator_sets::ValidatorSetsEvent::KeyGen { set, key_pair } = &key_gen
          else {
            panic!("KeyGen event wasn't a KeyGen event: {key_gen:?}");
          };
          crate::Canonical::send(
            &mut txn,
            set.network,
            &CoordinatorMessage::SetKeys {
              serai_time: block.time,
              session: set.session,
              key_pair: key_pair.clone(),
            },
          );
        }
        for set_retired in block.set_retired_events {
          let serai_client::validator_sets::ValidatorSetsEvent::SetRetired { set } = &set_retired
          else {
            panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
          };
          crate::Canonical::send(
            &mut txn,
            set.network,
            &CoordinatorMessage::SlashesReported { session: set.session },
          );
        }
        for network in serai_client::primitives::NETWORKS {
          // Find this network's batch in this block, if there is one
          let mut batch = None;
          for this_batch in &block.batch_events {
            let serai_client::in_instructions::InInstructionsEvent::Batch {
              network: batch_network,
              publishing_session,
              id,
              external_network_block_hash,
              in_instructions_hash,
              in_instruction_results,
            } = this_batch
            else {
              panic!("Batch event wasn't a Batch event: {this_batch:?}");
            };
            if network == *batch_network {
              if batch.is_some() {
                Err("Serai block had multiple batches for the same network".to_string())?;
              }
              batch = Some(ExecutedBatch {
                id: *id,
                publisher: *publishing_session,
                external_network_block_hash: *external_network_block_hash,
                in_instructions_hash: *in_instructions_hash,
                in_instruction_results: in_instruction_results
                  .iter()
                  .map(|bit| {
                    if *bit {
                      InInstructionResult::Succeeded
                    } else {
                      InInstructionResult::Failed
                    }
                  })
                  .collect(),
              });
            }
          }

          // Collect this network's burns-with-instructions
          let mut burns = vec![];
          for burn in &block.burn_events {
            let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
              &burn
            else {
              panic!("Burn event wasn't a Burn.in event: {burn:?}");
            };
            if instruction.balance.coin.network() == network {
              burns.push(instruction.clone());
            }
          }

          crate::Canonical::send(
            &mut txn,
            network,
            &CoordinatorMessage::Block { serai_block_number: block_number, batch, burns },
          );
        }
        // Persist our progress so the next iteration doesn't re-scan (and re-send the messages
        // for) this block
        NextBlock::set(&mut txn, &(block_number + 1));
        txn.commit();
      }

      // Whether or not we processed any blocks this iteration
      Ok(next_block <= latest_finalized_block)
    }
  }
}

View File

@@ -0,0 +1,240 @@
use std::future::Future;
use futures::stream::{StreamExt, FuturesOrdered};
use serai_client::{
primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
Serai,
};
use serai_db::*;
use serai_task::ContinuallyRan;
use serai_cosign::Cosigning;
use crate::NewSetInformation;
create_db!(
  CoordinatorSubstrateEphemeral {
    // The number of the next Serai block to scan for ephemeral events
    NextBlock: () -> u64,
  }
);
/// The event stream for ephemeral events.
///
/// Ephemeral events only need to be handled by the validators present within the sets they relate
/// to, so this stream is instantiated with a specific validator's public key.
pub struct EphemeralEventStream<D: Db> {
  // Database used to track scan progress and to publish events onto the channels.
  db: D,
  // Client for the Serai node the blocks/events are fetched from.
  serai: Serai,
  // This validator's public key; only new sets containing it are forwarded over `NewSet`.
  validator: PublicKey,
}
impl<D: Db> EphemeralEventStream<D> {
/// Create a new ephemeral event stream.
///
/// Only one of these may exist over the provided database.
pub fn new(db: D, serai: Serai, validator: PublicKey) -> Self {
Self { db, serai, validator }
}
}
impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let next_block = NextBlock::get(&self.db).unwrap_or(0);
      let latest_finalized_block =
        Cosigning::<D>::latest_cosigned_block_number(&self.db).map_err(|e| format!("{e:?}"))?;

      // These are all the events which generate ephemeral messages
      struct EphemeralEvents {
        block_hash: [u8; 32],
        time: u64,
        new_set_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
        accepted_handover_events: Vec<serai_client::validator_sets::ValidatorSetsEvent>,
      }

      // For a cosigned block, fetch all relevant events
      let scan = {
        let db = self.db.clone();
        let serai = &self.serai;
        move |block_number| {
          let block_hash = Cosigning::<D>::cosigned_block(&db, block_number);
          async move {
            let block_hash = match block_hash {
              Ok(Some(block_hash)) => block_hash,
              Ok(None) => {
                panic!("iterating to latest cosigned block but couldn't get cosigned block")
              }
              Err(serai_cosign::Faulted) => return Err("cosigning process faulted".to_string()),
            };

            let temporal_serai = serai.as_of(block_hash);
            let temporal_serai_validators = temporal_serai.validator_sets();
            let (block, new_set_events, accepted_handover_events) = tokio::try_join!(
              serai.block(block_hash),
              temporal_serai_validators.new_set_events(),
              temporal_serai_validators.accepted_handover_events(),
            )
            .map_err(|e| format!("{e:?}"))?;
            let Some(block) = block else {
              Err(format!("Serai node didn't have cosigned block #{block_number}"))?
            };
            let time = if block_number == 0 {
              block.time().unwrap_or(0)
            } else {
              // Serai's block time is in milliseconds
              block
                .time()
                .ok_or_else(|| "non-genesis Serai block didn't have a time".to_string())? /
                1000
            };

            Ok((
              block_number,
              EphemeralEvents { block_hash, time, new_set_events, accepted_handover_events },
            ))
          }
        }
      };

      // Sync the next set of upcoming blocks all at once to minimize latency
      const BLOCKS_TO_SYNC_AT_ONCE: u64 = 50;
      let mut set = FuturesOrdered::new();
      // This initial fill queues blocks `next_block ..= next_block + BLOCKS_TO_SYNC_AT_ONCE`
      // (clamped to the latest cosigned block)
      for block_number in
        next_block ..= latest_finalized_block.min(next_block + BLOCKS_TO_SYNC_AT_ONCE)
      {
        set.push_back(scan(block_number));
      }

      for block_number in next_block ..= latest_finalized_block {
        // Get the next block in our queue
        let (popped_block_number, block) = set.next().await.unwrap()?;
        assert_eq!(block_number, popped_block_number);
        // Re-populate the queue. Everything up to and including
        // `block_number + BLOCKS_TO_SYNC_AT_ONCE` was already queued (by the initial fill or by
        // prior iterations), so the next block to queue is the one after that. Queueing
        // `block_number + BLOCKS_TO_SYNC_AT_ONCE` itself would push a duplicate of an
        // already-queued block, tripping the above `assert_eq!` once it was popped.
        let next_to_queue = block_number + BLOCKS_TO_SYNC_AT_ONCE + 1;
        if next_to_queue <= latest_finalized_block {
          set.push_back(scan(next_to_queue));
        }

        let mut txn = self.db.txn();
        for new_set in block.new_set_events {
          let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
            panic!("NewSet event wasn't a NewSet event: {new_set:?}");
          };
          // We only coordinate over external networks
          if set.network == NetworkId::Serai {
            continue;
          }

          let serai = self.serai.as_of(block.block_hash);
          let serai = serai.validator_sets();
          let Some(validators) =
            serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
          else {
            Err(format!(
              "block #{block_number} declared a new set but didn't have the participants"
            ))?
          };
          // Only forward sets this validator is a member of
          let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
          if in_set {
            if u16::try_from(validators.len()).is_err() {
              Err("more than u16::MAX validators sent")?;
            }

            let Ok(validators) = validators
              .into_iter()
              .map(|(validator, weight)| u16::try_from(weight).map(|weight| (validator, weight)))
              .collect::<Result<Vec<_>, _>>()
            else {
              Err("validator's weight exceeded u16::MAX".to_string())?
            };

            let total_weight = validators.iter().map(|(_, weight)| u32::from(*weight)).sum::<u32>();
            if total_weight > MAX_KEY_SHARES_PER_SET {
              Err(format!(
                "{set:?} has {total_weight} key shares when the max is {MAX_KEY_SHARES_PER_SET}"
              ))?;
            }
            let total_weight = u16::try_from(total_weight).unwrap();

            // Fetch all of the validators' embedded elliptic curve keys
            let mut embedded_elliptic_curve_keys = FuturesOrdered::new();
            for (validator, _) in &validators {
              let validator = *validator;
              // try_join doesn't return a future so we need to wrap it in this additional async
              // block
              embedded_elliptic_curve_keys.push_back(async move {
                tokio::try_join!(
                  // One future to fetch the substrate embedded key
                  serai
                    .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
                  // One future to fetch the external embedded key, if there is a distinct curve
                  async {
                    // `embedded_elliptic_curves` is documented to have the second entry be the
                    // network-specific curve (if it exists and is distinct from Embedwards25519)
                    if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
                      serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
                    } else {
                      Ok(None)
                    }
                  }
                )
                .map(|(substrate_embedded_key, external_embedded_key)| {
                  (validator, substrate_embedded_key, external_embedded_key)
                })
              });
            }

            // Collect the keys, pushing one entry per key share
            let mut evrf_public_keys = Vec::with_capacity(usize::from(total_weight));
            for (validator, weight) in &validators {
              let (future_validator, substrate_embedded_key, external_embedded_key) =
                embedded_elliptic_curve_keys.next().await.unwrap().map_err(|e| format!("{e:?}"))?;
              assert_eq!(*validator, future_validator);
              // If there was no distinct external curve, reuse the Embedwards25519 key
              let external_embedded_key =
                external_embedded_key.unwrap_or(substrate_embedded_key.clone());
              match (substrate_embedded_key, external_embedded_key) {
                (Some(substrate_embedded_key), Some(external_embedded_key)) => {
                  let substrate_embedded_key = <[u8; 32]>::try_from(substrate_embedded_key)
                    .map_err(|_| "Embedwards25519 key wasn't 32 bytes".to_string())?;
                  for _ in 0 .. *weight {
                    evrf_public_keys.push((substrate_embedded_key, external_embedded_key.clone()));
                  }
                }
                _ => Err("NewSet with validator missing an embedded key".to_string())?,
              }
            }

            crate::NewSet::send(
              &mut txn,
              &NewSetInformation {
                set: *set,
                serai_block: block.block_hash,
                start_time: block.time,
                // TODO: Why do we have this as an explicit field here?
                // Shouldn't this be inlined into the Processor's key gen code, where it's used?
                threshold: ((total_weight * 2) / 3) + 1,
                validators,
                evrf_public_keys,
              },
            );
          }
        }

        for accepted_handover in block.accepted_handover_events {
          let serai_client::validator_sets::ValidatorSetsEvent::AcceptedHandover { set } =
            &accepted_handover
          else {
            panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
          };
          crate::SignSlashReport::send(&mut txn, set);
        }
        // Persist our progress so the next iteration doesn't re-scan (and re-send the events for)
        // this block
        NextBlock::set(&mut txn, &(block_number + 1));
        txn.commit();
      }

      // Whether or not we processed any blocks this iteration
      Ok(next_block <= latest_finalized_block)
    }
  }
}

View File

@@ -0,0 +1,112 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
use scale::{Encode, Decode};
use borsh::{io, BorshSerialize, BorshDeserialize};
use serai_client::{
primitives::{PublicKey, NetworkId},
validator_sets::primitives::ValidatorSet,
};
use serai_db::*;
mod canonical;
mod ephemeral;
fn borsh_serialize_validators<W: io::Write>(
  validators: &Vec<(PublicKey, u16)>,
  writer: &mut W,
) -> Result<(), io::Error> {
  // SCALE-encode into an intermediate buffer, then write it out in one call. We don't use
  // `encode_to` directly against the writer as `encode_to` panics if the writer returns an error.
  let encoded = validators.encode();
  writer.write_all(&encoded)
}
fn borsh_deserialize_validators<R: io::Read>(
reader: &mut R,
) -> Result<Vec<(PublicKey, u16)>, io::Error> {
Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
}
/// The information for a new set.
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub struct NewSetInformation {
  // The set which was declared.
  set: ValidatorSet,
  // The hash of the Serai block which declared this set.
  serai_block: [u8; 32],
  // The time of that block, in seconds.
  start_time: u64,
  // The signing threshold, in key shares: `((total_weight * 2) / 3) + 1`.
  threshold: u16,
  // The set's validators and their weights (amount of key shares), SCALE-encoded as borsh lacks
  // a derive for this tuple of Substrate types.
  #[borsh(
    serialize_with = "borsh_serialize_validators",
    deserialize_with = "borsh_deserialize_validators"
  )]
  validators: Vec<(PublicKey, u16)>,
  // One entry per key share: the validator's Embedwards25519 key and their network-specific
  // embedded key (which falls back to the former when no distinct curve exists).
  evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
}
// Private module holding the raw database channels; the public wrapper types below gate which
// side (send/receive) of each channel is exposed outside this crate.
mod _public_db {
  use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

  use serai_db::*;

  use crate::NewSetInformation;

  db_channel!(
    CoordinatorSubstrate {
      // Canonical messages to send to the processor
      Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,
      // Relevant new set, from an ephemeral event stream
      NewSet: () -> NewSetInformation,
      // Relevant sign slash report, from an ephemeral event stream
      SignSlashReport: () -> ValidatorSet,
    }
  );
}
/// The canonical event stream.
pub struct Canonical;
impl Canonical {
  // Send a canonical event onto the specified network's channel. Only this crate's event
  // streams may send; external consumers only receive.
  pub(crate) fn send(
    txn: &mut impl DbTxn,
    network: NetworkId,
    msg: &messages::substrate::CoordinatorMessage,
  ) {
    _public_db::Canonical::send(txn, network, msg);
  }
  /// Try to receive a canonical event, returning `None` if there is none to receive.
  pub fn try_recv(
    txn: &mut impl DbTxn,
    network: NetworkId,
  ) -> Option<messages::substrate::CoordinatorMessage> {
    _public_db::Canonical::try_recv(txn, network)
  }
}
/// The channel for new set events emitted by an ephemeral event stream.
pub struct NewSet;
impl NewSet {
  // Send a new set's information onto the channel. Only this crate's event streams may send;
  // external consumers only receive.
  pub(crate) fn send(txn: &mut impl DbTxn, msg: &NewSetInformation) {
    _public_db::NewSet::send(txn, msg);
  }
  /// Try to receive a new set's information, returning `None` if there is none to receive.
  pub fn try_recv(txn: &mut impl DbTxn) -> Option<NewSetInformation> {
    _public_db::NewSet::try_recv(txn)
  }
}
/// The channel for notifications to sign a slash report, as emitted by an ephemeral event stream.
///
/// These notifications MAY be for irrelevant validator sets. The only guarantee is the
/// notifications for all relevant validator sets will be included.
pub struct SignSlashReport;
impl SignSlashReport {
  // Send a notification to sign a slash report. Only this crate's event streams may send;
  // external consumers only receive.
  pub(crate) fn send(txn: &mut impl DbTxn, set: &ValidatorSet) {
    _public_db::SignSlashReport::send(txn, set);
  }
  /// Try to receive a notification to sign a slash report, returning `None` if there is none to
  /// receive.
  pub fn try_recv(txn: &mut impl DbTxn) -> Option<ValidatorSet> {
    _public_db::SignSlashReport::try_recv(txn)
  }
}

View File

@@ -135,7 +135,7 @@ impl<T: TransactionTrait> Block<T> {
// Check TXs are sorted by nonce. // Check TXs are sorted by nonce.
let nonce = |tx: &Transaction<T>| { let nonce = |tx: &Transaction<T>| {
if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() { if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
*nonce nonce
} else { } else {
0 0
} }

View File

@@ -323,7 +323,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
} }
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
let next_nonce = nonce + 1; let next_nonce = nonce + 1;
txn.put(Self::next_nonce_key(&self.genesis, signer, &order), next_nonce.to_le_bytes()); txn.put(Self::next_nonce_key(&self.genesis, &signer, &order), next_nonce.to_le_bytes());
self.mempool.remove(&tx.hash()); self.mempool.remove(&tx.hash());
} }
} }

View File

@@ -110,7 +110,7 @@ impl<T: TransactionTrait> Transaction<T> {
} }
} }
pub fn kind(&self) -> TransactionKind<'_> { pub fn kind(&self) -> TransactionKind {
match self { match self {
Transaction::Tendermint(tx) => tx.kind(), Transaction::Tendermint(tx) => tx.kind(),
Transaction::Application(tx) => tx.kind(), Transaction::Application(tx) => tx.kind(),

View File

@@ -81,11 +81,11 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
} }
Transaction::Application(tx) => match tx.kind() { Transaction::Application(tx) => match tx.kind() {
TransactionKind::Signed(order, Signed { signer, nonce, .. }) => { TransactionKind::Signed(order, Signed { signer, nonce, .. }) => {
let amount = *res.txs_per_signer.get(signer).unwrap_or(&0) + 1; let amount = *res.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
res.txs_per_signer.insert(*signer, amount); res.txs_per_signer.insert(signer, amount);
if let Some(prior_nonce) = if let Some(prior_nonce) =
res.last_nonce_in_mempool.insert((*signer, order.clone()), *nonce) res.last_nonce_in_mempool.insert((signer, order.clone()), nonce)
{ {
assert_eq!(prior_nonce, nonce - 1); assert_eq!(prior_nonce, nonce - 1);
} }
@@ -133,14 +133,14 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
match app_tx.kind() { match app_tx.kind() {
TransactionKind::Signed(order, Signed { signer, .. }) => { TransactionKind::Signed(order, Signed { signer, .. }) => {
// Get the nonce from the blockchain // Get the nonce from the blockchain
let Some(blockchain_next_nonce) = blockchain_next_nonce(*signer, order.clone()) else { let Some(blockchain_next_nonce) = blockchain_next_nonce(signer, order.clone()) else {
// Not a participant // Not a participant
Err(TransactionError::InvalidSigner)? Err(TransactionError::InvalidSigner)?
}; };
let mut next_nonce = blockchain_next_nonce; let mut next_nonce = blockchain_next_nonce;
if let Some(mempool_last_nonce) = if let Some(mempool_last_nonce) =
self.last_nonce_in_mempool.get(&(*signer, order.clone())) self.last_nonce_in_mempool.get(&(signer, order.clone()))
{ {
assert!(*mempool_last_nonce >= blockchain_next_nonce); assert!(*mempool_last_nonce >= blockchain_next_nonce);
next_nonce = *mempool_last_nonce + 1; next_nonce = *mempool_last_nonce + 1;
@@ -148,14 +148,14 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
// If we have too many transactions from this sender, don't add this yet UNLESS we are // If we have too many transactions from this sender, don't add this yet UNLESS we are
// this sender // this sender
let amount_in_pool = *self.txs_per_signer.get(signer).unwrap_or(&0) + 1; let amount_in_pool = *self.txs_per_signer.get(&signer).unwrap_or(&0) + 1;
if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) { if !internal && (amount_in_pool > ACCOUNT_MEMPOOL_LIMIT) {
Err(TransactionError::TooManyInMempool)?; Err(TransactionError::TooManyInMempool)?;
} }
verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?; verify_transaction(app_tx, self.genesis, &mut |_, _| Some(next_nonce))?;
self.last_nonce_in_mempool.insert((*signer, order.clone()), next_nonce); self.last_nonce_in_mempool.insert((signer, order.clone()), next_nonce);
self.txs_per_signer.insert(*signer, amount_in_pool); self.txs_per_signer.insert(signer, amount_in_pool);
} }
TransactionKind::Unsigned => { TransactionKind::Unsigned => {
// check we have the tx in the pool/chain // check we have the tx in the pool/chain
@@ -205,7 +205,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
// Sort signed by nonce // Sort signed by nonce
let nonce = |tx: &Transaction<T>| { let nonce = |tx: &Transaction<T>| {
if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() { if let TransactionKind::Signed(_, Signed { nonce, .. }) = tx.kind() {
*nonce nonce
} else { } else {
unreachable!() unreachable!()
} }
@@ -242,11 +242,11 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
if let Some(tx) = self.txs.remove(tx) { if let Some(tx) = self.txs.remove(tx) {
if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() { if let TransactionKind::Signed(order, Signed { signer, nonce, .. }) = tx.kind() {
let amount = *self.txs_per_signer.get(signer).unwrap() - 1; let amount = *self.txs_per_signer.get(&signer).unwrap() - 1;
self.txs_per_signer.insert(*signer, amount); self.txs_per_signer.insert(signer, amount);
if self.last_nonce_in_mempool.get(&(*signer, order.clone())) == Some(nonce) { if self.last_nonce_in_mempool.get(&(signer, order.clone())) == Some(&nonce) {
self.last_nonce_in_mempool.remove(&(*signer, order)); self.last_nonce_in_mempool.remove(&(signer, order));
} }
} }
} }

View File

@@ -39,7 +39,7 @@ impl ReadWrite for TendermintTx {
} }
impl Transaction for TendermintTx { impl Transaction for TendermintTx {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
// There's an assert elsewhere in the codebase expecting this behavior // There's an assert elsewhere in the codebase expecting this behavior
// If we do want to add Provided/Signed TendermintTxs, review the implications carefully // If we do want to add Provided/Signed TendermintTxs, review the implications carefully
TransactionKind::Unsigned TransactionKind::Unsigned

View File

@@ -60,8 +60,8 @@ impl ReadWrite for NonceTransaction {
} }
impl TransactionTrait for NonceTransaction { impl TransactionTrait for NonceTransaction {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
TransactionKind::Signed(vec![], &self.2) TransactionKind::Signed(vec![], self.2.clone())
} }
fn hash(&self) -> [u8; 32] { fn hash(&self) -> [u8; 32] {

View File

@@ -425,7 +425,7 @@ async fn block_tx_ordering() {
} }
impl TransactionTrait for SignedTx { impl TransactionTrait for SignedTx {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
match self { match self {
SignedTx::Signed(signed) => signed.kind(), SignedTx::Signed(signed) => signed.kind(),
SignedTx::Provided(pro) => pro.kind(), SignedTx::Provided(pro) => pro.kind(),

View File

@@ -67,7 +67,7 @@ impl ReadWrite for ProvidedTransaction {
} }
impl Transaction for ProvidedTransaction { impl Transaction for ProvidedTransaction {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
match self.0[0] { match self.0[0] {
1 => TransactionKind::Provided("order1"), 1 => TransactionKind::Provided("order1"),
2 => TransactionKind::Provided("order2"), 2 => TransactionKind::Provided("order2"),
@@ -119,8 +119,8 @@ impl ReadWrite for SignedTransaction {
} }
impl Transaction for SignedTransaction { impl Transaction for SignedTransaction {
fn kind(&self) -> TransactionKind<'_> { fn kind(&self) -> TransactionKind {
TransactionKind::Signed(vec![], &self.1) TransactionKind::Signed(vec![], self.1.clone())
} }
fn hash(&self) -> [u8; 32] { fn hash(&self) -> [u8; 32] {

View File

@@ -109,7 +109,7 @@ impl Signed {
#[allow(clippy::large_enum_variant)] #[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)] #[derive(Clone, PartialEq, Eq, Debug)]
pub enum TransactionKind<'a> { pub enum TransactionKind {
/// This transaction should be provided by every validator, in an exact order. /// This transaction should be provided by every validator, in an exact order.
/// ///
/// The contained static string names the orderer to use. This allows two distinct provided /// The contained static string names the orderer to use. This allows two distinct provided
@@ -137,14 +137,14 @@ pub enum TransactionKind<'a> {
Unsigned, Unsigned,
/// A signed transaction. /// A signed transaction.
Signed(Vec<u8>, &'a Signed), Signed(Vec<u8>, Signed),
} }
// TODO: Should this be renamed TransactionTrait now that a literal Transaction exists? // TODO: Should this be renamed TransactionTrait now that a literal Transaction exists?
// Or should the literal Transaction be renamed to Event? // Or should the literal Transaction be renamed to Event?
pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite { pub trait Transaction: 'static + Send + Sync + Clone + Eq + Debug + ReadWrite {
/// Return what type of transaction this is. /// Return what type of transaction this is.
fn kind(&self) -> TransactionKind<'_>; fn kind(&self) -> TransactionKind;
/// Return the hash of this transaction. /// Return the hash of this transaction.
/// ///
@@ -198,8 +198,8 @@ pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
match tx.kind() { match tx.kind() {
TransactionKind::Provided(_) | TransactionKind::Unsigned => {} TransactionKind::Provided(_) | TransactionKind::Unsigned => {}
TransactionKind::Signed(order, Signed { signer, nonce, signature }) => { TransactionKind::Signed(order, Signed { signer, nonce, signature }) => {
if let Some(next_nonce) = get_and_increment_nonce(signer, &order) { if let Some(next_nonce) = get_and_increment_nonce(&signer, &order) {
if *nonce != next_nonce { if nonce != next_nonce {
Err(TransactionError::InvalidNonce)?; Err(TransactionError::InvalidNonce)?;
} }
} else { } else {
@@ -208,7 +208,7 @@ pub(crate) fn verify_transaction<F: GAIN, T: Transaction>(
} }
// TODO: Use a batch verification here // TODO: Use a batch verification here
if !signature.verify(*signer, tx.sig_hash(genesis)) { if !signature.verify(signer, tx.sig_hash(genesis)) {
Err(TransactionError::InvalidSignature)?; Err(TransactionError::InvalidSignature)?;
} }
} }

View File

@@ -74,6 +74,7 @@ exceptions = [
{ allow = ["AGPL-3.0"], name = "tributary-chain" }, { allow = ["AGPL-3.0"], name = "tributary-chain" },
{ allow = ["AGPL-3.0"], name = "serai-cosign" }, { allow = ["AGPL-3.0"], name = "serai-cosign" },
{ allow = ["AGPL-3.0"], name = "serai-coordinator-substrate" },
{ allow = ["AGPL-3.0"], name = "serai-coordinator" }, { allow = ["AGPL-3.0"], name = "serai-coordinator" },
{ allow = ["AGPL-3.0"], name = "serai-coins-pallet" }, { allow = ["AGPL-3.0"], name = "serai-coins-pallet" },

View File

@@ -21,8 +21,8 @@ tower = "0.5"
serde_json = { version = "1", default-features = false } serde_json = { version = "1", default-features = false }
simple-request = { path = "../../../common/request", version = "0.1", default-features = false } simple-request = { path = "../../../common/request", version = "0.1", default-features = false }
alloy-json-rpc = { version = "0.8", default-features = false } alloy-json-rpc = { version = "0.9", default-features = false }
alloy-transport = { version = "0.8", default-features = false } alloy-transport = { version = "0.9", default-features = false }
[features] [features]
default = ["tls"] default = ["tls"]

View File

@@ -33,10 +33,10 @@ alloy-core = { version = "0.8", default-features = false }
alloy-sol-types = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-rpc-client = { version = "0.8", default-features = false } alloy-rpc-client = { version = "0.9", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
alloy-node-bindings = { version = "0.8", default-features = false } alloy-node-bindings = { version = "0.9", default-features = false }
tokio = { version = "1", default-features = false, features = ["macros"] } tokio = { version = "1", default-features = false, features = ["macros"] }

View File

@@ -5,9 +5,8 @@ use tokio::sync::mpsc;
use scale::Encode; use scale::Encode;
use serai_client::{ use serai_client::{
primitives::Signature, primitives::Signature, validator_sets::primitives::Session,
validator_sets::primitives::Session, in_instructions::primitives::SignedBatch,
in_instructions::primitives::{Batch, SignedBatch},
}; };
use serai_db::{Get, DbTxn, Db, create_db, db_channel}; use serai_db::{Get, DbTxn, Db, create_db, db_channel};
@@ -196,18 +195,6 @@ impl signers::Coordinator for CoordinatorSend {
} }
} }
fn publish_batch(
&mut self,
batch: Batch,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
async move {
self.send(&messages::ProcessorMessage::Substrate(
messages::substrate::ProcessorMessage::Batch { batch },
));
Ok(())
}
}
fn publish_signed_batch( fn publish_signed_batch(
&mut self, &mut self,
batch: SignedBatch, batch: SignedBatch,

View File

@@ -272,31 +272,19 @@ pub async fn main_loop<
} }
messages::substrate::CoordinatorMessage::Block { messages::substrate::CoordinatorMessage::Block {
serai_block_number: _, serai_block_number: _,
batches, batch,
mut burns, mut burns,
} => { } => {
let scanner = scanner.as_mut().unwrap(); let scanner = scanner.as_mut().unwrap();
// Substrate sets this limit to prevent DoSs from malicious validator sets if let Some(batch) = batch {
// That bound lets us consume this txn in the following loop body, as an optimization
assert!(batches.len() <= 1);
for messages::substrate::ExecutedBatch {
id,
publisher,
in_instructions_hash,
in_instruction_results,
} in batches
{
let key_to_activate = let key_to_activate =
KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0); KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0);
// This is a cheap call as it internally just queues this to be done later // This is a cheap call as it internally just queues this to be done later
let _: () = scanner.acknowledge_batch( let _: () = scanner.acknowledge_batch(
txn.take().unwrap(), txn.take().unwrap(),
id, batch,
publisher,
in_instructions_hash,
in_instruction_results,
/* /*
`acknowledge_batch` takes burns to optimize handling returns with standard `acknowledge_batch` takes burns to optimize handling returns with standard
payments. That's why handling these with a Batch (and not waiting until the payments. That's why handling these with a Batch (and not waiting until the

View File

@@ -34,11 +34,11 @@ k256 = { version = "^0.13.1", default-features = false, features = ["std"] }
alloy-core = { version = "0.8", default-features = false } alloy-core = { version = "0.8", default-features = false }
alloy-rlp = { version = "0.3", default-features = false } alloy-rlp = { version = "0.3", default-features = false }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-transport = { version = "0.8", default-features = false } alloy-transport = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-rpc-client = { version = "0.8", default-features = false } alloy-rpc-client = { version = "0.9", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] } serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] }

View File

@@ -22,12 +22,12 @@ alloy-core = { version = "0.8", default-features = false }
alloy-sol-types = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false }
alloy-sol-macro = { version = "0.8", default-features = false } alloy-sol-macro = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.8", default-features = false } alloy-consensus = { version = "0.9", default-features = false }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-transport = { version = "0.8", default-features = false } alloy-transport = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false }

View File

@@ -22,9 +22,9 @@ alloy-core = { version = "0.8", default-features = false }
alloy-sol-types = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false }
alloy-sol-macro = { version = "0.8", default-features = false } alloy-sol-macro = { version = "0.8", default-features = false }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-transport = { version = "0.8", default-features = false } alloy-transport = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
tokio = { version = "1", default-features = false, features = ["rt"] } tokio = { version = "1", default-features = false, features = ["rt"] }

View File

@@ -21,4 +21,4 @@ group = { version = "0.13", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] } k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] }
alloy-core = { version = "0.8", default-features = false } alloy-core = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.8", default-features = false, features = ["k256"] } alloy-consensus = { version = "0.9", default-features = false, features = ["k256"] }

View File

@@ -24,12 +24,12 @@ alloy-core = { version = "0.8", default-features = false }
alloy-sol-types = { version = "0.8", default-features = false } alloy-sol-types = { version = "0.8", default-features = false }
alloy-sol-macro = { version = "0.8", default-features = false } alloy-sol-macro = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.8", default-features = false } alloy-consensus = { version = "0.9", default-features = false }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-transport = { version = "0.8", default-features = false } alloy-transport = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../../networks/ethereum/schnorr", default-features = false } ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../../networks/ethereum/schnorr", default-features = false }
@@ -53,8 +53,8 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
k256 = { version = "0.13", default-features = false, features = ["std"] } k256 = { version = "0.13", default-features = false, features = ["std"] }
alloy-rpc-client = { version = "0.8", default-features = false } alloy-rpc-client = { version = "0.9", default-features = false }
alloy-node-bindings = { version = "0.8", default-features = false } alloy-node-bindings = { version = "0.9", default-features = false }
tokio = { version = "1.0", default-features = false, features = ["rt-multi-thread", "macros"] } tokio = { version = "1.0", default-features = false, features = ["rt-multi-thread", "macros"] }

View File

@@ -20,10 +20,10 @@ workspace = true
k256 = { version = "0.13", default-features = false, features = ["std"] } k256 = { version = "0.13", default-features = false, features = ["std"] }
alloy-core = { version = "0.8", default-features = false } alloy-core = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.8", default-features = false, features = ["std"] } alloy-consensus = { version = "0.9", default-features = false, features = ["std"] }
alloy-rpc-types-eth = { version = "0.8", default-features = false } alloy-rpc-types-eth = { version = "0.9", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false } alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.8", default-features = false } alloy-provider = { version = "0.9", default-features = false }
ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false } ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false }

View File

@@ -9,7 +9,7 @@ use dkg::Participant;
use serai_primitives::BlockHash; use serai_primitives::BlockHash;
use validator_sets_primitives::{Session, KeyPair, Slash}; use validator_sets_primitives::{Session, KeyPair, Slash};
use coins_primitives::OutInstructionWithBalance; use coins_primitives::OutInstructionWithBalance;
use in_instructions_primitives::{Batch, SignedBatch}; use in_instructions_primitives::SignedBatch;
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] #[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct SubstrateContext { pub struct SubstrateContext {
@@ -145,7 +145,7 @@ pub mod sign {
pub mod coordinator { pub mod coordinator {
use super::*; use super::*;
// TODO: Why does this not simply take the block hash? // TODO: Remove this for the one defined in serai-cosign
pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> { pub fn cosign_block_msg(block_number: u64, block: [u8; 32]) -> Vec<u8> {
const DST: &[u8] = b"Cosign"; const DST: &[u8] = b"Cosign";
let mut res = vec![u8::try_from(DST.len()).unwrap()]; let mut res = vec![u8::try_from(DST.len()).unwrap()];
@@ -188,6 +188,7 @@ pub mod substrate {
pub struct ExecutedBatch { pub struct ExecutedBatch {
pub id: u32, pub id: u32,
pub publisher: Session, pub publisher: Session,
pub external_network_block_hash: [u8; 32],
pub in_instructions_hash: [u8; 32], pub in_instructions_hash: [u8; 32],
pub in_instruction_results: Vec<InInstructionResult>, pub in_instruction_results: Vec<InInstructionResult>,
} }
@@ -203,14 +204,22 @@ pub mod substrate {
/// A block from Serai with relevance to this processor. /// A block from Serai with relevance to this processor.
Block { Block {
serai_block_number: u64, serai_block_number: u64,
batches: Vec<ExecutedBatch>, batch: Option<ExecutedBatch>,
burns: Vec<OutInstructionWithBalance>, burns: Vec<OutInstructionWithBalance>,
}, },
} }
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)] #[derive(Clone, PartialEq, Eq, Debug)]
pub enum ProcessorMessage { pub enum ProcessorMessage {}
Batch { batch: Batch }, impl BorshSerialize for ProcessorMessage {
fn serialize<W: borsh::io::Write>(&self, _writer: &mut W) -> borsh::io::Result<()> {
unimplemented!()
}
}
impl BorshDeserialize for ProcessorMessage {
fn deserialize_reader<R: borsh::io::Read>(_reader: &mut R) -> borsh::io::Result<Self> {
unimplemented!()
}
} }
} }
@@ -383,15 +392,7 @@ impl ProcessorMessage {
res.extend(&id); res.extend(&id);
res res
} }
ProcessorMessage::Substrate(msg) => { ProcessorMessage::Substrate(_) => panic!("requesting intent for empty message type"),
let (sub, id) = match msg {
substrate::ProcessorMessage::Batch { batch } => (0, batch.id.encode()),
};
let mut res = vec![PROCESSOR_UID, TYPE_SUBSTRATE_UID, sub];
res.extend(&id);
res
}
} }
} }
} }

View File

@@ -0,0 +1,125 @@
use core::marker::PhantomData;
use std::io::{Read, Write};
use group::GroupEncoding;
use scale::{Encode, Decode, IoReader};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_db::{Get, DbTxn, create_db};
use serai_primitives::Balance;
use serai_validator_sets_primitives::Session;
use primitives::EncodableG;
use crate::{ScannerFeed, KeyFor, AddressFor};
#[derive(BorshSerialize, BorshDeserialize)]
pub(crate) struct BatchInfo<K: BorshSerialize> {
pub(crate) block_number: u64,
pub(crate) session_to_sign_batch: Session,
pub(crate) external_key_for_session_to_sign_batch: K,
pub(crate) in_instructions_hash: [u8; 32],
}
create_db!(
ScannerBatch {
// The next block to create batches for
NextBlockToBatch: () -> u64,
// The next Batch ID to use
NextBatchId: () -> u32,
// The information needed to verify a batch
InfoForBatch: <G: GroupEncoding>(batch: u32) -> BatchInfo<EncodableG<G>>,
// The return addresses for the InInstructions within a Batch
SerializedReturnAddresses: (batch: u32) -> Vec<u8>,
}
);
pub(crate) struct ReturnInformation<S: ScannerFeed> {
pub(crate) address: AddressFor<S>,
pub(crate) balance: Balance,
}
pub(crate) struct BatchDb<S: ScannerFeed>(PhantomData<S>);
impl<S: ScannerFeed> BatchDb<S> {
pub(crate) fn set_next_block_to_batch(txn: &mut impl DbTxn, next_block_to_batch: u64) {
NextBlockToBatch::set(txn, &next_block_to_batch);
}
pub(crate) fn next_block_to_batch(getter: &impl Get) -> Option<u64> {
NextBlockToBatch::get(getter)
}
pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 {
let id = NextBatchId::get(txn).unwrap_or(0);
NextBatchId::set(txn, &(id + 1));
id
}
pub(crate) fn save_batch_info(
txn: &mut impl DbTxn,
id: u32,
block_number: u64,
session_to_sign_batch: Session,
external_key_for_session_to_sign_batch: KeyFor<S>,
in_instructions_hash: [u8; 32],
) {
InfoForBatch::set(
txn,
id,
&BatchInfo {
block_number,
session_to_sign_batch,
external_key_for_session_to_sign_batch: EncodableG(external_key_for_session_to_sign_batch),
in_instructions_hash,
},
);
}
pub(crate) fn take_info_for_batch(
txn: &mut impl DbTxn,
id: u32,
) -> Option<BatchInfo<EncodableG<KeyFor<S>>>> {
InfoForBatch::take(txn, id)
}
pub(crate) fn save_return_information(
txn: &mut impl DbTxn,
id: u32,
return_information: &Vec<Option<ReturnInformation<S>>>,
) {
let mut buf = Vec::with_capacity(return_information.len() * (32 + 1 + 8));
for return_information in return_information {
if let Some(ReturnInformation { address, balance }) = return_information {
buf.write_all(&[1]).unwrap();
address.serialize(&mut buf).unwrap();
balance.encode_to(&mut buf);
} else {
buf.write_all(&[0]).unwrap();
}
}
SerializedReturnAddresses::set(txn, id, &buf);
}
pub(crate) fn take_return_information(
txn: &mut impl DbTxn,
id: u32,
) -> Option<Vec<Option<ReturnInformation<S>>>> {
let buf = SerializedReturnAddresses::take(txn, id)?;
let mut buf = buf.as_slice();
let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8));
while !buf.is_empty() {
let mut opt = [0xff];
buf.read_exact(&mut opt).unwrap();
assert!((opt[0] == 0) || (opt[0] == 1));
res.push((opt[0] == 1).then(|| {
let address = AddressFor::<S>::deserialize_reader(&mut buf).unwrap();
let balance = Balance::decode(&mut IoReader(&mut buf)).unwrap();
ReturnInformation { address, balance }
}));
}
Some(res)
}
}

View File

@@ -0,0 +1,190 @@
use core::{marker::PhantomData, future::Future};
use blake2::{digest::typenum::U32, Digest, Blake2b};
use scale::Encode;
use serai_db::{DbTxn, Db};
use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
use primitives::{EncodableG, task::ContinuallyRan};
use crate::{
db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToBatchDb, BatchData, BatchToReportDb},
index,
scan::next_to_scan_for_outputs_block,
ScannerFeed, KeyFor,
};
mod db;
pub(crate) use db::{BatchInfo, ReturnInformation};
use db::BatchDb;
pub(crate) fn take_info_for_batch<S: ScannerFeed>(
txn: &mut impl DbTxn,
id: u32,
) -> Option<BatchInfo<EncodableG<KeyFor<S>>>> {
BatchDb::<S>::take_info_for_batch(txn, id)
}
pub(crate) fn take_return_information<S: ScannerFeed>(
txn: &mut impl DbTxn,
id: u32,
) -> Option<Vec<Option<ReturnInformation<S>>>> {
BatchDb::<S>::take_return_information(txn, id)
}
/*
This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion.
We only produce batches once both tasks, scanning for received outputs and checking for resolved
Eventualities, have processed the block. This ensures we know if this block is notable, and have
the InInstructions for it.
*/
#[allow(non_snake_case)]
pub(crate) struct BatchTask<D: Db, S: ScannerFeed> {
db: D,
_S: PhantomData<S>,
}
impl<D: Db, S: ScannerFeed> BatchTask<D, S> {
pub(crate) fn new(mut db: D, start_block: u64) -> Self {
if BatchDb::<S>::next_block_to_batch(&db).is_none() {
// Initialize the DB
let mut txn = db.txn();
BatchDb::<S>::set_next_block_to_batch(&mut txn, start_block);
txn.commit();
}
Self { db, _S: PhantomData }
}
}
impl<D: Db, S: ScannerFeed> ContinuallyRan for BatchTask<D, S> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move {
let highest_batchable = {
// Fetch the next to scan block
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
.expect("BatchTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
return Ok(false);
}
// The last scanned block is the block prior to this
#[allow(clippy::let_and_return)]
let last_scanned = next_to_scan - 1;
// The last scanned block is the highest batchable block as we only scan blocks within a
// window where it's safe to immediately report the block
// See `eventuality.rs` for more info
last_scanned
};
let next_block_to_batch = BatchDb::<S>::next_block_to_batch(&self.db)
.expect("BatchTask run before writing the start block");
for block_number in next_block_to_batch ..= highest_batchable {
let mut txn = self.db.txn();
// Receive the InInstructions for this block
// We always do this as we can't trivially tell if we should recv InInstructions before we
// do
let InInstructionData {
session_to_sign_batch,
external_key_for_session_to_sign_batch,
returnable_in_instructions: in_instructions,
} = ScanToBatchDb::<S>::recv_in_instructions(&mut txn, block_number);
let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, block_number);
if !notable {
assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions");
}
// If this block is notable, create the Batch(s) for it
if notable {
let network = S::NETWORK;
let external_network_block_hash = index::block_id(&txn, block_number);
let mut batch_id = BatchDb::<S>::acquire_batch_id(&mut txn);
// start with empty batch
let mut batches = vec![Batch {
network,
id: batch_id,
external_network_block_hash,
instructions: vec![],
}];
// We also track the return information for the InInstructions within a Batch in case
// they error
let mut return_information = vec![vec![]];
for Returnable { return_address, in_instruction } in in_instructions {
let balance = in_instruction.balance;
let batch = batches.last_mut().unwrap();
batch.instructions.push(in_instruction);
// check if batch is over-size
if batch.encode().len() > MAX_BATCH_SIZE {
// pop the last instruction so it's back in size
let in_instruction = batch.instructions.pop().unwrap();
// bump the id for the new batch
batch_id = BatchDb::<S>::acquire_batch_id(&mut txn);
// make a new batch with this instruction included
batches.push(Batch {
network,
id: batch_id,
external_network_block_hash,
instructions: vec![in_instruction],
});
// Since we're allocating a new batch, allocate a new set of return addresses for it
return_information.push(vec![]);
}
// For the set of return addresses for the InInstructions for the batch we just pushed
// onto, push this InInstruction's return addresses
return_information
.last_mut()
.unwrap()
.push(return_address.map(|address| ReturnInformation { address, balance }));
}
// Now that we've finalized the Batches, save the information for each to the database
assert_eq!(batches.len(), return_information.len());
for (batch, return_information) in batches.iter().zip(&return_information) {
assert_eq!(batch.instructions.len(), return_information.len());
BatchDb::<S>::save_batch_info(
&mut txn,
batch.id,
block_number,
session_to_sign_batch,
external_key_for_session_to_sign_batch,
Blake2b::<U32>::digest(batch.instructions.encode()).into(),
);
BatchDb::<S>::save_return_information(&mut txn, batch.id, return_information);
}
for batch in batches {
BatchToReportDb::<S>::send_batch(
&mut txn,
&BatchData {
session_to_sign_batch,
external_key_for_session_to_sign_batch: EncodableG(
external_key_for_session_to_sign_batch,
),
batch,
},
);
}
}
// Update the next block to batch
BatchDb::<S>::set_next_block_to_batch(&mut txn, block_number + 1);
txn.commit();
}
// Run dependents if were able to batch any blocks
Ok(next_block_to_batch <= highest_batchable)
}
}
}

View File

@@ -81,8 +81,6 @@ create_db!(
ActiveKeys: <K: Borshy>() -> Vec<SeraiKeyDbEntry<K>>, ActiveKeys: <K: Borshy>() -> Vec<SeraiKeyDbEntry<K>>,
RetireAt: <K: Encode>(key: K) -> u64, RetireAt: <K: Encode>(key: K) -> u64,
// The next block to potentially report
NextToPotentiallyReportBlock: () -> u64,
// Highest acknowledged block // Highest acknowledged block
HighestAcknowledgedBlock: () -> u64, HighestAcknowledgedBlock: () -> u64,
@@ -277,10 +275,6 @@ impl<S: ScannerFeed> ScannerGlobalDb<S> {
blocks in which we receive outputs is notable). blocks in which we receive outputs is notable).
*/ */
pub(crate) fn flag_notable_due_to_non_external_output(txn: &mut impl DbTxn, block_number: u64) { pub(crate) fn flag_notable_due_to_non_external_output(txn: &mut impl DbTxn, block_number: u64) {
assert!(
NextToPotentiallyReportBlock::get(txn).unwrap() <= block_number,
"already potentially reported a block we're only now flagging as notable"
);
NotableBlock::set(txn, block_number, &()); NotableBlock::set(txn, block_number, &());
} }
@@ -482,7 +476,7 @@ struct BlockBoundInInstructions {
} }
db_channel! { db_channel! {
ScannerScanReport { ScannerScanBatch {
InInstructions: () -> BlockBoundInInstructions, InInstructions: () -> BlockBoundInInstructions,
} }
} }
@@ -493,8 +487,8 @@ pub(crate) struct InInstructionData<S: ScannerFeed> {
pub(crate) returnable_in_instructions: Vec<Returnable<S>>, pub(crate) returnable_in_instructions: Vec<Returnable<S>>,
} }
pub(crate) struct ScanToReportDb<S: ScannerFeed>(PhantomData<S>); pub(crate) struct ScanToBatchDb<S: ScannerFeed>(PhantomData<S>);
impl<S: ScannerFeed> ScanToReportDb<S> { impl<S: ScannerFeed> ScanToBatchDb<S> {
pub(crate) fn send_in_instructions( pub(crate) fn send_in_instructions(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
block_number: u64, block_number: u64,
@@ -545,6 +539,30 @@ impl<S: ScannerFeed> ScanToReportDb<S> {
} }
} }
#[derive(BorshSerialize, BorshDeserialize)]
pub(crate) struct BatchData<K: BorshSerialize + BorshDeserialize> {
pub(crate) session_to_sign_batch: Session,
pub(crate) external_key_for_session_to_sign_batch: K,
pub(crate) batch: Batch,
}
db_channel! {
ScannerBatchReport {
BatchToReport: <K: Borshy>() -> BatchData<K>,
}
}
pub(crate) struct BatchToReportDb<S: ScannerFeed>(PhantomData<S>);
impl<S: ScannerFeed> BatchToReportDb<S> {
pub(crate) fn send_batch(txn: &mut impl DbTxn, batch_data: &BatchData<EncodableG<KeyFor<S>>>) {
BatchToReport::send(txn, batch_data);
}
pub(crate) fn try_recv_batch(txn: &mut impl DbTxn) -> Option<BatchData<EncodableG<KeyFor<S>>>> {
BatchToReport::try_recv(txn)
}
}
db_channel! { db_channel! {
ScannerSubstrateEventuality { ScannerSubstrateEventuality {
Burns: (acknowledged_block: u64) -> Vec<OutInstructionWithBalance>, Burns: (acknowledged_block: u64) -> Vec<OutInstructionWithBalance>,
@@ -583,7 +601,6 @@ mod _public_db {
db_channel! { db_channel! {
ScannerPublic { ScannerPublic {
Batches: () -> Batch,
BatchesToSign: (key: &[u8]) -> Batch, BatchesToSign: (key: &[u8]) -> Batch,
AcknowledgedBatches: (key: &[u8]) -> u32, AcknowledgedBatches: (key: &[u8]) -> u32,
CompletedEventualities: (key: &[u8]) -> [u8; 32], CompletedEventualities: (key: &[u8]) -> [u8; 32],
@@ -591,21 +608,6 @@ mod _public_db {
} }
} }
/// The batches to publish.
///
/// This is used for auditing the Batches published to Serai.
pub struct Batches;
impl Batches {
pub(crate) fn send(txn: &mut impl DbTxn, batch: &Batch) {
_public_db::Batches::send(txn, batch);
}
/// Receive a batch to publish.
pub fn try_recv(txn: &mut impl DbTxn) -> Option<Batch> {
_public_db::Batches::try_recv(txn)
}
}
/// The batches to sign and publish. /// The batches to sign and publish.
/// ///
/// This is used for publishing Batches onto Serai. /// This is used for publishing Batches onto Serai.

View File

@@ -11,9 +11,9 @@ use borsh::{BorshSerialize, BorshDeserialize};
use serai_db::{Get, DbTxn, Db}; use serai_db::{Get, DbTxn, Db};
use serai_primitives::{NetworkId, Coin, Amount}; use serai_primitives::{NetworkId, Coin, Amount};
use serai_validator_sets_primitives::Session;
use serai_coins_primitives::OutInstructionWithBalance; use serai_coins_primitives::OutInstructionWithBalance;
use messages::substrate::ExecutedBatch;
use primitives::{task::*, Address, ReceivedOutput, Block, Payment}; use primitives::{task::*, Address, ReceivedOutput, Block, Payment};
// Logic for deciding where in its lifetime a multisig is. // Logic for deciding where in its lifetime a multisig is.
@@ -23,12 +23,14 @@ pub use lifetime::LifetimeStage;
// Database schema definition and associated functions. // Database schema definition and associated functions.
mod db; mod db;
use db::ScannerGlobalDb; use db::ScannerGlobalDb;
pub use db::{Batches, BatchesToSign, AcknowledgedBatches, CompletedEventualities}; pub use db::{BatchesToSign, AcknowledgedBatches, CompletedEventualities};
// Task to index the blockchain, ensuring we don't reorganize finalized blocks. // Task to index the blockchain, ensuring we don't reorganize finalized blocks.
mod index; mod index;
// Scans blocks for received coins. // Scans blocks for received coins.
mod scan; mod scan;
/// Task which reports Batches to Substrate. /// Task which creates Batches for Substrate.
mod batch;
/// Task which reports Batches for signing.
mod report; mod report;
/// Task which handles events from Substrate once we can. /// Task which handles events from Substrate once we can.
mod substrate; mod substrate;
@@ -379,23 +381,27 @@ impl<S: ScannerFeed> Scanner<S> {
let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await; let index_task = index::IndexTask::new(db.clone(), feed.clone(), start_block).await;
let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block); let scan_task = scan::ScanTask::new(db.clone(), feed.clone(), start_block);
let report_task = report::ReportTask::<_, S>::new(db.clone(), start_block); let batch_task = batch::BatchTask::<_, S>::new(db.clone(), start_block);
let report_task = report::ReportTask::<_, S>::new(db.clone());
let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone()); let substrate_task = substrate::SubstrateTask::<_, S>::new(db.clone());
let eventuality_task = let eventuality_task =
eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block); eventuality::EventualityTask::<_, _, _>::new(db, feed, scheduler, start_block);
let (index_task_def, _index_handle) = Task::new(); let (index_task_def, _index_handle) = Task::new();
let (scan_task_def, scan_handle) = Task::new(); let (scan_task_def, scan_handle) = Task::new();
let (batch_task_def, batch_handle) = Task::new();
let (report_task_def, report_handle) = Task::new(); let (report_task_def, report_handle) = Task::new();
let (substrate_task_def, substrate_handle) = Task::new(); let (substrate_task_def, substrate_handle) = Task::new();
let (eventuality_task_def, eventuality_handle) = Task::new(); let (eventuality_task_def, eventuality_handle) = Task::new();
// Upon indexing a new block, scan it // Upon indexing a new block, scan it
tokio::spawn(index_task.continually_run(index_task_def, vec![scan_handle.clone()])); tokio::spawn(index_task.continually_run(index_task_def, vec![scan_handle.clone()]));
// Upon scanning a block, report it // Upon scanning a block, creates the batches for it
tokio::spawn(scan_task.continually_run(scan_task_def, vec![report_handle])); tokio::spawn(scan_task.continually_run(scan_task_def, vec![batch_handle]));
// Upon reporting a block, we do nothing (as the burden is on Substrate which won't be // Upon creating batches for a block, we run the report task
// immediately ready) tokio::spawn(batch_task.continually_run(batch_task_def, vec![report_handle]));
// Upon reporting the batches for signing, we do nothing (as the burden is on a tributary which
// won't immediately yield a result)
tokio::spawn(report_task.continually_run(report_task_def, vec![])); tokio::spawn(report_task.continually_run(report_task_def, vec![]));
// Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected) // Upon handling an event from Substrate, we run the Eventuality task (as it's what's affected)
tokio::spawn(substrate_task.continually_run(substrate_task_def, vec![eventuality_handle])); tokio::spawn(substrate_task.continually_run(substrate_task_def, vec![eventuality_handle]));
@@ -438,29 +444,17 @@ impl<S: ScannerFeed> Scanner<S> {
/// `queue_burns`. Doing so will cause them to be executed multiple times. /// `queue_burns`. Doing so will cause them to be executed multiple times.
/// ///
/// The calls to this function must be ordered with regards to `queue_burns`. /// The calls to this function must be ordered with regards to `queue_burns`.
#[allow(clippy::too_many_arguments)]
pub fn acknowledge_batch( pub fn acknowledge_batch(
&mut self, &mut self,
mut txn: impl DbTxn, mut txn: impl DbTxn,
batch_id: u32, batch: ExecutedBatch,
publisher: Session,
in_instructions_hash: [u8; 32],
in_instruction_results: Vec<messages::substrate::InInstructionResult>,
burns: Vec<OutInstructionWithBalance>, burns: Vec<OutInstructionWithBalance>,
key_to_activate: Option<KeyFor<S>>, key_to_activate: Option<KeyFor<S>>,
) { ) {
log::info!("acknowledging batch {batch_id}"); log::info!("acknowledging batch {}", batch.id);
// Queue acknowledging this block via the Substrate task // Queue acknowledging this block via the Substrate task
substrate::queue_acknowledge_batch::<S>( substrate::queue_acknowledge_batch::<S>(&mut txn, batch, burns, key_to_activate);
&mut txn,
batch_id,
publisher,
in_instructions_hash,
in_instruction_results,
burns,
key_to_activate,
);
// Commit this txn so this data is flushed // Commit this txn so this data is flushed
txn.commit(); txn.commit();
// Then run the Substrate task // Then run the Substrate task

View File

@@ -1,52 +1,16 @@
use core::marker::PhantomData;
use std::io::{Read, Write};
use group::GroupEncoding;
use scale::{Encode, Decode, IoReader};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_db::{Get, DbTxn, create_db}; use serai_db::{Get, DbTxn, create_db};
use serai_primitives::Balance;
use serai_validator_sets_primitives::Session; use serai_validator_sets_primitives::Session;
use primitives::EncodableG;
use crate::{ScannerFeed, KeyFor, AddressFor};
#[derive(BorshSerialize, BorshDeserialize)]
pub(crate) struct BatchInfo<K: BorshSerialize> {
pub(crate) block_number: u64,
pub(crate) session_to_sign_batch: Session,
pub(crate) external_key_for_session_to_sign_batch: K,
pub(crate) in_instructions_hash: [u8; 32],
}
create_db!( create_db!(
ScannerReport { ScannerBatch {
// The next block to potentially report
NextToPotentiallyReportBlock: () -> u64,
// The last session to sign a Batch and their first Batch signed // The last session to sign a Batch and their first Batch signed
LastSessionToSignBatchAndFirstBatch: () -> (Session, u32), LastSessionToSignBatchAndFirstBatch: () -> (Session, u32),
// The next Batch ID to use
NextBatchId: () -> u32,
// The information needed to verify a batch
InfoForBatch: <G: GroupEncoding>(batch: u32) -> BatchInfo<EncodableG<G>>,
// The return addresses for the InInstructions within a Batch
SerializedReturnAddresses: (batch: u32) -> Vec<u8>,
} }
); );
pub(crate) struct ReturnInformation<S: ScannerFeed> { pub(crate) struct BatchDb;
pub(crate) address: AddressFor<S>, impl BatchDb {
pub(crate) balance: Balance,
}
pub(crate) struct ReportDb<S: ScannerFeed>(PhantomData<S>);
impl<S: ScannerFeed> ReportDb<S> {
pub(crate) fn set_last_session_to_sign_batch_and_first_batch( pub(crate) fn set_last_session_to_sign_batch_and_first_batch(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
session: Session, session: Session,
@@ -59,86 +23,4 @@ impl<S: ScannerFeed> ReportDb<S> {
) -> Option<(Session, u32)> { ) -> Option<(Session, u32)> {
LastSessionToSignBatchAndFirstBatch::get(getter) LastSessionToSignBatchAndFirstBatch::get(getter)
} }
pub(crate) fn set_next_to_potentially_report_block(
txn: &mut impl DbTxn,
next_to_potentially_report_block: u64,
) {
NextToPotentiallyReportBlock::set(txn, &next_to_potentially_report_block);
}
pub(crate) fn next_to_potentially_report_block(getter: &impl Get) -> Option<u64> {
NextToPotentiallyReportBlock::get(getter)
}
pub(crate) fn acquire_batch_id(txn: &mut impl DbTxn) -> u32 {
let id = NextBatchId::get(txn).unwrap_or(0);
NextBatchId::set(txn, &(id + 1));
id
}
pub(crate) fn save_batch_info(
txn: &mut impl DbTxn,
id: u32,
block_number: u64,
session_to_sign_batch: Session,
external_key_for_session_to_sign_batch: KeyFor<S>,
in_instructions_hash: [u8; 32],
) {
InfoForBatch::set(
txn,
id,
&BatchInfo {
block_number,
session_to_sign_batch,
external_key_for_session_to_sign_batch: EncodableG(external_key_for_session_to_sign_batch),
in_instructions_hash,
},
);
}
pub(crate) fn take_info_for_batch(
txn: &mut impl DbTxn,
id: u32,
) -> Option<BatchInfo<EncodableG<KeyFor<S>>>> {
InfoForBatch::take(txn, id)
}
pub(crate) fn save_return_information(
txn: &mut impl DbTxn,
id: u32,
return_information: &Vec<Option<ReturnInformation<S>>>,
) {
let mut buf = Vec::with_capacity(return_information.len() * (32 + 1 + 8));
for return_information in return_information {
if let Some(ReturnInformation { address, balance }) = return_information {
buf.write_all(&[1]).unwrap();
address.serialize(&mut buf).unwrap();
balance.encode_to(&mut buf);
} else {
buf.write_all(&[0]).unwrap();
}
}
SerializedReturnAddresses::set(txn, id, &buf);
}
pub(crate) fn take_return_information(
txn: &mut impl DbTxn,
id: u32,
) -> Option<Vec<Option<ReturnInformation<S>>>> {
let buf = SerializedReturnAddresses::take(txn, id)?;
let mut buf = buf.as_slice();
let mut res = Vec::with_capacity(buf.len() / (32 + 1 + 8));
while !buf.is_empty() {
let mut opt = [0xff];
buf.read_exact(&mut opt).unwrap();
assert!((opt[0] == 0) || (opt[0] == 1));
res.push((opt[0] == 1).then(|| {
let address = AddressFor::<S>::deserialize_reader(&mut buf).unwrap();
let balance = Balance::decode(&mut IoReader(&mut buf)).unwrap();
ReturnInformation { address, balance }
}));
}
Some(res)
}
} }

View File

@@ -1,45 +1,19 @@
use core::{marker::PhantomData, future::Future}; use core::{marker::PhantomData, future::Future};
use blake2::{digest::typenum::U32, Digest, Blake2b};
use scale::Encode;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
use serai_validator_sets_primitives::Session; use serai_validator_sets_primitives::Session;
use serai_in_instructions_primitives::{MAX_BATCH_SIZE, Batch};
use primitives::{EncodableG, task::ContinuallyRan}; use primitives::task::ContinuallyRan;
use crate::{ use crate::{
db::{Returnable, ScannerGlobalDb, InInstructionData, ScanToReportDb, Batches, BatchesToSign}, db::{BatchData, BatchToReportDb, BatchesToSign},
scan::next_to_scan_for_outputs_block, substrate, ScannerFeed,
substrate, ScannerFeed, KeyFor,
}; };
mod db; mod db;
pub(crate) use db::{BatchInfo, ReturnInformation}; use db::BatchDb;
use db::ReportDb;
pub(crate) fn take_info_for_batch<S: ScannerFeed>( // This task begins reporting Batches for signing once the pre-requisities are met.
txn: &mut impl DbTxn,
id: u32,
) -> Option<BatchInfo<EncodableG<KeyFor<S>>>> {
ReportDb::<S>::take_info_for_batch(txn, id)
}
pub(crate) fn take_return_information<S: ScannerFeed>(
txn: &mut impl DbTxn,
id: u32,
) -> Option<Vec<Option<ReturnInformation<S>>>> {
ReportDb::<S>::take_return_information(txn, id)
}
/*
This task produces Batches for notable blocks, with all InInstructions, in an ordered fashion.
We only report blocks once both tasks, scanning for received outputs and checking for resolved
Eventualities, have processed the block. This ensures we know if this block is notable, and have
the InInstructions for it.
*/
#[allow(non_snake_case)] #[allow(non_snake_case)]
pub(crate) struct ReportTask<D: Db, S: ScannerFeed> { pub(crate) struct ReportTask<D: Db, S: ScannerFeed> {
db: D, db: D,
@@ -47,14 +21,7 @@ pub(crate) struct ReportTask<D: Db, S: ScannerFeed> {
} }
impl<D: Db, S: ScannerFeed> ReportTask<D, S> { impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
pub(crate) fn new(mut db: D, start_block: u64) -> Self { pub(crate) fn new(db: D) -> Self {
if ReportDb::<S>::next_to_potentially_report_block(&db).is_none() {
// Initialize the DB
let mut txn = db.txn();
ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, start_block);
txn.commit();
}
Self { db, _S: PhantomData } Self { db, _S: PhantomData }
} }
} }
@@ -62,166 +29,77 @@ impl<D: Db, S: ScannerFeed> ReportTask<D, S> {
impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> { impl<D: Db, S: ScannerFeed> ContinuallyRan for ReportTask<D, S> {
fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> { fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
async move { async move {
let highest_reportable = { let mut made_progress = false;
// Fetch the next to scan block loop {
let next_to_scan = next_to_scan_for_outputs_block::<S>(&self.db)
.expect("ReportTask run before writing the start block");
// If we haven't done any work, return
if next_to_scan == 0 {
return Ok(false);
}
// The last scanned block is the block prior to this
#[allow(clippy::let_and_return)]
let last_scanned = next_to_scan - 1;
// The last scanned block is the highest reportable block as we only scan blocks within a
// window where it's safe to immediately report the block
// See `eventuality.rs` for more info
last_scanned
};
let next_to_potentially_report = ReportDb::<S>::next_to_potentially_report_block(&self.db)
.expect("ReportTask run before writing the start block");
for block_number in next_to_potentially_report ..= highest_reportable {
let mut txn = self.db.txn(); let mut txn = self.db.txn();
let Some(BatchData {
// Receive the InInstructions for this block
// We always do this as we can't trivially tell if we should recv InInstructions before we
// do
let InInstructionData {
session_to_sign_batch, session_to_sign_batch,
external_key_for_session_to_sign_batch, external_key_for_session_to_sign_batch,
returnable_in_instructions: in_instructions, batch,
} = ScanToReportDb::<S>::recv_in_instructions(&mut txn, block_number); }) = BatchToReportDb::<S>::try_recv_batch(&mut txn)
else {
break;
};
let notable = ScannerGlobalDb::<S>::is_block_notable(&txn, block_number); /*
if !notable { If this is the handover Batch, the first Batch signed by a session which retires the
assert!(in_instructions.is_empty(), "block wasn't notable yet had InInstructions"); prior validator set, then this should only be signed after the prior validator set's
} actions are fully validated.
// If this block is notable, create the Batch(s) for it
if notable {
let network = S::NETWORK;
let mut batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
/* The new session will only be responsible for signing this Batch if the prior key has
If this is the handover Batch, the first Batch signed by a session which retires the retired, successfully completed all its on-external-network actions.
prior validator set, then this should only be signed after the prior validator set's
actions are fully validated.
The new session will only be responsible for signing this Batch if the prior key has We check here the prior session has successfully completed all its on-Serai-network
retired, successfully completed all its on-external-network actions. actions by ensuring we've validated all Batches expected from it. Only then do we sign
the Batch confirming the handover.
We check here the prior session has successfully completed all its on-Serai-network We also wait for the Batch confirming the handover to be accepted on-chain, ensuring we
actions by ensuring we've validated all Batches expected from it. Only then do we sign don't verify the prior session's Batches, sign the handover Batch and the following
the Batch confirming the handover. Batch, have the prior session publish a malicious Batch where our handover Batch should
be, before our following Batch becomes our handover Batch.
We also wait for the Batch confirming the handover to be accepted on-chain, ensuring we */
don't verify the prior session's Batches, sign the handover Batch and the following if session_to_sign_batch != Session(0) {
Batch, have the prior session publish a malicious Batch where our handover Batch should // We may have Session(1)'s first Batch be Batch 0 if Session(0) never publishes a
be, before our following Batch becomes our handover Batch. // Batch. This is fine as we'll hit the distinct Session check and then set the correct
*/ // values into this DB entry. All other sessions must complete the handover process,
if session_to_sign_batch != Session(0) { // which requires having published at least one Batch
// We may have Session(1)'s first Batch be Batch 0 if Session(0) never publishes a let (last_session, first_batch) =
// Batch. This is fine as we'll hit the distinct Session check and then set the correct BatchDb::last_session_to_sign_batch_and_first_batch(&txn).unwrap_or((Session(0), 0));
// values into this DB entry. All other sessions must complete the handover process, // Because this boolean was expanded, we lose short-circuiting. That's fine
// which requires having published at least one Batch let handover_batch = last_session != session_to_sign_batch;
let (last_session, first_batch) = let batch_after_handover_batch =
ReportDb::<S>::last_session_to_sign_batch_and_first_batch(&txn) (last_session == session_to_sign_batch) && ((first_batch + 1) == batch.id);
.unwrap_or((Session(0), 0)); if handover_batch || batch_after_handover_batch {
// Because this boolean was expanded, we lose short-circuiting. That's fine let verified_prior_batch = substrate::last_acknowledged_batch::<S>(&txn)
let handover_batch = last_session != session_to_sign_batch; // Since `batch.id = 0` in the Session(0)-never-published-a-Batch case, we don't
let batch_after_handover_batch = // check `last_acknowledged_batch >= (batch.id - 1)` but instead this
(last_session == session_to_sign_batch) && ((first_batch + 1) == batch_id); .map(|last_acknowledged_batch| (last_acknowledged_batch + 1) >= batch.id)
if handover_batch || batch_after_handover_batch { // We've never verified any Batches
let verified_prior_batch = substrate::last_acknowledged_batch::<S>(&txn) .unwrap_or(false);
// Since `batch_id = 0` in the Session(0)-never-published-a-Batch case, we don't if !verified_prior_batch {
// check `last_acknowledged_batch >= (batch_id - 1)` but instead this // Drop the txn to restore the Batch to report to the DB
.map(|last_acknowledged_batch| (last_acknowledged_batch + 1) >= batch_id) drop(txn);
// We've never verified any Batches break;
.unwrap_or(false);
if !verified_prior_batch {
// Drop this txn, restoring the Batch to be worked on in the future
drop(txn);
return Ok(block_number > next_to_potentially_report);
}
}
// If this is the handover Batch, update the last session to sign a Batch
if handover_batch {
ReportDb::<S>::set_last_session_to_sign_batch_and_first_batch(
&mut txn,
session_to_sign_batch,
batch_id,
);
} }
} }
// TODO: The above code doesn't work if we end up with two Batches (the handover and the // If this is the handover Batch, update the last session to sign a Batch
// following) within this one Block due to Batch size limits if handover_batch {
BatchDb::set_last_session_to_sign_batch_and_first_batch(
// start with empty batch
let mut batches = vec![Batch { network, id: batch_id, instructions: vec![] }];
// We also track the return information for the InInstructions within a Batch in case
// they error
let mut return_information = vec![vec![]];
for Returnable { return_address, in_instruction } in in_instructions {
let balance = in_instruction.balance;
let batch = batches.last_mut().unwrap();
batch.instructions.push(in_instruction);
// check if batch is over-size
if batch.encode().len() > MAX_BATCH_SIZE {
// pop the last instruction so it's back in size
let in_instruction = batch.instructions.pop().unwrap();
// bump the id for the new batch
batch_id = ReportDb::<S>::acquire_batch_id(&mut txn);
// make a new batch with this instruction included
batches.push(Batch { network, id: batch_id, instructions: vec![in_instruction] });
// Since we're allocating a new batch, allocate a new set of return addresses for it
return_information.push(vec![]);
}
// For the set of return addresses for the InInstructions for the batch we just pushed
// onto, push this InInstruction's return addresses
return_information
.last_mut()
.unwrap()
.push(return_address.map(|address| ReturnInformation { address, balance }));
}
// Now that we've finalized the Batches, save the information for each to the database
assert_eq!(batches.len(), return_information.len());
for (batch, return_information) in batches.iter().zip(&return_information) {
assert_eq!(batch.instructions.len(), return_information.len());
ReportDb::<S>::save_batch_info(
&mut txn, &mut txn,
batch.id,
block_number,
session_to_sign_batch, session_to_sign_batch,
external_key_for_session_to_sign_batch, batch.id,
Blake2b::<U32>::digest(batch.instructions.encode()).into(),
); );
ReportDb::<S>::save_return_information(&mut txn, batch.id, return_information);
}
for batch in batches {
Batches::send(&mut txn, &batch);
BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch, &batch);
} }
} }
// Update the next to potentially report block BatchesToSign::send(&mut txn, &external_key_for_session_to_sign_batch.0, &batch);
ReportDb::<S>::set_next_to_potentially_report_block(&mut txn, block_number + 1);
txn.commit(); txn.commit();
made_progress = true;
} }
// Run dependents if we decided to report any blocks Ok(made_progress)
Ok(next_to_potentially_report <= highest_reportable)
} }
} }
} }

View File

@@ -14,7 +14,7 @@ use crate::{
lifetime::LifetimeStage, lifetime::LifetimeStage,
db::{ db::{
OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, InInstructionData, OutputWithInInstruction, Returnable, SenderScanData, ScannerGlobalDb, InInstructionData,
ScanToReportDb, ScanToEventualityDb, ScanToBatchDb, ScanToEventualityDb,
}, },
BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs, BlockExt, ScannerFeed, AddressFor, OutputFor, Return, sort_outputs,
eventuality::latest_scannable_block, eventuality::latest_scannable_block,
@@ -345,7 +345,7 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for ScanTask<D, S> {
// We need to also specify which key is responsible for signing the Batch for these, which // We need to also specify which key is responsible for signing the Batch for these, which
// will always be the oldest key (as the new key signing the Batch signifies handover // will always be the oldest key (as the new key signing the Batch signifies handover
// acceptance) // acceptance)
ScanToReportDb::<S>::send_in_instructions( ScanToBatchDb::<S>::send_in_instructions(
&mut txn, &mut txn,
b, b,
&InInstructionData { &InInstructionData {

View File

@@ -6,16 +6,14 @@ use borsh::{BorshSerialize, BorshDeserialize};
use serai_db::{Get, DbTxn, create_db, db_channel}; use serai_db::{Get, DbTxn, create_db, db_channel};
use serai_coins_primitives::OutInstructionWithBalance; use serai_coins_primitives::OutInstructionWithBalance;
use serai_validator_sets_primitives::Session;
use messages::substrate::ExecutedBatch;
use crate::{ScannerFeed, KeyFor}; use crate::{ScannerFeed, KeyFor};
#[derive(BorshSerialize, BorshDeserialize)] #[derive(BorshSerialize, BorshDeserialize)]
struct AcknowledgeBatchEncodable { struct AcknowledgeBatchEncodable {
batch_id: u32, batch: ExecutedBatch,
publisher: Session,
in_instructions_hash: [u8; 32],
in_instruction_results: Vec<messages::substrate::InInstructionResult>,
burns: Vec<OutInstructionWithBalance>, burns: Vec<OutInstructionWithBalance>,
key_to_activate: Option<Vec<u8>>, key_to_activate: Option<Vec<u8>>,
} }
@@ -27,10 +25,7 @@ enum ActionEncodable {
} }
pub(crate) struct AcknowledgeBatch<S: ScannerFeed> { pub(crate) struct AcknowledgeBatch<S: ScannerFeed> {
pub(crate) batch_id: u32, pub(crate) batch: ExecutedBatch,
pub(crate) publisher: Session,
pub(crate) in_instructions_hash: [u8; 32],
pub(crate) in_instruction_results: Vec<messages::substrate::InInstructionResult>,
pub(crate) burns: Vec<OutInstructionWithBalance>, pub(crate) burns: Vec<OutInstructionWithBalance>,
pub(crate) key_to_activate: Option<KeyFor<S>>, pub(crate) key_to_activate: Option<KeyFor<S>>,
} }
@@ -64,20 +59,14 @@ impl<S: ScannerFeed> SubstrateDb<S> {
pub(crate) fn queue_acknowledge_batch( pub(crate) fn queue_acknowledge_batch(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
batch_id: u32, batch: ExecutedBatch,
publisher: Session,
in_instructions_hash: [u8; 32],
in_instruction_results: Vec<messages::substrate::InInstructionResult>,
burns: Vec<OutInstructionWithBalance>, burns: Vec<OutInstructionWithBalance>,
key_to_activate: Option<KeyFor<S>>, key_to_activate: Option<KeyFor<S>>,
) { ) {
Actions::send( Actions::send(
txn, txn,
&ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { &ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable {
batch_id, batch,
publisher,
in_instructions_hash,
in_instruction_results,
burns, burns,
key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()), key_to_activate: key_to_activate.map(|key| key.to_bytes().as_ref().to_vec()),
}), }),
@@ -91,17 +80,11 @@ impl<S: ScannerFeed> SubstrateDb<S> {
let action_encodable = Actions::try_recv(txn)?; let action_encodable = Actions::try_recv(txn)?;
Some(match action_encodable { Some(match action_encodable {
ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable { ActionEncodable::AcknowledgeBatch(AcknowledgeBatchEncodable {
batch_id, batch,
publisher,
in_instructions_hash,
in_instruction_results,
burns, burns,
key_to_activate, key_to_activate,
}) => Action::AcknowledgeBatch(AcknowledgeBatch { }) => Action::AcknowledgeBatch(AcknowledgeBatch {
batch_id, batch,
publisher,
in_instructions_hash,
in_instruction_results,
burns, burns,
key_to_activate: key_to_activate.map(|key| { key_to_activate: key_to_activate.map(|key| {
let mut repr = <KeyFor<S> as GroupEncoding>::Repr::default(); let mut repr = <KeyFor<S> as GroupEncoding>::Repr::default();

View File

@@ -3,12 +3,12 @@ use core::{marker::PhantomData, future::Future};
use serai_db::{Get, DbTxn, Db}; use serai_db::{Get, DbTxn, Db};
use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance}; use serai_coins_primitives::{OutInstruction, OutInstructionWithBalance};
use serai_validator_sets_primitives::Session;
use messages::substrate::ExecutedBatch;
use primitives::task::ContinuallyRan; use primitives::task::ContinuallyRan;
use crate::{ use crate::{
db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches}, db::{ScannerGlobalDb, SubstrateToEventualityDb, AcknowledgedBatches},
report, ScannerFeed, KeyFor, index, batch, ScannerFeed, KeyFor,
}; };
mod db; mod db;
@@ -19,22 +19,11 @@ pub(crate) fn last_acknowledged_batch<S: ScannerFeed>(getter: &impl Get) -> Opti
} }
pub(crate) fn queue_acknowledge_batch<S: ScannerFeed>( pub(crate) fn queue_acknowledge_batch<S: ScannerFeed>(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
batch_id: u32, batch: ExecutedBatch,
publisher: Session,
in_instructions_hash: [u8; 32],
in_instruction_results: Vec<messages::substrate::InInstructionResult>,
burns: Vec<OutInstructionWithBalance>, burns: Vec<OutInstructionWithBalance>,
key_to_activate: Option<KeyFor<S>>, key_to_activate: Option<KeyFor<S>>,
) { ) {
SubstrateDb::<S>::queue_acknowledge_batch( SubstrateDb::<S>::queue_acknowledge_batch(txn, batch, burns, key_to_activate)
txn,
batch_id,
publisher,
in_instructions_hash,
in_instruction_results,
burns,
key_to_activate,
)
} }
pub(crate) fn queue_queue_burns<S: ScannerFeed>( pub(crate) fn queue_queue_burns<S: ScannerFeed>(
txn: &mut impl DbTxn, txn: &mut impl DbTxn,
@@ -73,40 +62,38 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
}; };
match action { match action {
Action::AcknowledgeBatch(AcknowledgeBatch { Action::AcknowledgeBatch(AcknowledgeBatch { batch, mut burns, key_to_activate }) => {
batch_id,
publisher,
in_instructions_hash,
in_instruction_results,
mut burns,
key_to_activate,
}) => {
// Check if we have the information for this batch // Check if we have the information for this batch
let Some(report::BatchInfo { let Some(batch::BatchInfo {
block_number, block_number,
session_to_sign_batch, session_to_sign_batch,
external_key_for_session_to_sign_batch, external_key_for_session_to_sign_batch,
in_instructions_hash: expected_in_instructions_hash, in_instructions_hash,
}) = report::take_info_for_batch::<S>(&mut txn, batch_id) }) = batch::take_info_for_batch::<S>(&mut txn, batch.id)
else { else {
// If we don't, drop this txn (restoring the action to the database) // If we don't, drop this txn (restoring the action to the database)
drop(txn); drop(txn);
return Ok(made_progress); return Ok(made_progress);
}; };
assert_eq!( assert_eq!(
publisher, session_to_sign_batch, batch.publisher, session_to_sign_batch,
"batch acknowledged on-chain was acknowledged by an unexpected publisher" "batch acknowledged on-chain was acknowledged by an unexpected publisher"
); );
assert_eq!( assert_eq!(
in_instructions_hash, expected_in_instructions_hash, batch.external_network_block_hash,
"batch acknowledged on-chain was distinct" index::block_id(&txn, block_number),
"batch acknowledged on-chain was for a distinct block"
);
assert_eq!(
batch.in_instructions_hash, in_instructions_hash,
"batch acknowledged on-chain had distinct InInstructions"
); );
SubstrateDb::<S>::set_last_acknowledged_batch(&mut txn, batch_id); SubstrateDb::<S>::set_last_acknowledged_batch(&mut txn, batch.id);
AcknowledgedBatches::send( AcknowledgedBatches::send(
&mut txn, &mut txn,
&external_key_for_session_to_sign_batch.0, &external_key_for_session_to_sign_batch.0,
batch_id, batch.id,
); );
// Mark we made progress and handle this // Mark we made progress and handle this
@@ -143,23 +130,23 @@ impl<D: Db, S: ScannerFeed> ContinuallyRan for SubstrateTask<D, S> {
// Return the balances for any InInstructions which failed to execute // Return the balances for any InInstructions which failed to execute
{ {
let return_information = report::take_return_information::<S>(&mut txn, batch_id) let return_information = batch::take_return_information::<S>(&mut txn, batch.id)
.expect("didn't save the return information for Batch we published"); .expect("didn't save the return information for Batch we published");
assert_eq!( assert_eq!(
in_instruction_results.len(), batch.in_instruction_results.len(),
return_information.len(), return_information.len(),
"amount of InInstruction succeededs differed from amount of return information saved" "amount of InInstruction succeededs differed from amount of return information saved"
); );
// We map these into standard Burns // We map these into standard Burns
for (result, return_information) in for (result, return_information) in
in_instruction_results.into_iter().zip(return_information) batch.in_instruction_results.into_iter().zip(return_information)
{ {
if result == messages::substrate::InInstructionResult::Succeeded { if result == messages::substrate::InInstructionResult::Succeeded {
continue; continue;
} }
if let Some(report::ReturnInformation { address, balance }) = return_information { if let Some(batch::ReturnInformation { address, balance }) = return_information {
burns.push(OutInstructionWithBalance { burns.push(OutInstructionWithBalance {
instruction: OutInstruction { address: address.into() }, instruction: OutInstruction { address: address.into() },
balance, balance,

View File

@@ -136,20 +136,6 @@ impl<D: Db, C: Coordinator> ContinuallyRan for CoordinatorTask<D, C> {
} }
} }
// Publish the Batches
{
let mut txn = self.db.txn();
while let Some(batch) = scanner::Batches::try_recv(&mut txn) {
iterated = true;
self
.coordinator
.publish_batch(batch)
.await
.map_err(|e| format!("couldn't publish Batch: {e:?}"))?;
}
txn.commit();
}
// Publish the signed Batches // Publish the signed Batches
{ {
let mut txn = self.db.txn(); let mut txn = self.db.txn();

View File

@@ -12,7 +12,7 @@ use frost::dkg::{ThresholdCore, ThresholdKeys};
use serai_primitives::Signature; use serai_primitives::Signature;
use serai_validator_sets_primitives::{Session, Slash}; use serai_validator_sets_primitives::{Session, Slash};
use serai_in_instructions_primitives::{Batch, SignedBatch}; use serai_in_instructions_primitives::SignedBatch;
use serai_db::{DbTxn, Db}; use serai_db::{DbTxn, Db};
@@ -64,12 +64,6 @@ pub trait Coordinator: 'static + Send + Sync {
signature: Signature, signature: Signature,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>; ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
/// Publish a `Batch`.
fn publish_batch(
&mut self,
batch: Batch,
) -> impl Send + Future<Output = Result<(), Self::EphemeralError>>;
/// Publish a `SignedBatch`. /// Publish a `SignedBatch`.
fn publish_signed_batch( fn publish_signed_batch(
&mut self, &mut self,
@@ -422,7 +416,7 @@ impl<
block: [u8; 32], block: [u8; 32],
) { ) {
// Don't cosign blocks with already retired keys // Don't cosign blocks with already retired keys
if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) {
return; return;
} }
@@ -444,7 +438,7 @@ impl<
slash_report: &Vec<Slash>, slash_report: &Vec<Slash>,
) { ) {
// Don't sign slash reports with already retired keys // Don't sign slash reports with already retired keys
if Some(session.0) <= db::LatestRetiredSession::get(txn).map(|session| session.0) { if Some(session.0) <= db::LatestRetiredSession::get(&txn).map(|session| session.0) {
return; return;
} }

View File

@@ -2,6 +2,7 @@ use serai_primitives::*;
pub use serai_in_instructions_primitives as primitives; pub use serai_in_instructions_primitives as primitives;
use primitives::SignedBatch; use primitives::SignedBatch;
use serai_validator_sets_primitives::Session;
#[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
@@ -12,11 +13,18 @@ pub enum Call {
} }
#[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)]
#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))]
#[cfg_attr(feature = "serde", derive(serde::Serialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize))]
#[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))] #[cfg_attr(all(feature = "std", feature = "serde"), derive(serde::Deserialize))]
pub enum Event { pub enum Event {
Batch { network: NetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, Batch {
InstructionFailure { network: NetworkId, id: u32, index: u32 }, network: NetworkId,
Halt { network: NetworkId }, publishing_session: Session,
id: u32,
external_network_block_hash: [u8; 32],
in_instructions_hash: [u8; 32],
in_instruction_results: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
},
Halt {
network: NetworkId,
},
} }

View File

@@ -1,10 +1,7 @@
pub use serai_abi::in_instructions::primitives; pub use serai_abi::in_instructions::primitives;
use primitives::SignedBatch; use primitives::SignedBatch;
use crate::{ use crate::{primitives::NetworkId, Transaction, SeraiError, Serai, TemporalSerai};
primitives::{BlockHash, NetworkId},
Transaction, SeraiError, Serai, TemporalSerai,
};
pub type InInstructionsEvent = serai_abi::in_instructions::Event; pub type InInstructionsEvent = serai_abi::in_instructions::Event;

View File

@@ -45,13 +45,13 @@ impl Block {
} }
/// Returns the time of this block, set by its producer, in milliseconds since the epoch. /// Returns the time of this block, set by its producer, in milliseconds since the epoch.
pub fn time(&self) -> Result<u64, SeraiError> { pub fn time(&self) -> Option<u64> {
for transaction in &self.transactions { for transaction in &self.transactions {
if let Call::Timestamp(timestamp::Call::set { now }) = transaction.call() { if let Call::Timestamp(timestamp::Call::set { now }) = transaction.call() {
return Ok(*now); return Some(*now);
} }
} }
Err(SeraiError::InvalidNode("no time was present in block".to_string())) None
} }
} }

View File

@@ -65,8 +65,7 @@ pub async fn set_up_genesis(
}) })
.or_insert(0); .or_insert(0);
let batch = let batch = Batch { network: coin.network(), id: batch_ids[&coin.network()], instructions };
Batch { network: coin.network(), id: batch_ids[&coin.network()], block, instructions };
provide_batch(serai, batch).await; provide_batch(serai, batch).await;
} }

View File

@@ -60,9 +60,17 @@ pub mod pallet {
#[pallet::event] #[pallet::event]
#[pallet::generate_deposit(fn deposit_event)] #[pallet::generate_deposit(fn deposit_event)]
pub enum Event<T: Config> { pub enum Event<T: Config> {
Batch { network: NetworkId, id: u32, block: BlockHash, instructions_hash: [u8; 32] }, Batch {
InstructionFailure { network: NetworkId, id: u32, index: u32 }, network: NetworkId,
Halt { network: NetworkId }, publishing_session: Session,
external_network_block_hash: [u8; 32],
id: u32,
in_instructions_hash: [u8; 32],
in_instruction_results: BitVec<u8, Lsb0>,
},
Halt {
network: NetworkId,
},
} }
#[pallet::error] #[pallet::error]
@@ -254,22 +262,7 @@ pub mod pallet {
pub fn execute_batch(origin: OriginFor<T>, batch: SignedBatch) -> DispatchResult { pub fn execute_batch(origin: OriginFor<T>, batch: SignedBatch) -> DispatchResult {
ensure_none(origin)?; ensure_none(origin)?;
let batch = batch.batch; // The entire Batch execution is handled in pre_dispatch
Self::deposit_event(Event::Batch {
network: batch.network,
id: batch.id,
instructions_hash: blake2_256(&batch.instructions.encode()),
});
for (i, instruction) in batch.instructions.into_iter().enumerate() {
if Self::execute(instruction).is_err() {
Self::deposit_event(Event::InstructionFailure {
network: batch.network,
id: batch.id,
index: u32::try_from(i).unwrap(),
});
}
}
Ok(()) Ok(())
} }
@@ -300,6 +293,7 @@ pub mod pallet {
// verify the signature // verify the signature
let (current_session, prior, current) = keys_for_network::<T>(network)?; let (current_session, prior, current) = keys_for_network::<T>(network)?;
let prior_session = Session(current_session.0 - 1);
let batch_message = batch_message(&batch.batch); let batch_message = batch_message(&batch.batch);
// Check the prior key first since only a single `Batch` (the last one) will be when prior is // Check the prior key first since only a single `Batch` (the last one) will be when prior is
// Some yet prior wasn't the signing key // Some yet prior wasn't the signing key
@@ -315,6 +309,8 @@ pub mod pallet {
Err(InvalidTransaction::BadProof)?; Err(InvalidTransaction::BadProof)?;
} }
let batch = batch.batch;
if Halted::<T>::contains_key(network) { if Halted::<T>::contains_key(network) {
Err(InvalidTransaction::Custom(1))?; Err(InvalidTransaction::Custom(1))?;
} }
@@ -323,10 +319,7 @@ pub mod pallet {
// key is publishing `Batch`s. This should only happen once the current key has verified all // key is publishing `Batch`s. This should only happen once the current key has verified all
// `Batch`s published by the prior key, meaning they are accepting the hand-over. // `Batch`s published by the prior key, meaning they are accepting the hand-over.
if prior.is_some() && (!valid_by_prior) { if prior.is_some() && (!valid_by_prior) {
ValidatorSets::<T>::retire_set(ValidatorSet { ValidatorSets::<T>::retire_set(ValidatorSet { network, session: prior_session });
network,
session: Session(current_session.0 - 1),
});
} }
// check that this validator set isn't publishing a batch more than once per block // check that this validator set isn't publishing a batch more than once per block
@@ -335,34 +328,40 @@ pub mod pallet {
if last_block >= current_block { if last_block >= current_block {
Err(InvalidTransaction::Future)?; Err(InvalidTransaction::Future)?;
} }
LastBatchBlock::<T>::insert(batch.batch.network, frame_system::Pallet::<T>::block_number()); LastBatchBlock::<T>::insert(batch.network, frame_system::Pallet::<T>::block_number());
// Verify the batch is sequential // Verify the batch is sequential
// LastBatch has the last ID set. The next ID should be it + 1 // LastBatch has the last ID set. The next ID should be it + 1
// If there's no ID, the next ID should be 0 // If there's no ID, the next ID should be 0
let expected = LastBatch::<T>::get(network).map_or(0, |prev| prev + 1); let expected = LastBatch::<T>::get(network).map_or(0, |prev| prev + 1);
if batch.batch.id < expected { if batch.id < expected {
Err(InvalidTransaction::Stale)?; Err(InvalidTransaction::Stale)?;
} }
if batch.batch.id > expected { if batch.id > expected {
Err(InvalidTransaction::Future)?; Err(InvalidTransaction::Future)?;
} }
LastBatch::<T>::insert(batch.batch.network, batch.batch.id); LastBatch::<T>::insert(batch.network, batch.id);
// Verify all Balances in this Batch are for this network let in_instructions_hash = blake2_256(&batch.instructions.encode());
for instruction in &batch.batch.instructions { let mut in_instruction_results = BitVec::new();
for (i, instruction) in batch.instructions.into_iter().enumerate() {
// Verify this coin is for this network // Verify this coin is for this network
// If this is ever hit, it means the validator set has turned malicious and should be fully if instruction.balance.coin.network() != batch.network {
// slashed
// Because we have an error here, no validator set which turns malicious should execute
// this code path
// Accordingly, there's no value in writing code to fully slash the network, when such an
// even would require a runtime upgrade to fully resolve anyways
if instruction.balance.coin.network() != batch.batch.network {
Err(InvalidTransaction::Custom(2))?; Err(InvalidTransaction::Custom(2))?;
} }
in_instruction_results.push(Self::execute(instruction).is_ok());
} }
Self::deposit_event(Event::Batch {
network: batch.network,
publishing_session: if valid_by_prior { prior_session } else { current_session },
id: batch.id,
external_network_block_hash: batch.external_network_block_hash,
in_instructions_hash,
in_instruction_results,
});
ValidTransaction::with_tag_prefix("in-instructions") ValidTransaction::with_tag_prefix("in-instructions")
.and_provides((batch.batch.network, batch.batch.id)) .and_provides((batch.batch.network, batch.batch.id))
// Set a 10 block longevity, though this should be included in the next block // Set a 10 block longevity, though this should be included in the next block

View File

@@ -106,6 +106,7 @@ pub struct InInstructionWithBalance {
pub struct Batch { pub struct Batch {
pub network: NetworkId, pub network: NetworkId,
pub id: u32, pub id: u32,
pub external_network_block_hash: [u8; 32],
pub instructions: Vec<InInstructionWithBalance>, pub instructions: Vec<InInstructionWithBalance>,
} }