8 Commits

Author SHA1 Message Date
Luke Parker
8bafeab5b3 Tidy serai-signals-pallet
Adds `serai-validator-sets-pallet` and `serai-signals-pallet` to the runtime.
2025-09-16 08:45:02 -04:00
Luke Parker
3722df7326 Introduce KeyShares struct to represent the amount of key shares
Includes the associated improvements and bug fixes.
2025-09-16 08:45:02 -04:00
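The commit only names the new type; its definition isn't part of the files shown in this diff. As a rough illustration of the idea, a newtype wrapping a key-share count might look like the following (hypothetical sketch; the field width, derives, and method are assumptions, not taken from the Serai codebase):

```rust
// Hypothetical sketch: the actual `KeyShares` definition is not included in this diff.
// Wrapping the count in a newtype keeps "amount of key shares" from being mixed up
// with other integer quantities (indexes, weights, balances) at compile time.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Debug)]
pub struct KeyShares(pub u16);

impl KeyShares {
  /// The amount of key shares as a plain integer.
  pub fn get(self) -> u16 {
    self.0
  }
}
```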
Luke Parker
ddb8e1398e Finally make modular-frost work with alloc alone
Carries the update through to `frost-schnorrkel` and `bitcoin-serai`.
2025-09-16 08:45:02 -04:00
Luke Parker
2be69b23b1 Tweak multiexp to compile on core
On `core` alone, it falls back to a serial implementation which offers no benefit in itself; the point is that when `alloc` _is_ enabled, the multi-scalar multiplication algorithms are used.

`schnorr-signatures` was previously tweaked to include a shim for
`SchnorrSignature::verify` which didn't use `multiexp_vartime` yet followed this
same premise. Now, instead of callers writing these shims, the shim lives within
`multiexp`.
2025-09-16 08:45:02 -04:00
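In effect, with only `core` available, the multiexponentiation reduces to multiplying each pair and summing the results. Below is a condensed sketch of that fallback, mirroring the shape of the `multiexp` `lib.rs` changes further down in this diff (simplified; the real code also provides the constant-time `multiexp`, keeps its documentation, and gates the table-based algorithms on the `alloc` feature):

```rust
use ff::PrimeFieldBits;
use group::Group;

// Serial fallback used when the `alloc` feature is disabled: there are no
// Straus/Pippenger tables to allocate, so each pair is multiplied and summed in turn.
#[cfg(not(feature = "alloc"))]
pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
  pairs.iter().map(|(scalar, point)| *point * scalar).sum()
}
```

This is also why, later in this diff, `SchnorrSignature::verify` drops its own `#[cfg(feature = "alloc")]` shim and calls `multiexp_vartime` unconditionally.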
Luke Parker
a82ccadbb0 Correct std-shims feature flagging 2025-09-16 08:45:02 -04:00
Luke Parker
1ff2934927 cargo update 2025-09-16 08:44:54 -04:00
Luke Parker
cd4ffa862f Remove coins, validator-sets use of Substrate's event system
We've defined our own.
2025-09-15 21:32:20 -04:00
Luke Parker
c0a4d85ae6 Restore claim_deallocation call to validator-sets pallet 2025-09-15 21:32:01 -04:00
50 changed files with 1294 additions and 954 deletions

Cargo.lock (generated), 315 changes
View File

@@ -23,11 +23,11 @@ dependencies = [
[[package]]
name = "addr2line"
version = "0.25.0"
version = "0.25.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9acbfca36652500c911ddb767ed433e3ed99b032b5d935be73c6923662db1d43"
checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b"
dependencies = [
"gimli 0.32.2",
"gimli 0.32.3",
]
[[package]]
@@ -197,9 +197,9 @@ dependencies = [
[[package]]
name = "alloy-eips"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a15b4b0f6bab47aae017d52bb5a739bda381553c09fb9918b7172721ef5f5de"
checksum = "5cd749c57f38f8cbf433e651179fc5a676255e6b95044f467d49255d2b81725a"
dependencies = [
"alloy-eip2124",
"alloy-eip2930",
@@ -219,9 +219,9 @@ dependencies = [
[[package]]
name = "alloy-genesis"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "33ba1cbc25a07e0142e8875fcbe80e1fdb02be8160ae186b90f4b9a69a72ed2b"
checksum = "7d32cbf6c26d7d87e8a4e5925bbce41456e0bbeed95601add3443af277cd604e"
dependencies = [
"alloy-eips",
"alloy-primitives",
@@ -257,9 +257,9 @@ dependencies = [
[[package]]
name = "alloy-json-rpc"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f8882ec8e4542cfd02aadc6dccbe90caa73038f60016d936734eb6ced53d2167"
checksum = "f614019a029c8fec14ae661aa7d4302e6e66bdbfb869dab40e78dcfba935fc97"
dependencies = [
"alloy-primitives",
"alloy-sol-types",
@@ -343,7 +343,7 @@ dependencies = [
"derive_more 2.0.1",
"foldhash 0.1.5",
"hashbrown 0.15.5",
"indexmap 2.11.1",
"indexmap 2.11.3",
"itoa",
"k256",
"keccak-asm",
@@ -419,9 +419,9 @@ dependencies = [
[[package]]
name = "alloy-rpc-client"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "25289674cd8c58fcca2568b5350423cb0dd7bca8c596c5e2869bfe4c5c57ed14"
checksum = "33732242ca63f107f5f8284190244038905fb233280f4b7c41f641d4f584d40d"
dependencies = [
"alloy-json-rpc",
"alloy-primitives",
@@ -450,9 +450,9 @@ dependencies = [
[[package]]
name = "alloy-rpc-types-debug"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c2fe118e6c152d54cb4549b9835fb87d38b12754bb121375183ee3ec84bd0849"
checksum = "d46cb226f1c8071875f4d0d8a0eb3ac571fcc49cd3bcdc20a5818de7b6ef0634"
dependencies = [
"alloy-primitives",
"derive_more 2.0.1",
@@ -497,9 +497,9 @@ dependencies = [
[[package]]
name = "alloy-serde"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1b3b1078b8775077525bc9fe9f6577e815ceaecd6c412a4f3b4d8aa2836e8f6"
checksum = "04dfe41a47805a34b848c83448946ca96f3d36842e8c074bcf8fa0870e337d12"
dependencies = [
"alloy-primitives",
"serde",
@@ -508,9 +508,9 @@ dependencies = [
[[package]]
name = "alloy-signer"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "10ab1b8d4649bf7d0db8ab04e31658a6cc20364d920795484d886c35bed3bab4"
checksum = "f79237b4c1b0934d5869deea4a54e6f0a7425a8cd943a739d6293afdf893d847"
dependencies = [
"alloy-primitives",
"async-trait",
@@ -571,7 +571,7 @@ dependencies = [
"alloy-sol-macro-input",
"const-hex",
"heck 0.5.0",
"indexmap 2.11.1",
"indexmap 2.11.3",
"proc-macro-error2",
"proc-macro2",
"quote",
@@ -620,9 +620,9 @@ dependencies = [
[[package]]
name = "alloy-transport"
version = "1.0.30"
version = "1.0.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dce5129146a76ca6139a19832c75ad408857a56bcd18cd2c684183b8eacd78d8"
checksum = "cb43750e137fe3a69a325cd89a8f8e2bbf4f83e70c0f60fbe49f22511ca075e8"
dependencies = [
"alloy-json-rpc",
"alloy-primitives",
@@ -1115,11 +1115,11 @@ dependencies = [
[[package]]
name = "async-io"
version = "2.5.0"
version = "2.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca"
checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc"
dependencies = [
"async-lock",
"autocfg",
"cfg-if",
"concurrent-queue",
"futures-io",
@@ -1128,7 +1128,7 @@ dependencies = [
"polling",
"rustix",
"slab",
"windows-sys 0.60.2",
"windows-sys 0.61.0",
]
[[package]]
@@ -1300,7 +1300,7 @@ version = "0.72.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"cexpr",
"clang-sys",
"itertools 0.13.0",
@@ -1425,9 +1425,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
[[package]]
name = "bitflags"
version = "2.9.3"
version = "2.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "34efbcccd345379ca2868b2b2c9d3782e9cc58ba87bc7d79d5b53d9c9ae6f25d"
checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394"
[[package]]
name = "bitvec"
@@ -1666,11 +1666,11 @@ dependencies = [
[[package]]
name = "camino"
version = "1.1.12"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd0b03af37dad7a14518b7691d81acb0f8222604ad3d1b02f6b4bed5188c0cd5"
checksum = "e1de8bc0aa9e9385ceb3bf0c152e3a9b9544f6c4a912c8ae504e80c1f0368603"
dependencies = [
"serde",
"serde_core",
]
[[package]]
@@ -1690,7 +1690,7 @@ checksum = "eee4243f1f26fc7a42710e7439c149e2b10b05472f88090acce52632f231a73a"
dependencies = [
"camino",
"cargo-platform",
"semver 1.0.26",
"semver 1.0.27",
"serde",
"serde_json",
"thiserror 1.0.69",
@@ -1912,28 +1912,27 @@ dependencies = [
[[package]]
name = "console"
version = "0.16.0"
version = "0.16.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2e09ced7ebbccb63b4c65413d821f2e00ce54c5ca4514ddc6b3c892fdbcbc69d"
checksum = "b430743a6eb14e9764d4260d4c0d8123087d504eeb9c48f2b2a5e810dd369df4"
dependencies = [
"encode_unicode",
"libc",
"once_cell",
"unicode-width",
"windows-sys 0.60.2",
"windows-sys 0.61.0",
]
[[package]]
name = "const-hex"
version = "1.15.0"
version = "1.16.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dccd746bf9b1038c0507b7cec21eb2b11222db96a2902c96e8c185d6d20fb9c4"
checksum = "b6407bff74dea37e0fa3dc1c1c974e5d46405f0c987bf9997a0762adce71eda6"
dependencies = [
"cfg-if",
"cpufeatures",
"hex",
"proptest",
"serde",
"serde_core",
]
[[package]]
@@ -2084,7 +2083,7 @@ dependencies = [
"cranelift-control",
"cranelift-entity",
"cranelift-isle",
"gimli 0.32.2",
"gimli 0.32.3",
"hashbrown 0.15.5",
"log",
"pulley-interpreter",
@@ -2326,11 +2325,12 @@ dependencies = [
[[package]]
name = "cxx"
version = "1.0.180"
version = "1.0.184"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6ecd70e33fb57b5fec1608d572bf8dc382f5385a19529056b32307a29ac329be"
checksum = "be4a0beb369d20d0de6aa7084ee523e4c9a31d7d8c61ba357b119bb574d7f368"
dependencies = [
"cc",
"cxx-build",
"cxxbridge-cmd",
"cxxbridge-flags",
"cxxbridge-macro",
@@ -2340,13 +2340,13 @@ dependencies = [
[[package]]
name = "cxx-build"
version = "1.0.175"
version = "1.0.184"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d4e2aa0ea9f398b72f329197cfad624fcb16b2538d3ffb0f71f51cd19fa2a512"
checksum = "27d955b93e56a8e45cbc34df0ae920d8b5ad01541a4571222c78527c00e1a40a"
dependencies = [
"cc",
"codespan-reporting",
"indexmap 2.11.1",
"indexmap 2.11.3",
"proc-macro2",
"quote",
"scratch",
@@ -2355,13 +2355,13 @@ dependencies = [
[[package]]
name = "cxxbridge-cmd"
version = "1.0.180"
version = "1.0.184"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "64320fd0856fdf2421f8404b67d41e91291cbcfa3d57457b390f0a2618ee9a68"
checksum = "052f6c468d9dabdc2b8b228bcb2d7843b2bea0f3fb9c4e2c6ba5852574ec0150"
dependencies = [
"clap",
"codespan-reporting",
"indexmap 2.11.1",
"indexmap 2.11.3",
"proc-macro2",
"quote",
"syn 2.0.106",
@@ -2369,17 +2369,17 @@ dependencies = [
[[package]]
name = "cxxbridge-flags"
version = "1.0.180"
version = "1.0.184"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "77e40964f209961217b972415a8e3a0c23299076a0b2dfe79fa8366b5e5c833e"
checksum = "0fd145fa180986cb8002c63217d03b2c782fdcd5fa323adcd1f62d2d6ece6144"
[[package]]
name = "cxxbridge-macro"
version = "1.0.180"
version = "1.0.184"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51afdec15d8072d1b69f54f645edaf54250088a7e54c4fe493016781278136bd"
checksum = "02ac4a3bc4484a2daa0a8421c9588bd26522be9682a2fe02c7087bc4e8bc3c60"
dependencies = [
"indexmap 2.11.1",
"indexmap 2.11.3",
"proc-macro2",
"quote",
"rustversion",
@@ -2666,7 +2666,7 @@ dependencies = [
"libc",
"option-ext",
"redox_users",
"windows-sys 0.60.2",
"windows-sys 0.61.0",
]
[[package]]
@@ -2995,7 +2995,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb"
dependencies = [
"libc",
"windows-sys 0.60.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -3377,11 +3377,11 @@ version = "0.2.0"
dependencies = [
"ciphersuite 0.4.2",
"flexible-transcript",
"group",
"modular-frost",
"rand_core 0.6.4",
"schnorr-signatures",
"schnorrkel",
"std-shims",
"zeroize",
]
@@ -3640,7 +3640,7 @@ dependencies = [
"js-sys",
"libc",
"r-efi",
"wasi 0.14.5+wasi-0.2.4",
"wasi 0.14.7+wasi-0.2.4",
"wasm-bindgen",
]
@@ -3672,12 +3672,12 @@ checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f"
[[package]]
name = "gimli"
version = "0.32.2"
version = "0.32.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc6298e594375a7fead9efd5568f0a46e6a154fb6a9bdcbe3c06946ffd81a5f6"
checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7"
dependencies = [
"fallible-iterator",
"indexmap 2.11.1",
"indexmap 2.11.3",
"stable_deref_trait",
]
@@ -3743,7 +3743,7 @@ dependencies = [
"futures-core",
"futures-sink",
"http",
"indexmap 2.11.1",
"indexmap 2.11.3",
"slab",
"tokio",
"tokio-util",
@@ -3839,9 +3839,6 @@ name = "hex"
version = "0.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70"
dependencies = [
"serde",
]
[[package]]
name = "hex-conservative"
@@ -3998,15 +3995,15 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9"
[[package]]
name = "humantime"
version = "2.2.0"
version = "2.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f"
checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424"
[[package]]
name = "hybrid-array"
version = "0.4.0"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fe39a812f039072707ce38020acbab2f769087952eddd9e2b890f37654b2349"
checksum = "c7116c472cf19838450b1d421b4e842569f52b519d640aee9ace1ebcf5b21051"
dependencies = [
"typenum",
]
@@ -4068,9 +4065,9 @@ dependencies = [
[[package]]
name = "hyper-util"
version = "0.1.16"
version = "0.1.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8d9b05277c7e8da2c93a568989bb6207bef0112e8d17df7a6eda4a3cf143bc5e"
checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
dependencies = [
"bytes",
"futures-channel",
@@ -4081,7 +4078,7 @@ dependencies = [
"hyper",
"libc",
"pin-project-lite",
"socket2 0.6.0",
"socket2 0.5.10",
"tokio",
"tower-service",
"tracing",
@@ -4104,9 +4101,9 @@ dependencies = [
[[package]]
name = "iana-time-zone"
version = "0.1.63"
version = "0.1.64"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8"
checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb"
dependencies = [
"android_system_properties",
"core-foundation-sys",
@@ -4279,13 +4276,14 @@ dependencies = [
[[package]]
name = "indexmap"
version = "2.11.1"
version = "2.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "206a8042aec68fa4a62e8d3f7aa4ceb508177d9324faf261e1959e495b7a1921"
checksum = "92119844f513ffa41556430369ab02c295a3578af21cf945caa3e9e0c2481ac3"
dependencies = [
"equivalent",
"hashbrown 0.15.5",
"serde",
"serde_core",
]
[[package]]
@@ -4312,7 +4310,7 @@ version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"cfg-if",
"libc",
]
@@ -5015,11 +5013,11 @@ dependencies = [
[[package]]
name = "libredox"
version = "0.1.9"
version = "0.1.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "391290121bad3d37fbddad76d8f5d1c1c314cfc646d143d7e07a3086ddff0ce3"
checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"libc",
]
@@ -5443,6 +5441,7 @@ dependencies = [
"schnorr-signatures",
"serde_json",
"sha2 0.10.9",
"std-shims",
"subtle",
"thiserror 2.0.16",
"zeroize",
@@ -5716,8 +5715,6 @@ dependencies = [
"group",
"k256",
"rand_core 0.6.4",
"rustversion",
"std-shims",
"zeroize",
]
@@ -6067,7 +6064,7 @@ checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe"
dependencies = [
"crc32fast",
"hashbrown 0.15.5",
"indexmap 2.11.1",
"indexmap 2.11.3",
"memchr",
]
@@ -6340,9 +6337,9 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220"
[[package]]
name = "pest"
version = "2.8.1"
version = "2.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1db05f56d34358a8b1066f67cbb203ee3e7ed2ba674a6263a1d5ec6db2204323"
checksum = "21e0a3a33733faeaf8651dfee72dd0f388f0c8e5ad496a3478fa5a922f49cfa8"
dependencies = [
"memchr",
"thiserror 2.0.16",
@@ -6356,7 +6353,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772"
dependencies = [
"fixedbitset",
"indexmap 2.11.1",
"indexmap 2.11.3",
]
[[package]]
@@ -6451,16 +6448,16 @@ checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c"
[[package]]
name = "polling"
version = "3.10.0"
version = "3.11.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b5bd19146350fe804f7cb2669c851c03d69da628803dab0d98018142aaa5d829"
checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218"
dependencies = [
"cfg-if",
"concurrent-queue",
"hermit-abi",
"pin-project-lite",
"rustix",
"windows-sys 0.60.2",
"windows-sys 0.61.0",
]
[[package]]
@@ -6731,7 +6728,7 @@ checksum = "6fcdab19deb5195a31cf7726a210015ff1496ba1464fd42cb4f537b8b01b471f"
dependencies = [
"bit-set",
"bit-vec",
"bitflags 2.9.3",
"bitflags 2.9.4",
"lazy_static",
"num-traits",
"rand 0.9.2",
@@ -6760,7 +6757,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1"
dependencies = [
"heck 0.4.1",
"itertools 0.14.0",
"itertools 0.13.0",
"log",
"multimap",
"once_cell",
@@ -6780,7 +6777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425"
dependencies = [
"anyhow",
"itertools 0.14.0",
"itertools 0.13.0",
"proc-macro2",
"quote",
"syn 2.0.106",
@@ -6875,7 +6872,7 @@ dependencies = [
"quinn-udp",
"rustc-hash",
"rustls",
"socket2 0.6.0",
"socket2 0.5.10",
"thiserror 2.0.16",
"tokio",
"tracing",
@@ -6912,9 +6909,9 @@ dependencies = [
"cfg_aliases",
"libc",
"once_cell",
"socket2 0.6.0",
"socket2 0.5.10",
"tracing",
"windows-sys 0.60.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -7034,7 +7031,7 @@ version = "11.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
]
[[package]]
@@ -7082,7 +7079,7 @@ version = "0.5.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
]
[[package]]
@@ -7161,9 +7158,9 @@ checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001"
[[package]]
name = "resolv-conf"
version = "0.7.4"
version = "0.7.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "95325155c684b1c89f7765e30bc1c42e4a6da51ca513615660cb8a62ef9a88e3"
checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799"
[[package]]
name = "revm"
@@ -7348,7 +7345,7 @@ version = "7.0.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f64fbacb86008394aaebd3454f9643b7d5a782bd251135e17c5b33da592d84d"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"revm-bytecode",
"revm-primitives",
"serde",
@@ -7530,7 +7527,7 @@ version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
dependencies = [
"semver 1.0.26",
"semver 1.0.27",
]
[[package]]
@@ -7548,11 +7545,11 @@ version = "1.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"errno",
"libc",
"linux-raw-sys",
"windows-sys 0.60.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -7593,9 +7590,9 @@ dependencies = [
[[package]]
name = "rustls-webpki"
version = "0.103.4"
version = "0.103.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0a17884ae0c1b773f1ccd2bd4a8c72f16da897310a98b0e84bf349ad5ead92fc"
checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb"
dependencies = [
"ring",
"rustls-pki-types",
@@ -8524,7 +8521,7 @@ dependencies = [
"async-trait",
"futures",
"futures-timer",
"indexmap 2.11.1",
"indexmap 2.11.3",
"itertools 0.14.0",
"linked-hash-map",
"parity-scale-codec",
@@ -8553,7 +8550,7 @@ source = "git+https://github.com/serai-dex/patch-polkadot-sdk?rev=d4624c561765c1
dependencies = [
"async-trait",
"futures",
"indexmap 2.11.1",
"indexmap 2.11.3",
"log",
"parity-scale-codec",
"serde",
@@ -8604,11 +8601,11 @@ dependencies = [
[[package]]
name = "schannel"
version = "0.1.27"
version = "0.1.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d"
checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1"
dependencies = [
"windows-sys 0.59.0",
"windows-sys 0.61.0",
]
[[package]]
@@ -8775,7 +8772,7 @@ version = "3.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "60b369d18893388b345804dc0007963c99b7d665ae71d275812d828c6f089640"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"core-foundation 0.10.1",
"core-foundation-sys",
"libc",
@@ -8803,11 +8800,12 @@ dependencies = [
[[package]]
name = "semver"
version = "1.0.26"
version = "1.0.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0"
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
dependencies = [
"serde",
"serde_core",
]
[[package]]
@@ -9297,10 +9295,15 @@ dependencies = [
"ciphersuite 0.4.2",
"dalek-ff-group 0.5.0",
"dkg",
"dkg-dealer",
"dkg-evrf",
"dkg-musig",
"dkg-recovery",
"embedwards25519",
"flexible-transcript",
"frost-schnorrkel",
"minimal-ed448",
"modular-frost",
"multiexp",
"prime-field",
"schnorr-signatures",
@@ -9687,6 +9690,8 @@ dependencies = [
"parity-scale-codec",
"serai-abi",
"serai-coins-pallet",
"serai-signals-pallet",
"serai-validator-sets-pallet",
"sp-api",
"sp-core",
"sp-runtime",
@@ -9701,8 +9706,7 @@ dependencies = [
"frame-support",
"frame-system",
"parity-scale-codec",
"serai-in-instructions-pallet",
"serai-primitives",
"serai-abi",
"serai-validator-sets-pallet",
"sp-core",
"sp-io",
@@ -9737,27 +9741,38 @@ dependencies = [
[[package]]
name = "serde"
version = "1.0.219"
version = "1.0.225"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6"
checksum = "fd6c24dee235d0da097043389623fb913daddf92c76e9f5a1db88607a0bcbd1d"
dependencies = [
"serde_core",
"serde_derive",
]
[[package]]
name = "serde_bytes"
version = "0.11.17"
version = "0.11.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8437fd221bde2d4ca316d61b90e337e9e702b3820b87d63caa9ba6c02bd06d96"
checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8"
dependencies = [
"serde",
"serde_core",
]
[[package]]
name = "serde_core"
version = "1.0.225"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "659356f9a0cb1e529b24c01e43ad2bdf520ec4ceaf83047b83ddcc2251f96383"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.219"
version = "1.0.225"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00"
checksum = "0ea936adf78b1f766949a4977b91d2f5595825bd6ec079aa9543ad2685fc4516"
dependencies = [
"proc-macro2",
"quote",
@@ -9766,15 +9781,16 @@ dependencies = [
[[package]]
name = "serde_json"
version = "1.0.143"
version = "1.0.145"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a"
checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c"
dependencies = [
"indexmap 2.11.1",
"indexmap 2.11.3",
"itoa",
"memchr",
"ryu",
"serde",
"serde_core",
]
[[package]]
@@ -9819,7 +9835,7 @@ dependencies = [
"chrono",
"hex",
"indexmap 1.9.3",
"indexmap 2.11.1",
"indexmap 2.11.3",
"serde",
"serde_derive",
"serde_json",
@@ -10890,7 +10906,7 @@ version = "0.6.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"core-foundation 0.9.4",
"system-configuration-sys",
]
@@ -10933,7 +10949,7 @@ dependencies = [
"getrandom 0.3.3",
"once_cell",
"rustix",
"windows-sys 0.60.2",
"windows-sys 0.59.0",
]
[[package]]
@@ -11201,7 +11217,7 @@ version = "0.22.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
dependencies = [
"indexmap 2.11.1",
"indexmap 2.11.3",
"serde",
"serde_spanned",
"toml_datetime",
@@ -11250,7 +11266,7 @@ version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"bytes",
"http",
"http-body",
@@ -11474,9 +11490,9 @@ checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5"
[[package]]
name = "unicode-ident"
version = "1.0.18"
version = "1.0.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512"
checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d"
[[package]]
name = "unicode-joining-type"
@@ -11628,18 +11644,18 @@ checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b"
[[package]]
name = "wasi"
version = "0.14.5+wasi-0.2.4"
version = "0.14.7+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4494f6290a82f5fe584817a676a34b9d6763e8d9d18204009fb31dceca98fd4"
checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c"
dependencies = [
"wasip2",
]
[[package]]
name = "wasip2"
version = "1.0.0+wasi-0.2.4"
version = "1.0.1+wasi-0.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03fa2761397e5bd52002cd7e73110c71af2109aca4e521a9f40473fe685b0a24"
checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7"
dependencies = [
"wit-bindgen",
]
@@ -11767,10 +11783,10 @@ version = "0.236.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a9b1e81f3eb254cf7404a82cee6926a4a3ccc5aad80cc3d43608a070c67aa1d7"
dependencies = [
"bitflags 2.9.3",
"bitflags 2.9.4",
"hashbrown 0.15.5",
"indexmap 2.11.1",
"semver 1.0.26",
"indexmap 2.11.3",
"semver 1.0.27",
"serde",
]
@@ -11791,14 +11807,14 @@ version = "36.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b3e1fab634681494213138ea3a18e958e5ea99da13a4a01a4b870d51a41680b"
dependencies = [
"addr2line 0.25.0",
"addr2line 0.25.1",
"anyhow",
"bitflags 2.9.3",
"bitflags 2.9.4",
"bumpalo",
"cc",
"cfg-if",
"hashbrown 0.15.5",
"indexmap 2.11.1",
"indexmap 2.11.3",
"libc",
"log",
"mach2",
@@ -11837,8 +11853,8 @@ dependencies = [
"anyhow",
"cranelift-bitset",
"cranelift-entity",
"gimli 0.32.2",
"indexmap 2.11.1",
"gimli 0.32.3",
"indexmap 2.11.3",
"log",
"object 0.37.3",
"postcard",
@@ -11893,7 +11909,7 @@ dependencies = [
"cranelift-entity",
"cranelift-frontend",
"cranelift-native",
"gimli 0.32.2",
"gimli 0.32.3",
"itertools 0.14.0",
"log",
"object 0.37.3",
@@ -12214,6 +12230,15 @@ dependencies = [
"windows-targets 0.53.3",
]
[[package]]
name = "windows-sys"
version = "0.61.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e201184e40b2ede64bc2ea34968b28e33622acdbbf37104f0e4a33f7abe657aa"
dependencies = [
"windows-link 0.2.0",
]
[[package]]
name = "windows-targets"
version = "0.48.5"
@@ -12421,9 +12446,9 @@ dependencies = [
[[package]]
name = "wit-bindgen"
version = "0.45.1"
version = "0.46.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5c573471f125075647d03df72e026074b7203790d41351cd6edc96f46bcccd36"
checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59"
[[package]]
name = "wyz"

View File

@@ -1,6 +1,8 @@
pub use core::sync::atomic;
#[cfg(all(feature = "alloc", not(feature = "std")))]
pub use extern_alloc::sync::{Arc, Weak};
#[cfg(feature = "std")]
pub use std::sync::{Arc, Weak};
mod mutex_shim {
#[cfg(not(feature = "std"))]

View File

@@ -21,7 +21,7 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der
thiserror = { version = "2", default-features = false }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }
borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }

View File

@@ -20,7 +20,7 @@ workspace = true
zeroize = { version = "^1.5", default-features = false }
rand_core = { version = "0.6", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
dkg = { path = "../", version = "0.6", default-features = false }

View File

@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false, features = ["alloc"] }
zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }

View File

@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }

View File

@@ -17,33 +17,35 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
thiserror = { version = "2", default-features = false, features = ["std"] }
std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }
thiserror = { version = "2", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
subtle = { version = "^2.4", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["alloc"] }
rand_chacha = { version = "0.3", default-features = false }
hex = { version = "0.4", default-features = false, features = ["std"], optional = true }
zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }
subtle = { version = "^2.4", default-features = false }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }
hex = { version = "0.4", default-features = false, features = ["alloc"], optional = true }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["std"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["std"], optional = true }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["alloc"], optional = true }
minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["alloc"], optional = true }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
sha2 = { version = "0.10.0", default-features = false, optional = true }
elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["std"], optional = true }
ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["alloc"], optional = true }
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["alloc", "batch"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["alloc"] }
dkg = { path = "../dkg", version = "0.6.1", default-features = false, features = ["std"] }
dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, features = ["std"], optional = true }
dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, features = ["std"], optional = true }
dkg = { path = "../dkg", version = "0.6.1", default-features = false }
dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, optional = true }
dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, optional = true }
[dev-dependencies]
hex = "0.4"
@@ -54,6 +56,38 @@ dkg-recovery = { path = "../dkg/recovery", default-features = false, features =
dkg-dealer = { path = "../dkg/dealer", default-features = false, features = ["std"] }
[features]
std = [
"std-shims/std",
"thiserror/std",
"rand_core/std",
"rand_chacha/std",
"zeroize/std",
"subtle/std",
"hex?/std",
"transcript/std",
"dalek-ff-group?/std",
"minimal-ed448?/std",
"ciphersuite/std",
"sha2?/std",
"elliptic-curve?/std",
"ciphersuite-kp256?/std",
"multiexp/std",
"schnorr/std",
"dkg/std",
"dkg-recovery?/std",
"dkg-dealer?/std",
]
ed25519 = ["dalek-ff-group"]
ristretto = ["dalek-ff-group"]
@@ -63,3 +97,5 @@ p256 = ["sha2", "elliptic-curve", "ciphersuite-kp256"]
ed448 = ["minimal-ed448"]
tests = ["hex", "rand_core/getrandom", "dkg-dealer", "dkg-recovery"]
default = ["std"]

View File

@@ -1,5 +1,7 @@
use core::{marker::PhantomData, fmt::Debug};
use std::io::{self, Read, Write};
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::io::{self, Read, Write};
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};

View File

@@ -1,5 +1,7 @@
use core::{ops::Deref, convert::AsRef};
use std::io::{self, Read};
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::io::{self, Read};
use rand_core::{RngCore, CryptoRng};

View File

@@ -1,8 +1,11 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use core::fmt::Debug;
use std::collections::HashMap;
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::collections::HashMap;
use thiserror::Error;

View File

@@ -6,7 +6,9 @@
// Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)
use core::ops::Deref;
use std::{
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::{
io::{self, Read, Write},
collections::HashMap,
};

View File

@@ -1,5 +1,7 @@
use core::{ops::Deref, fmt::Debug};
use std::{
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::{
io::{self, Read, Write},
collections::HashMap,
};

View File

@@ -1,4 +1,4 @@
use std::collections::HashMap;
use std_shims::collections::HashMap;
use rand_core::{RngCore, CryptoRng};

View File

@@ -1,4 +1,4 @@
use std::io::{self, Read};
use std_shims::io::{self, Read};
use zeroize::Zeroizing;

View File

@@ -1,8 +1,8 @@
use core::ops::Deref;
use std::collections::HashMap;
use std_shims::collections::HashMap;
#[cfg(test)]
use std::str::FromStr;
use core::str::FromStr;
use zeroize::Zeroizing;

View File

@@ -17,11 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
rustversion = "1"
std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
ff = { version = "0.13", default-features = false, features = ["bits"] }
group = { version = "0.13", default-features = false }
@@ -35,8 +31,9 @@ k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic"
dalek-ff-group = { path = "../dalek-ff-group" }
[features]
std = ["std-shims/std", "zeroize/std", "ff/std", "rand_core?/std"]
alloc = ["zeroize/alloc"]
std = ["alloc", "zeroize/std", "ff/std", "rand_core?/std"]
batch = ["rand_core"]
batch = ["alloc", "rand_core"]
default = ["std"]

View File

@@ -12,5 +12,6 @@ culminating in commit
[669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
Any subsequent changes have not undergone auditing.
This library is usable under no_std, via alloc, when the default features are
disabled.
This library is usable under no-`std` and no-`alloc`. With the `alloc` feature,
the library is fully functional. Without the `alloc` feature, the `multiexp`
function is shimmed with a serial implementation.

View File

@@ -1,4 +1,4 @@
use std_shims::vec::Vec;
use alloc::vec::Vec;
use rand_core::{RngCore, CryptoRng};

View File

@@ -2,200 +2,177 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
#[macro_use]
#[cfg(feature = "alloc")]
extern crate alloc;
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::vec::Vec;
use zeroize::Zeroize;
use ff::PrimeFieldBits;
use group::Group;
#[cfg(feature = "alloc")]
mod straus;
use straus::*;
#[cfg(feature = "alloc")]
mod pippenger;
use pippenger::*;
#[cfg(feature = "batch")]
mod batch;
#[cfg(feature = "batch")]
pub use batch::BatchVerifier;
#[cfg(test)]
#[cfg(all(test, feature = "alloc"))]
mod tests;
// Use black_box when possible
#[rustversion::since(1.66)]
use core::hint::black_box;
#[rustversion::before(1.66)]
fn black_box<T>(val: T) -> T {
val
}
#[cfg(feature = "alloc")]
mod underlying {
use super::*;
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
use core::hint::black_box;
use alloc::{vec, vec::Vec};
let mut bit = black_box(*bit_ref);
#[allow(clippy::cast_lossless)]
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
pub(crate) use straus::*;
bit_ref.zeroize();
res
}
pub(crate) use pippenger::*;
// Convert scalars to `window`-sized bit groups, as needed to index a table
// This algorithm works for `window <= 8`
pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
pairs: &[(G::Scalar, G)],
window: u8,
) -> Vec<Vec<u8>> {
let w_usize = usize::from(window);
#[cfg(feature = "batch")]
pub use batch::BatchVerifier;
let mut groupings = vec![];
for pair in pairs {
let p = groupings.len();
let mut bits = pair.0.to_le_bits();
groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
fn u8_from_bool(bit_ref: &mut bool) -> u8 {
let bit_ref = black_box(bit_ref);
for (i, mut bit) in bits.iter_mut().enumerate() {
let mut bit = u8_from_bool(&mut bit);
groupings[p][i / w_usize] |= bit << (i % w_usize);
bit.zeroize();
let mut bit = black_box(*bit_ref);
#[allow(clippy::cast_lossless)]
let res = black_box(bit as u8);
bit.zeroize();
debug_assert!((res | 1) == 1);
bit_ref.zeroize();
res
}
// Convert scalars to `window`-sized bit groups, as needed to index a table
// This algorithm works for `window <= 8`
pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
pairs: &[(G::Scalar, G)],
window: u8,
) -> Vec<Vec<u8>> {
let w_usize = usize::from(window);
let mut groupings = vec![];
for pair in pairs {
let p = groupings.len();
let mut bits = pair.0.to_le_bits();
groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
for (i, mut bit) in bits.iter_mut().enumerate() {
let mut bit = u8_from_bool(&mut bit);
groupings[p][i / w_usize] |= bit << (i % w_usize);
bit.zeroize();
}
}
groupings
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm {
Null,
Single,
Straus(u8),
Pippenger(u8),
}
// These are 'rule of thumb's obtained via benchmarking `k256` and `curve25519-dalek`
fn algorithm(len: usize) -> Algorithm {
#[cfg(not(debug_assertions))]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
// Straus 2 never showed a performance benefit, even with just 2 elements
Algorithm::Straus(3)
} else if len < 20 {
Algorithm::Straus(4)
} else if len < 50 {
Algorithm::Straus(5)
} else if len < 100 {
Algorithm::Pippenger(4)
} else if len < 125 {
Algorithm::Pippenger(5)
} else if len < 275 {
Algorithm::Pippenger(6)
} else if len < 400 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
#[cfg(debug_assertions)]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
Algorithm::Straus(3)
} else if len < 80 {
Algorithm::Straus(4)
} else if len < 100 {
Algorithm::Straus(5)
} else if len < 125 {
Algorithm::Pippenger(4)
} else if len < 275 {
Algorithm::Pippenger(5)
} else if len < 475 {
Algorithm::Pippenger(6)
} else if len < 750 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
}
}
groupings
}
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Algorithm {
Null,
Single,
Straus(u8),
Pippenger(u8),
}
/*
Release (with runs 20, so all of these are off by 20x):
k256
Straus 3 is more efficient at 5 with 678µs per
Straus 4 is more efficient at 10 with 530µs per
Straus 5 is more efficient at 35 with 467µs per
Pippenger 5 is more efficient at 125 with 431µs per
Pippenger 6 is more efficient at 275 with 349µs per
Pippenger 7 is more efficient at 375 with 360µs per
dalek
Straus 3 is more efficient at 5 with 519µs per
Straus 4 is more efficient at 10 with 376µs per
Straus 5 is more efficient at 170 with 330µs per
Pippenger 5 is more efficient at 125 with 305µs per
Pippenger 6 is more efficient at 275 with 250µs per
Pippenger 7 is more efficient at 450 with 205µs per
Pippenger 8 is more efficient at 800 with 213µs per
Debug (with runs 5, so...):
k256
Straus 3 is more efficient at 5 with 2532µs per
Straus 4 is more efficient at 10 with 1930µs per
Straus 5 is more efficient at 80 with 1632µs per
Pippenger 5 is more efficient at 150 with 1441µs per
Pippenger 6 is more efficient at 300 with 1235µs per
Pippenger 7 is more efficient at 475 with 1182µs per
Pippenger 8 is more efficient at 625 with 1170µs per
dalek:
Straus 3 is more efficient at 5 with 971µs per
Straus 4 is more efficient at 10 with 782µs per
Straus 5 is more efficient at 75 with 778µs per
Straus 6 is more efficient at 165 with 867µs per
Pippenger 5 is more efficient at 125 with 677µs per
Pippenger 6 is more efficient at 250 with 655µs per
Pippenger 7 is more efficient at 475 with 500µs per
Pippenger 8 is more efficient at 875 with 499µs per
*/
fn algorithm(len: usize) -> Algorithm {
#[cfg(not(debug_assertions))]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
// Straus 2 never showed a performance benefit, even with just 2 elements
Algorithm::Straus(3)
} else if len < 20 {
Algorithm::Straus(4)
} else if len < 50 {
Algorithm::Straus(5)
} else if len < 100 {
Algorithm::Pippenger(4)
} else if len < 125 {
Algorithm::Pippenger(5)
} else if len < 275 {
Algorithm::Pippenger(6)
} else if len < 400 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
/// amount of pairs.
pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
pairs: &[(G::Scalar, G)],
) -> G {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
// These functions panic if called without any pairs
Algorithm::Straus(window) => straus(pairs, window),
Algorithm::Pippenger(window) => pippenger(pairs, window),
}
}
#[cfg(debug_assertions)]
if len == 0 {
Algorithm::Null
} else if len == 1 {
Algorithm::Single
} else if len < 10 {
Algorithm::Straus(3)
} else if len < 80 {
Algorithm::Straus(4)
} else if len < 100 {
Algorithm::Straus(5)
} else if len < 125 {
Algorithm::Pippenger(4)
} else if len < 275 {
Algorithm::Pippenger(5)
} else if len < 475 {
Algorithm::Pippenger(6)
} else if len < 750 {
Algorithm::Pippenger(7)
} else {
Algorithm::Pippenger(8)
/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
/// based on the amount of pairs.
pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus_vartime(pairs, window),
Algorithm::Pippenger(window) => pippenger_vartime(pairs, window),
}
}
}
/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
/// amount of pairs.
pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
pairs: &[(G::Scalar, G)],
) -> G {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
// These functions panic if called without any pairs
Algorithm::Straus(window) => straus(pairs, window),
Algorithm::Pippenger(window) => pippenger(pairs, window),
#[cfg(not(feature = "alloc"))]
mod underlying {
use super::*;
/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
/// amount of pairs.
pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
pairs: &[(G::Scalar, G)],
) -> G {
pairs.iter().map(|(scalar, point)| *point * scalar).sum()
}
/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
/// based on the amount of pairs.
pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
pairs.iter().map(|(scalar, point)| *point * scalar).sum()
}
}
/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
/// based on the amount of pairs.
pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
match algorithm(pairs.len()) {
Algorithm::Null => Group::identity(),
Algorithm::Single => pairs[0].1 * pairs[0].0,
Algorithm::Straus(window) => straus_vartime(pairs, window),
Algorithm::Pippenger(window) => pippenger_vartime(pairs, window),
}
}
pub use underlying::*;

View File

@@ -1,3 +1,5 @@
use alloc::vec;
use zeroize::Zeroize;
use ff::PrimeFieldBits;

View File

@@ -1,4 +1,4 @@
use std_shims::vec::Vec;
use alloc::{vec, vec::Vec};
use zeroize::Zeroize;

View File

@@ -27,7 +27,7 @@ digest = { version = "0.11.0-rc.1", default-features = false, features = ["block
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, optional = true }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false }
multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["batch"], optional = true }
multiexp = { path = "../multiexp", version = "0.4", default-features = false }
[dev-dependencies]
hex = "0.4"
@@ -40,7 +40,7 @@ dalek-ff-group = { path = "../dalek-ff-group" }
ciphersuite = { path = "../ciphersuite" }
[features]
alloc = ["zeroize/alloc", "digest/alloc", "ciphersuite/alloc", "multiexp"]
alloc = ["zeroize/alloc", "digest/alloc", "ciphersuite/alloc", "multiexp/alloc", "multiexp/batch"]
aggregate = ["alloc", "transcript"]
std = ["alloc", "std-shims/std", "rand_core/std", "zeroize/std", "transcript?/std", "ciphersuite/std", "multiexp/std"]
default = ["std"]

View File

@@ -23,8 +23,9 @@ use ciphersuite::{
},
GroupIo,
};
use multiexp::multiexp_vartime;
#[cfg(feature = "alloc")]
use multiexp::{multiexp_vartime, BatchVerifier};
use multiexp::BatchVerifier;
/// Half-aggregation from <https://eprint.iacr.org/2021/350>.
#[cfg(feature = "aggregate")]
@@ -109,12 +110,7 @@ impl<C: GroupIo> SchnorrSignature<C> {
/// different keys/messages.
#[must_use]
pub fn verify(&self, public_key: C::G, challenge: C::F) -> bool {
let statements = self.batch_statements(public_key, challenge);
#[cfg(feature = "alloc")]
let res = multiexp_vartime(&statements);
#[cfg(not(feature = "alloc"))]
let res = statements.into_iter().map(|(scalar, point)| point * scalar).sum::<C::G>();
res.is_identity().into()
multiexp_vartime(&self.batch_statements(public_key, challenge)).is_identity().into()
}
/// Queue a signature for batch verification.

View File

@@ -17,18 +17,35 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
rand_core = "0.6"
zeroize = "^1.5"
std-shims = { version = "0.1", default-features = false, features = ["alloc"] }
transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", features = ["merlin"] }
rand_core = { version = "0.6", default-features = false }
zeroize = { version = "1.5", default-features = false, features = ["zeroize_derive", "alloc"] }
group = "0.13"
transcript = { package = "flexible-transcript", path = "../transcript", version = "0.3.2", default-features = false, features = ["merlin"] }
ciphersuite = { path = "../ciphersuite", version = "^0.4.1", features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1" }
frost = { path = "../frost", package = "modular-frost", version = "0.11.0", features = ["ristretto"] }
ciphersuite = { path = "../ciphersuite", version = "0.4.1", default-features = false, features = ["alloc"] }
schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "0.5.1", default-features = false, features = ["alloc"] }
frost = { path = "../frost", package = "modular-frost", version = "0.11.0", default-features = false, features = ["ristretto"] }
schnorrkel = { version = "0.11" }
schnorrkel = { version = "0.11", default-features = false, features = ["alloc"] }
[dev-dependencies]
frost = { path = "../frost", package = "modular-frost", features = ["tests"] }
[features]
std = [
"std-shims/std",
"rand_core/std",
"zeroize/std",
"transcript/std",
"ciphersuite/std",
"schnorr/std",
"frost/std",
"schnorrkel/std",
]
default = ["std"]

View File

@@ -1,7 +1,10 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
use std::io::{self, Read};
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::io::{self, Read};
use rand_core::{RngCore, CryptoRng};

View File

@@ -1,6 +1,6 @@
use rand_core::OsRng;
use group::GroupEncoding;
use ciphersuite::group::GroupEncoding;
use frost::{
Participant,
tests::{key_gen, algorithm_machines, sign},

View File

@@ -125,6 +125,8 @@ deny = [
{ name = "hashbrown", version = "=0.15.0" },
# Legacy which _no one_ should use anymore
{ name = "is-terminal", version = "*" },
# Stop introduction into the tree without realizing it
{ name = "once_cell_polyfill", version = "*" },
]
[sources]

View File

@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true
[dependencies]
std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false }
std-shims = { version = "0.1.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }
thiserror = { version = "2", default-features = false }
@@ -27,7 +27,7 @@ rand_core = { version = "0.6", default-features = false }
bitcoin = { version = "0.32", default-features = false }
k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.11", default-features = false, features = ["secp256k1"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.11", default-features = false, features = ["secp256k1"] }
hex = { version = "0.4", default-features = false, optional = true }
serde = { version = "1", default-features = false, features = ["derive"], optional = true }
@@ -55,7 +55,7 @@ std = [
"bitcoin/serde",
"k256/std",
"frost",
"frost/std",
"hex/std",
"serde/std",

View File

@@ -1,9 +1,27 @@
#[cfg(feature = "std")]
use core::fmt::Debug;
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::io;
use subtle::{Choice, ConstantTimeEq, ConditionallySelectable};
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
use k256::{elliptic_curve::sec1::ToEncodedPoint, ProjectivePoint};
use k256::{
elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint},
U256, Scalar, ProjectivePoint,
};
use bitcoin::key::XOnlyPublicKey;
use bitcoin::{
hashes::{HashEngine, Hash, sha256::Hash as Sha256},
key::XOnlyPublicKey,
};
use frost::{
curve::{WrappedGroup, Secp256k1},
Participant, ThresholdKeys, ThresholdView, FrostError,
algorithm::{Hram as HramTrait, Algorithm, IetfSchnorr as FrostSchnorr},
};
/// Get the x coordinate of a non-infinity point.
///
@@ -21,142 +39,118 @@ pub(crate) fn x_only(key: &ProjectivePoint) -> XOnlyPublicKey {
}
/// Return if a point must be negated to have an even Y coordinate and be eligible for use.
#[cfg(feature = "std")]
pub(crate) fn needs_negation(key: &ProjectivePoint) -> Choice {
use k256::elliptic_curve::sec1::Tag;
u8::from(key.to_encoded_point(true).tag()).ct_eq(&u8::from(Tag::CompressedOddY))
}
#[cfg(feature = "std")]
mod frost_crypto {
use core::fmt::Debug;
use std_shims::{vec::Vec, io};
/// A BIP-340 compatible HRAm for use with the modular-frost Schnorr Algorithm.
///
/// If passed an odd nonce, the challenge will be negated.
///
/// If either `R` or `A` is the point at infinity, this will panic.
#[derive(Clone, Copy, Debug)]
pub struct Hram;
#[allow(non_snake_case)]
impl HramTrait<Secp256k1> for Hram {
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
const TAG_HASH: Sha256 = Sha256::const_hash(b"BIP0340/challenge");
use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng};
let mut data = Sha256::engine();
data.input(TAG_HASH.as_ref());
data.input(TAG_HASH.as_ref());
data.input(&x(R));
data.input(&x(A));
data.input(m);
use bitcoin::hashes::{HashEngine, Hash, sha256::Hash as Sha256};
use k256::{elliptic_curve::ops::Reduce, U256, Scalar};
use frost::{
curve::{WrappedGroup, Secp256k1},
Participant, ThresholdKeys, ThresholdView, FrostError,
algorithm::{Hram as HramTrait, Algorithm, IetfSchnorr as FrostSchnorr},
};
use super::*;
/// A BIP-340 compatible HRAm for use with the modular-frost Schnorr Algorithm.
///
/// If passed an odd nonce, the challenge will be negated.
///
/// If either `R` or `A` is the point at infinity, this will panic.
#[derive(Clone, Copy, Debug)]
pub struct Hram;
#[allow(non_snake_case)]
impl HramTrait<Secp256k1> for Hram {
fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
const TAG_HASH: Sha256 = Sha256::const_hash(b"BIP0340/challenge");
let mut data = Sha256::engine();
data.input(TAG_HASH.as_ref());
data.input(TAG_HASH.as_ref());
data.input(&x(R));
data.input(&x(A));
data.input(m);
let c = Scalar::reduce(U256::from_be_slice(Sha256::from_engine(data).as_ref()));
// If the nonce was odd, sign `r - cx` instead of `r + cx`, allowing us to negate `s` at the
// end to sign as `-r + cx`
<_>::conditional_select(&c, &-c, needs_negation(R))
}
}
/// BIP-340 Schnorr signature algorithm.
///
/// This may panic if called with nonces/a group key which are the point at infinity (which have
/// a negligible probability for a well-reasoned caller, even with malicious participants
/// present).
///
/// `verify`, `verify_share` MUST be called after `sign_share` is called. Otherwise, this library
/// MAY panic.
#[derive(Clone)]
pub struct Schnorr(FrostSchnorr<Secp256k1, Hram>);
impl Schnorr {
/// Construct a Schnorr algorithm continuing the specified transcript.
#[allow(clippy::new_without_default)]
pub fn new() -> Schnorr {
Schnorr(FrostSchnorr::ietf())
}
}
impl Algorithm<Secp256k1> for Schnorr {
type Transcript = <FrostSchnorr<Secp256k1, Hram> as Algorithm<Secp256k1>>::Transcript;
type Addendum = ();
type Signature = [u8; 64];
fn transcript(&mut self) -> &mut Self::Transcript {
self.0.transcript()
}
fn nonces(&self) -> Vec<Vec<ProjectivePoint>> {
self.0.nonces()
}
fn preprocess_addendum<R: RngCore + CryptoRng>(
&mut self,
rng: &mut R,
keys: &ThresholdKeys<Secp256k1>,
) {
self.0.preprocess_addendum(rng, keys)
}
fn read_addendum<R: io::Read>(&self, reader: &mut R) -> io::Result<Self::Addendum> {
self.0.read_addendum(reader)
}
fn process_addendum(
&mut self,
view: &ThresholdView<Secp256k1>,
i: Participant,
addendum: (),
) -> Result<(), FrostError> {
self.0.process_addendum(view, i, addendum)
}
fn sign_share(
&mut self,
params: &ThresholdView<Secp256k1>,
nonce_sums: &[Vec<<Secp256k1 as WrappedGroup>::G>],
nonces: Vec<Zeroizing<<Secp256k1 as WrappedGroup>::F>>,
msg: &[u8],
) -> <Secp256k1 as WrappedGroup>::F {
self.0.sign_share(params, nonce_sums, nonces, msg)
}
fn verify(
&self,
group_key: ProjectivePoint,
nonces: &[Vec<ProjectivePoint>],
sum: Scalar,
) -> Option<Self::Signature> {
self.0.verify(group_key, nonces, sum).map(|mut sig| {
sig.s = <_>::conditional_select(&sum, &-sum, needs_negation(&sig.R));
// Convert to a Bitcoin signature by dropping the byte for the point's sign bit
sig.serialize()[1 ..].try_into().unwrap()
})
}
fn verify_share(
&self,
verification_share: ProjectivePoint,
nonces: &[Vec<ProjectivePoint>],
share: Scalar,
) -> Result<Vec<(Scalar, ProjectivePoint)>, ()> {
self.0.verify_share(verification_share, nonces, share)
}
}
}
#[cfg(feature = "std")]
pub use frost_crypto::*;

View File

@@ -2,9 +2,6 @@
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(not(feature = "std"))]
extern crate alloc;
/// The bitcoin Rust library.
pub use bitcoin;

View File

@@ -1,36 +1,31 @@
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::{
vec::Vec,
collections::HashMap,
io::{self, Write},
io::{self, Read, Write},
};
#[cfg(feature = "std")]
use std::io::{Read, BufReader};
use k256::{
elliptic_curve::sec1::{Tag, ToEncodedPoint},
Scalar, ProjectivePoint,
};
#[cfg(feature = "std")]
use frost::{
curve::{WrappedGroup, GroupIo, Secp256k1},
ThresholdKeys,
};
use bitcoin::{
consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction,
Block,
hashes::Hash,
key::TweakedPublicKey,
TapTweakHash,
consensus::encode::{Decodable, serialize},
OutPoint, ScriptBuf, TxOut, Transaction, Block,
};
#[cfg(feature = "std")]
use bitcoin::{hashes::Hash, consensus::encode::Decodable, TapTweakHash};
use crate::crypto::x_only;
#[cfg(feature = "std")]
use crate::crypto::needs_negation;
use crate::crypto::{x_only, needs_negation};
#[cfg(feature = "std")]
mod send;
#[cfg(feature = "std")]
pub use send::*;
/// Tweak keys to ensure they're usable with Bitcoin's Taproot upgrade.
@@ -42,7 +37,6 @@ pub use send::*;
/// After adding an unspendable script path, the key is negated if odd.
///
/// This has a negligible probability of returning keys whose group key is the point at infinity.
#[cfg(feature = "std")]
pub fn tweak_keys(keys: ThresholdKeys<Secp256k1>) -> ThresholdKeys<Secp256k1> {
// Adds the unspendable script path per
// https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki#cite_note-23
@@ -118,18 +112,23 @@ impl ReceivedOutput {
}
/// Read a ReceivedOutput from a generic satisfying Read.
#[cfg(feature = "std")]
pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
let offset = Secp256k1::read_F(r)?;
let output;
let outpoint;
{
let mut buf_r = BufReader::with_capacity(0, r);
output =
TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?;
outpoint =
OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?;
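// Adapts the caller's generic reader into `bitcoin::io::Read` so `consensus_decode` can be
// driven by it, mapping any read error into a `bitcoin::io::Error`.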
struct BitcoinRead<R: Read>(R);
impl<R: Read> bitcoin::io::Read for BitcoinRead<R> {
fn read(&mut self, buf: &mut [u8]) -> bitcoin::io::Result<usize> {
self
.0
.read(buf)
.map_err(|e| bitcoin::io::Error::new(bitcoin::io::ErrorKind::Other, e.to_string()))
}
}
let mut r = BitcoinRead(r);
let output = TxOut::consensus_decode(&mut r).map_err(|_| io::Error::other("invalid TxOut"))?;
let outpoint =
OutPoint::consensus_decode(&mut r).map_err(|_| io::Error::other("invalid OutPoint"))?;
Ok(ReceivedOutput { offset, output, outpoint })
}

View File

@@ -1,3 +1,5 @@
#[allow(unused_imports)]
use std_shims::prelude::*;
use std_shims::{
io::{self, Read},
collections::HashMap,

View File

@@ -237,7 +237,7 @@ mod substrate {
use scale::{Encode, Decode};
use sp_runtime::{
transaction_validity::*,
traits::{Verify, ExtrinsicLike, Dispatchable, ValidateUnsigned, Checkable, Applyable},
traits::{Verify, ExtrinsicLike, ExtrinsicCall, Dispatchable, ValidateUnsigned, Checkable, Applyable},
Weight,
};
#[rustfmt::skip]
@@ -318,6 +318,13 @@ mod substrate {
}
}
impl ExtrinsicCall for Transaction {
type Call = Self;
fn call(&self) -> &Self {
self
}
}
impl<Context: TransactionContext> GetDispatchInfo for TransactionWithContext<Context> {
fn get_dispatch_info(&self) -> DispatchInfo {
match &self.0 {

View File

@@ -54,8 +54,6 @@ mod pallet {
/// The configuration of this pallet.
#[pallet::config]
pub trait Config<I: 'static = ()>: frame_system::Config<AccountId = Public> {
/// The event type.
type RuntimeEvent: From<Event<Self, I>> + IsType<<Self as frame_system::Config>::RuntimeEvent>;
/// What decides if mints are allowed.
type AllowMint: AllowMint;
}

View File

@@ -24,7 +24,10 @@ const HUMAN_READABLE_PART: bech32::Hrp = bech32::Hrp::parse_unchecked("sri");
/// The address for an account on Serai.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(feature = "non_canonical_scale_derivations", derive(scale::Encode, scale::Decode))]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen, scale::DecodeWithMemTracking)
)]
pub struct SeraiAddress(pub [u8; 32]);
// These share encodings as 32-byte arrays

View File

@@ -6,8 +6,3 @@ pub const TARGET_BLOCK_TIME: Duration = Duration::from_secs(6);
/// The intended duration for a session.
// 1 week
pub const SESSION_LENGTH: Duration = Duration::from_secs(7 * 24 * 60 * 60);
/// The maximum amount of key shares per set.
pub const MAX_KEY_SHARES_PER_SET: u16 = 150;
/// The maximum amount of key shares per set, as an u32.
pub const MAX_KEY_SHARES_PER_SET_U32: u32 = MAX_KEY_SHARES_PER_SET as u32;

View File

@@ -96,7 +96,7 @@ pub mod prelude {
pub use crate::coin::*;
pub use crate::balance::*;
pub use crate::network_id::*;
pub use crate::validator_sets::{Session, ValidatorSet, ExternalValidatorSet, Slash, SlashReport};
pub use crate::validator_sets::*;
pub use crate::instructions::*;
}

View File

@@ -1,16 +1,48 @@
use zeroize::Zeroize;
use borsh::{BorshSerialize, BorshDeserialize};
use crate::network_id::ExternalNetworkId;
use crate::{network_id::ExternalNetworkId, address::SeraiAddress};
/// The ID of a protocol.
pub type ProtocolId = [u8; 32];
/// The ID of a signal.
pub type SignalId = [u8; 32];
/// A signal.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
allow(clippy::cast_possible_truncation),
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen, scale::DecodeWithMemTracking)
)]
pub enum Signal {
/// A signal to retire the current protocol.
Retire {
/// The protocol to retire in favor of.
in_favor_of: [u8; 32],
/// The ID of the retirement signal being favored.
signal_id: SignalId,
},
/// A signal to halt an external network.
Halt(ExternalNetworkId),
}
/// A retirement signal, registered on chain.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen, scale::DecodeWithMemTracking)
)]
pub struct RegisteredRetirementSignal {
/// The protocol to retire in favor of.
pub in_favor_of: ProtocolId,
/// The registrant of this signal.
pub registrant: SeraiAddress,
/// The block number this was registered at.
pub registered_at: u64,
}
impl RegisteredRetirementSignal {
/// The ID of this signal.
pub fn id(&self) -> SignalId {
sp_core::blake2_256(&borsh::to_vec(self).unwrap())
}
}
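// A hedged sketch of the determinism this provides: the ID is the BLAKE2b-256 hash of the
// signal's Borsh encoding, so equal signals share an ID and any field change produces a new
// one. The zeroed address below is a placeholder, not a real registrant.
#[test]
fn retirement_signal_id_binds_all_fields() {
  let signal = RegisteredRetirementSignal {
    in_favor_of: [0xff; 32],
    registrant: SeraiAddress([0; 32]),
    registered_at: 1,
  };
  let mut other = signal;
  other.registered_at += 1;
  assert_eq!(signal.id(), signal.id());
  assert_ne!(signal.id(), other.id());
}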

View File

@@ -7,9 +7,9 @@ use ciphersuite::{group::GroupEncoding, GroupIo};
use dalek_ff_group::Ristretto;
use crate::{
constants::MAX_KEY_SHARES_PER_SET,
crypto::{Public, KeyPair},
network_id::{ExternalNetworkId, NetworkId},
balance::Amount,
};
mod slashes;
@@ -103,19 +103,86 @@ impl ExternalValidatorSet {
}
}
/// For a set of validators whose key shares may exceed the maximum, reduce until they are less
/// than or equal to the maximum.
///
/// This runs in time linear to the excess key shares and assumes the excess fits within a usize,
/// panicking otherwise.
///
/// Reduction occurs by reducing each validator in a reverse round-robin. This means the worst
/// validators lose their key shares first.
pub fn amortize_excess_key_shares(validators: &mut [(sp_core::sr25519::Public, u64)]) {
let total_key_shares = validators.iter().map(|(_key, shares)| shares).sum::<u64>();
for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET)))
.unwrap()
{
validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1;
/// The representation for an amount of key shares.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize, BorshSerialize, BorshDeserialize)]
#[cfg_attr(
feature = "non_canonical_scale_derivations",
derive(scale::Encode, scale::Decode, scale::MaxEncodedLen)
)]
pub struct KeyShares(pub u16);
impl KeyShares {
/// Zero key shares.
pub const ZERO: KeyShares = KeyShares(0);
/// One key share.
pub const ONE: KeyShares = KeyShares(1);
/// The maximum amount of key shares per set.
pub const MAX_PER_SET: u16 = 150;
/// The maximum amount of key shares per set, represented as a `u32`.
pub const MAX_PER_SET_U32: u32 = 150;
/// Create key shares from a `u16`.
///
/// This will saturate the value if the `u16` exceeds the maximum amount of key shares.
pub fn saturating_from(key_shares: u16) -> KeyShares {
KeyShares(key_shares.min(Self::MAX_PER_SET))
}
/// Create key shares from an allocation.
///
/// This will panic if `allocation_per_key_share` is zero.
pub fn from_allocation(allocation: Amount, allocation_per_key_share: Amount) -> Self {
Self::saturating_from(
u16::try_from(allocation.0 / allocation_per_key_share.0).unwrap_or(u16::MAX),
)
}
/// For a set of validators whose key shares may exceed the maximum, reduce until they are less
/// than or equal to the maximum.
///
/// Returns the new amount of validators with a non-zero amount of key shares.
///
/// This runs in time linear to the excess key shares and may panic if:
/// - The total amount of key shares exceeds `u16::MAX`.
/// - The list of validators is absurdly long.
/// - The list of validators includes validators without key shares.
///
/// Reduction occurs by reducing each validator in a reverse round-robin. This means the
/// validators with the least key shares are evicted first.
#[must_use]
pub fn amortize_excess(validators: &mut [(sp_core::sr25519::Public, KeyShares)]) -> usize {
let total_key_shares = validators.iter().map(|(_key, shares)| shares.0).sum::<u16>();
let mut actual_len = validators.len();
let mut offset = 1;
for _ in 0 .. usize::from(total_key_shares.saturating_sub(Self::MAX_PER_SET)) {
// If the offset exceeds the new length, reset it
if offset > actual_len {
offset = 1;
}
// Take one key share from this validator
let index = actual_len - offset;
validators[index].1 .0 -= 1;
// If they now have zero key shares, shrink the length and continue
if validators[index].1 .0 == 0 {
actual_len -= 1;
continue;
}
// Increment the offset to take from the next validator on the next iteration
offset += 1;
}
actual_len
}
}
impl TryFrom<u16> for KeyShares {
type Error = ();
fn try_from(value: u16) -> Result<Self, ()> {
if value > Self::MAX_PER_SET {
Err(())
} else {
Ok(Self(value))
}
}
}
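// A hedged worked example of the reverse round-robin described above, using placeholder keys:
// three validators with 60 key shares each total 180, which is 30 over `MAX_PER_SET`, so each
// loses 10 shares and all three remain. Assumes `Public::from_raw` and the `Amount` newtype
// are available as used elsewhere in this crate.
#[test]
fn amortize_excess_round_robin_example() {
  use sp_core::sr25519::Public;
  // 1_000 units of allocation at 400 per key share grants two key shares
  assert_eq!(KeyShares::from_allocation(Amount(1_000), Amount(400)), KeyShares(2));
  let mut validators = [
    (Public::from_raw([1; 32]), KeyShares(60)),
    (Public::from_raw([2; 32]), KeyShares(60)),
    (Public::from_raw([3; 32]), KeyShares(60)),
  ];
  let remaining = KeyShares::amortize_excess(&mut validators);
  assert_eq!(remaining, 3);
  assert!(validators.iter().all(|(_key, shares)| shares.0 == 50));
}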

View File

@@ -8,8 +8,9 @@ use borsh::{BorshSerialize, BorshDeserialize};
use sp_core::{ConstU32, bounded::BoundedVec};
use crate::{
constants::{TARGET_BLOCK_TIME, SESSION_LENGTH, MAX_KEY_SHARES_PER_SET_U32},
constants::{TARGET_BLOCK_TIME, SESSION_LENGTH},
balance::Amount,
validator_sets::KeyShares,
};
/// Each slash point is equivalent to the downtime implied by missing a block proposal.
@@ -212,7 +213,7 @@ pub struct SlashReport(
serialize_with = "crate::borsh_serialize_bounded_vec",
deserialize_with = "crate::borsh_deserialize_bounded_vec"
)]
pub BoundedVec<Slash, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>,
pub BoundedVec<Slash, ConstU32<{ KeyShares::MAX_PER_SET_U32 }>>,
);
/// An error when converting from a `Vec`.
@@ -251,7 +252,7 @@ impl SlashReport {
#[test]
fn test_penalty() {
for validators in [1, 50, 100, crate::constants::MAX_KEY_SHARES_PER_SET] {
for validators in [1, 50, 100, KeyShares::MAX_PER_SET_U32] {
let validators = NonZero::new(validators).unwrap();
// 12 hours of slash points should only decrease the rewards proportionately
let twelve_hours_of_slash_points =

View File

@@ -34,6 +34,8 @@ frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev =
frame-executive = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "d4624c561765c13b38eb566e435131a8c329a543", default-features = false }
serai-coins-pallet = { path = "../coins", default-features = false }
serai-validator-sets-pallet = { path = "../validator-sets", default-features = false }
serai-signals-pallet = { path = "../signals", default-features = false }
[build-dependencies]
substrate-wasm-builder = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "d4624c561765c13b38eb566e435131a8c329a543" }
@@ -54,6 +56,8 @@ std = [
"frame-executive/std",
"serai-coins-pallet/std",
"serai-validator-sets-pallet/std",
"serai-signals-pallet/std",
]
try-runtime = [
@@ -66,6 +70,8 @@ try-runtime = [
"frame-executive/try-runtime",
"serai-coins-pallet/try-runtime",
"serai-validator-sets-pallet/try-runtime",
"serai-signals-pallet/try-runtime",
]
runtime-benchmarks = [
@@ -73,6 +79,9 @@ runtime-benchmarks = [
"frame-system/runtime-benchmarks",
"frame-support/runtime-benchmarks",
"serai-validator-sets-pallet/runtime-benchmarks",
"serai-signals-pallet/runtime-benchmarks",
]
default = ["std"]

View File

@@ -82,6 +82,12 @@ mod runtime {
#[runtime::pallet_index(3)]
pub type LiquidityTokens = serai_coins_pallet::Pallet<Runtime, LiquidityTokensInstance>;
#[runtime::pallet_index(4)]
pub type ValidatorSets = serai_validator_sets_pallet::Pallet<Runtime>;
#[runtime::pallet_index(5)]
pub type Signals = serai_signals_pallet::Pallet<Runtime>;
}
impl frame_system::Config for Runtime {
@@ -127,13 +133,16 @@ impl frame_system::Config for Runtime {
impl core_pallet::Config for Runtime {}
impl serai_coins_pallet::Config<CoinsInstance> for Runtime {
type RuntimeEvent = RuntimeEvent;
type AllowMint = serai_coins_pallet::AlwaysAllowMint; // TODO
}
impl serai_coins_pallet::Config<LiquidityTokensInstance> for Runtime {
type RuntimeEvent = RuntimeEvent;
type AllowMint = serai_coins_pallet::AlwaysAllowMint;
}
impl serai_validator_sets_pallet::Config for Runtime {}
impl serai_signals_pallet::Config for Runtime {
type RetirementValidityDuration = sp_core::ConstU64<0>; // TODO
type RetirementLockInDuration = sp_core::ConstU64<0>; // TODO
}
impl From<Option<SeraiAddress>> for RuntimeOrigin {
fn from(signer: Option<SeraiAddress>) -> Self {

View File

@@ -27,10 +27,9 @@ sp-io = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "d4624c
frame-system = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "d4624c561765c13b38eb566e435131a8c329a543", default-features = false }
frame-support = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "d4624c561765c13b38eb566e435131a8c329a543", default-features = false }
serai-primitives = { path = "../primitives", default-features = false }
serai-abi = { path = "../abi", default-features = false, features = ["substrate"] }
validator-sets-pallet = { package = "serai-validator-sets-pallet", path = "../validator-sets", default-features = false }
in-instructions-pallet = { package = "serai-in-instructions-pallet", path = "../in-instructions", default-features = false }
[features]
std = [
@@ -42,10 +41,9 @@ std = [
"frame-system/std",
"frame-support/std",
"serai-primitives/std",
"serai-abi/std",
"validator-sets-pallet/std",
"in-instructions-pallet/std",
]
runtime-benchmarks = [

View File

@@ -0,0 +1 @@
# Serai Signals Pallet

View File

@@ -1,33 +1,31 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]
#[allow(
deprecated,
unreachable_patterns,
clippy::let_unit_value,
clippy::cast_possible_truncation,
clippy::ignored_unit_patterns
)] // TODO
extern crate alloc;
#[expect(clippy::cast_possible_truncation)]
#[frame_support::pallet]
pub mod pallet {
use sp_core::sr25519::Public;
use sp_io::hashing::blake2_256;
use serai_abi::{primitives::{prelude::*, signals::*}, SubstrateBlock};
use frame_system::pallet_prelude::*;
// False positive
#[allow(unused)]
use frame_support::{pallet_prelude::*, sp_runtime};
use frame_support::pallet_prelude::*;
use serai_primitives::*;
use serai_signals_primitives::SignalId;
use validator_sets_pallet::{primitives::ValidatorSet, Config as VsConfig, Pallet as VsPallet};
use in_instructions_pallet::{Config as IiConfig, Pallet as InInstructions};
use validator_sets_pallet::{Config as VsConfig, Pallet as VsPallet};
#[pallet::config]
pub trait Config: frame_system::Config<AccountId = Public> + VsConfig + IiConfig {
type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent> + From<Event<Self>>;
type RetirementValidityDuration: Get<u32>;
type RetirementLockInDuration: Get<u32>;
pub trait Config: frame_system::Config<AccountId = Public, Block = SubstrateBlock> + VsConfig {
/// How long a candidate retirement signal is valid for.
///
/// This MUST be equal to the rate at which new sets are attempted.
// TODO: Fetch from `validator_sets::Config`.
type RetirementValidityDuration: Get<u64>;
/// How long a retirement signal is locked-in for before retirement.
type RetirementLockInDuration: Get<u64>;
}
#[pallet::genesis_config]
@@ -43,8 +41,12 @@ pub mod pallet {
#[pallet::genesis_build]
impl<T: Config> BuildGenesisConfig for GenesisConfig<T> {
fn build(&self) {
// Assert the validity duration is less than the lock-in duration so lock-in periods
// automatically invalidate other retirement signals
/*
Assert the validity duration is less than the lock-in duration.
This way, while the signal is locked-in, any/all other candidate retirement signals
will expire.
*/
assert!(T::RetirementValidityDuration::get() < T::RetirementLockInDuration::get());
}
}
@@ -52,204 +54,200 @@ pub mod pallet {
#[pallet::pallet]
pub struct Pallet<T>(PhantomData<T>);
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen)]
pub struct RegisteredRetirementSignal<T: Config> {
in_favor_of: [u8; 32],
registrant: T::AccountId,
registered_at: BlockNumberFor<T>,
}
impl<T: Config> RegisteredRetirementSignal<T> {
fn id(&self) -> [u8; 32] {
let mut preimage = b"Signal".to_vec();
preimage.extend(&self.encode());
blake2_256(&preimage)
}
}
/// The registered retirement signals.
#[pallet::storage]
type RegisteredRetirementSignals<T: Config> =
StorageMap<_, Blake2_128Concat, [u8; 32], RegisteredRetirementSignal<T>, OptionQuery>;
StorageMap<_, Blake2_128Concat, [u8; 32], RegisteredRetirementSignal, OptionQuery>;
/// The registered favors.
#[pallet::storage]
pub type Favors<T: Config> = StorageDoubleMap<
type Favors<T: Config> = StorageDoubleMap<
_,
Blake2_128Concat,
(SignalId, NetworkId),
(Signal, NetworkId),
Blake2_128Concat,
T::AccountId,
(),
OptionQuery,
>;
/// The networks in favor of a signal.
#[pallet::storage]
pub type SetsInFavor<T: Config> =
StorageMap<_, Blake2_128Concat, (SignalId, ValidatorSet), (), OptionQuery>;
type NetworksInFavor<T: Config> =
StorageMap<_, Blake2_128Concat, (Signal, NetworkId), (), OptionQuery>;
/// The locked-in retirement signal.
///
/// This is in the format `(protocol_id, retirement_block)`.
#[pallet::storage]
pub type LockedInRetirement<T: Config> =
StorageValue<_, ([u8; 32], BlockNumberFor<T>), OptionQuery>;
type LockedInRetirement<T: Config> =
StorageValue<_, (ProtocolId, BlockNumberFor<T>), OptionQuery>;
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
RetirementSignalRegistered {
signal_id: [u8; 32],
in_favor_of: [u8; 32],
registrant: T::AccountId,
},
RetirementSignalRevoked {
signal_id: [u8; 32],
},
SignalFavored {
signal_id: SignalId,
by: T::AccountId,
for_network: NetworkId,
},
SetInFavor {
signal_id: SignalId,
set: ValidatorSet,
},
RetirementSignalLockedIn {
signal_id: [u8; 32],
},
SetNoLongerInFavor {
signal_id: SignalId,
set: ValidatorSet,
},
FavorRevoked {
signal_id: SignalId,
by: T::AccountId,
for_network: NetworkId,
},
AgainstSignal {
signal_id: SignalId,
who: T::AccountId,
for_network: NetworkId,
},
/// Halted networks.
///
/// Halted networks will be halted for the remainder of this protocol's lifetime.
#[pallet::storage]
type Halted<T: Config> = StorageMap<_, Identity, ExternalNetworkId, (), OptionQuery>;
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_initialize(current_number: BlockNumberFor<T>) -> Weight {
/*
If this is the block at which a locked-in retirement signal has been locked-in for long
enough, panic, halting the blockchain, and retiring the current protocol.
*/
if let Some((protocol_id, block_number)) = LockedInRetirement::<T>::get() {
if block_number == current_number {
panic!(
"protocol retired in favor of {}",
sp_core::hexdisplay::HexDisplay::from(&protocol_id)
);
}
}
// Using `Weight::zero()` is fine here as this is a minute operation
Weight::zero()
}
}
#[pallet::error]
pub enum Error<T> {
RetirementSignalLockedIn,
RetirementSignalAlreadyRegistered,
NotRetirementSignalRegistrant,
NonExistentRetirementSignal,
ExpiredRetirementSignal,
NotValidator,
RevokingNonExistentFavor,
}
// 80% threshold
// TODO: Use 34% for halting a set (not 80%)
const REQUIREMENT_NUMERATOR: u64 = 4;
const REQUIREMENT_DIVISOR: u64 = 5;
impl<T: Config> Pallet<T> {
// Returns true if this network's current set is in favor of the signal.
//
// Must only be called for networks which have a set decided.
fn tally_for_network(signal_id: SignalId, network: NetworkId) -> bool {
let this_network_session = VsPallet::<T>::latest_decided_session(network).unwrap();
let this_set = ValidatorSet { network, session: this_network_session };
/// Tally the support for a signal by a network's current validator set.
///
/// This will mutate the storage with the result.
///
/// This returns `true` if the network is sufficiently in favor of the signal.
fn tally_for_network(signal: Signal, network: NetworkId) -> bool {
let Some(current_session) = VsPallet::<T>::current_session(network) else { return false };
let current_set = ValidatorSet { network, session: current_session };
let Some(latest_session) = VsPallet::<T>::latest_decided_session(network) else {
panic!("current session yet no latest decided session")
};
let latest_set = ValidatorSet { network, session: latest_session };
// This is a bounded O(n) (which is still acceptable) due to the infeasibility of caching
// here
// TODO: Make caching feasible? Do a first-pass with cache then actual pass before
// execution?
let mut iter = Favors::<T>::iter_prefix_values((signal_id, network));
let mut needed_favor = (VsPallet::<T>::total_allocated_stake(network).unwrap().0 *
REQUIREMENT_NUMERATOR)
.div_ceil(REQUIREMENT_DIVISOR);
while iter.next().is_some() && (needed_favor != 0) {
let item_key = iter.last_raw_key();
// `.len() - 32` is safe because AccountId is bound to being Public, which is 32 bytes
let account = T::AccountId::decode(&mut &item_key[(item_key.len() - 32) ..]).unwrap();
if VsPallet::<T>::in_latest_decided_set(network, account) {
// This call uses the current allocation, not the allocation at the time of set
// decision
// This is deemed safe due to the validator-set pallet's deallocation scheduling
// unwrap is safe due to being in the latest decided set
needed_favor =
needed_favor.saturating_sub(VsPallet::<T>::allocation((network, account)).unwrap().0);
}
/*
The following uses key shares, not allocations, as key shares are static while allocations
fluctuate during the duration of a validator set.
*/
let mut needed_favor = {
let current = VsPallet::<T>::key_shares(current_set)
.expect("current validator set without key shares set")
.0;
let latest = VsPallet::<T>::key_shares(latest_set)
.expect("latest validator set without key shares set")
.0;
current.max(latest)
};
for (validator, ()) in Favors::<T>::iter_prefix((signal, network)) {
/*
Fetch the amount of key shares the validator has.
This uses the minimum amount of key shares across the current validator set and the
latest decided validator set to ensure this validator represents this network and will
continue to do so.
*/
let key_shares = {
let current = VsPallet::<T>::key_shares_possessed_by_validator(current_set, validator)
.unwrap_or(KeyShares::ZERO);
let latest = VsPallet::<T>::key_shares_possessed_by_validator(latest_set, validator)
.unwrap_or(KeyShares::ZERO);
current.0.min(latest.0)
};
let Some(still_needed_favor) = needed_favor.checked_sub(key_shares) else {
needed_favor = 0;
break;
};
needed_favor = still_needed_favor;
}
if needed_favor == 0 {
// Set the set as in favor until someone triggers a re-tally
//
// Since a re-tally is an extra step we can't assume will occur, this effectively means a
// network in favor across any point in its Session is in favor for its entire Session
// While a malicious actor could increase their stake, favor a signal, then deallocate,
// this is largely prevented by deallocation scheduling
//
// At any given point, only just under 50% of a set can be immediately deallocated
// (if each validator has just under two key shares, they can deallocate the entire amount
// above a single key share)
//
// This means that if a signal has a 67% adoption threshold, and someone executes this
// attack, they still have a majority of the allocated stake (though less of a majority
// than desired)
//
// With the 80% threshold, removing 39.9% creates a 40.1% to 20% ratio, which is still
// the BFT threshold of 67%
if !SetsInFavor::<T>::contains_key((signal_id, this_set)) {
SetsInFavor::<T>::set((signal_id, this_set), Some(()));
Self::deposit_event(Event::SetInFavor { signal_id, set: this_set });
let now_in_favor = needed_favor == 0;
// Update the storage and emit an event, if appropriate
if now_in_favor {
let prior_in_favor = NetworksInFavor::<T>::contains_key((signal, network));
NetworksInFavor::<T>::set((signal, network), Some(()));
if !prior_in_favor {
todo!("Event");
}
true
} else {
if SetsInFavor::<T>::contains_key((signal_id, this_set)) {
// This should no longer be under the current tally
SetsInFavor::<T>::remove((signal_id, this_set));
Self::deposit_event(Event::SetNoLongerInFavor { signal_id, set: this_set });
#[allow(clippy::collapsible_else_if)]
if NetworksInFavor::<T>::take((signal, network)).is_some() {
todo!("Event");
}
false
}
now_in_favor
}
fn tally_for_all_networks(signal_id: SignalId) -> bool {
/// Tally support for a signal across all networks, weighted by stake.
///
/// Returns `true` if the signal has sufficient support.
fn tally_for_all_networks(signal: Signal) -> bool {
let mut total_in_favor_stake = 0;
let mut total_allocated_stake = 0;
for network in serai_primitives::NETWORKS {
let Some(latest_decided_session) = VsPallet::<T>::latest_decided_session(network) else {
continue;
};
// If it has a session, it should have a total allocated stake value
let network_stake = VsPallet::<T>::total_allocated_stake(network).unwrap();
if SetsInFavor::<T>::contains_key((
signal_id,
ValidatorSet { network, session: latest_decided_session },
)) {
for network in NetworkId::all() {
/*
This doesn't consider if the latest decided validator set has considerably less stake,
yet the bound that validators vote by the minimum of their key shares, against the maximum of
the total key shares, should be sufficient in this regard.
*/
let network_stake =
VsPallet::<T>::stake_for_current_validator_set(network).unwrap_or(Amount(0));
if NetworksInFavor::<T>::contains_key((signal, network)) {
total_in_favor_stake += network_stake.0;
}
total_allocated_stake += network_stake.0;
}
total_in_favor_stake >=
(total_allocated_stake * REQUIREMENT_NUMERATOR).div_ceil(REQUIREMENT_DIVISOR)
/*
We use an 80% threshold for retirement, calculated as defined above, but just a 34%
threshold for halting another validator set. This is representative of how 34% of
validators can cause a liveness failure during asynchronous BFT.
*/
let threshold = match signal {
Signal::Retire { .. } => (total_allocated_stake * 4) / 5,
Signal::Halt { .. } => (total_allocated_stake * 2) / 3,
};
total_in_favor_stake > threshold
}
fn revoke_favor_internal(
account: T::AccountId,
signal_id: SignalId,
validator: T::AccountId,
signal: Signal,
for_network: NetworkId,
) -> DispatchResult {
if !Favors::<T>::contains_key((signal_id, for_network), account) {
if !Favors::<T>::contains_key((signal, for_network), validator) {
Err::<(), _>(Error::<T>::RevokingNonExistentFavor)?;
}
Favors::<T>::remove((signal_id, for_network), account);
Self::deposit_event(Event::<T>::FavorRevoked { signal_id, by: account, for_network });
// tally_for_network assumes the network is active, which is implied by having prior set a
// favor for it
// Technically, this tally may make the network in favor and justify re-tallying for all
// networks
// Its assumed not to
Self::tally_for_network(signal_id, for_network);
Favors::<T>::remove((signal, for_network), validator);
// TODO: Event
// Update the tally for this network
Self::tally_for_network(signal, for_network);
Ok(())
}
}
/// An error from the `signals` pallet.
#[pallet::error]
pub enum Error<T> {
/// A retirement signal has already been locked in.
RetirementSignalLockedIn,
/// This retirement signal has already been registered.
RetirementSignalAlreadyRegistered,
/// The caller is not the registrant of the retirement signal.
NotRetirementSignalRegistrant,
/// The retirement signal does not exist.
NonExistentRetirementSignal,
/// The retirement signal has expired.
ExpiredRetirementSignal,
/// The caller is already in favor.
AlreadyInFavor,
/// Revoking favor when no favor has been expressed.
RevokingNonExistentFavor,
}
#[pallet::call]
impl<T: Config> Pallet<T> {
/// Register a retirement signal, declaring the consensus protocol this signal is in favor of.
@@ -257,7 +255,7 @@ pub mod pallet {
/// Retirement signals are registered so that the proposer, presumably a developer, can revoke
/// the signal if there's a fault discovered.
#[pallet::call_index(0)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn register_retirement_signal(
origin: OriginFor<T>,
in_favor_of: [u8; 32],
@@ -267,14 +265,17 @@ pub mod pallet {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
}
let account = ensure_signed(origin)?;
let validator = ensure_signed(origin)?;
// Bind the signal ID to the proposer
// This prevents a malicious actor from frontrunning a proposal, causing them to be the
// registrant, just to cancel it later
/*
Bind the signal ID to the proposer.
This prevents a malicious actor from frontrunning a proposal, causing them to be the
registrant, just to cancel it later.
*/
let signal = RegisteredRetirementSignal {
in_favor_of,
registrant: account,
registrant: validator.into(),
registered_at: frame_system::Pallet::<T>::block_number(),
};
let signal_id = signal.id();
@@ -282,122 +283,108 @@ pub mod pallet {
if RegisteredRetirementSignals::<T>::get(signal_id).is_some() {
Err::<(), _>(Error::<T>::RetirementSignalAlreadyRegistered)?;
}
Self::deposit_event(Event::<T>::RetirementSignalRegistered {
signal_id,
in_favor_of,
registrant: account,
});
RegisteredRetirementSignals::<T>::set(signal_id, Some(signal));
// TODO: Event
Ok(())
}
/// Revoke a retirement signal.
#[pallet::call_index(1)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn revoke_retirement_signal(
origin: OriginFor<T>,
retirement_signal_id: [u8; 32],
retirement_signal: [u8; 32],
) -> DispatchResult {
let account = ensure_signed(origin)?;
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(retirement_signal_id)
let validator = ensure_signed(origin)?;
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(retirement_signal)
else {
return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());
};
if account != registered_signal.registrant {
if SeraiAddress::from(validator) != registered_signal.registrant {
Err::<(), _>(Error::<T>::NotRetirementSignalRegistrant)?;
}
RegisteredRetirementSignals::<T>::remove(retirement_signal_id);
RegisteredRetirementSignals::<T>::remove(retirement_signal);
// If this signal was locked in, remove it
// This lets a post-lock-in discovered fault be prevented from going live without
// intervention by all validators
if LockedInRetirement::<T>::get().map(|(signal_id, _block_number)| signal_id) ==
Some(retirement_signal_id)
/*
If this signal was locked in, remove it.
This lets a post-lock-in discovered fault be prevented from going live without intervention
by a supermajority of validators.
*/
if LockedInRetirement::<T>::get().map(|(signal, _block_number)| signal) ==
Some(retirement_signal)
{
LockedInRetirement::<T>::kill();
}
Self::deposit_event(Event::<T>::RetirementSignalRevoked { signal_id: retirement_signal_id });
// TODO: Event
Ok(())
}
/// Favor a signal.
#[pallet::call_index(2)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn favor(
origin: OriginFor<T>,
signal_id: SignalId,
signal: Signal,
for_network: NetworkId,
) -> DispatchResult {
let account = ensure_signed(origin)?;
let validator = ensure_signed(origin)?;
// If this is a retirement signal, perform the relevant checks
if let SignalId::Retirement(signal_id) = signal_id {
// Make sure a retirement hasn't already been locked in
if LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
}
// Perform the relevant checks for this class of signal
match signal {
Signal::Retire { signal_id } => {
// Make sure a retirement hasn't already been locked in
if LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
}
// Make sure this is a registered retirement
// We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration
// process
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(signal_id) else {
return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into());
};
/*
Make sure this is a registered retirement.
// Check the signal isn't out of date
// This isn't truly necessary since we only track votes from the most recent validator
// sets, ensuring modern relevancy
// The reason to still have it is because locking in a dated runtime may cause a corrupt
// blockchain and lead to a failure in system integrity
// `Halt`, which doesn't have this check, at worst causes temporary downtime
if (registered_signal.registered_at + T::RetirementValidityDuration::get().into()) <
frame_system::Pallet::<T>::block_number()
{
Err::<(), _>(Error::<T>::ExpiredRetirementSignal)?;
}
We don't have to do this for a `Halt` signal as `Halt` doesn't have the registration
process.
*/
let Some(registered_signal) = RegisteredRetirementSignals::<T>::get(signal_id) else {
return Err::<(), _>(Error::<T>::NonExistentRetirementSignal.into())
};
// Check the signal isn't out of date, and its tallies with it.
if (registered_signal.registered_at + T::RetirementValidityDuration::get()) <
frame_system::Pallet::<T>::block_number()
{
Err::<(), _>(Error::<T>::ExpiredRetirementSignal)?;
}
},
Signal::Halt { .. } => {}
}
// Check the signer is a validator
// Technically, in the case of Serai, this will check they're planned to be in the next set,
// not that they are in the current set
// This is a practical requirement due to the lack of tracking historical allocations, and
// fine for the purposes here
if !VsPallet::<T>::in_latest_decided_set(for_network, account) {
Err::<(), _>(Error::<T>::NotValidator)?;
if Favors::<T>::contains_key((signal, for_network), validator) {
Err::<(), _>(Error::<T>::AlreadyInFavor)?;
}
// Set them as in-favor
// Doesn't error if they already voted in order to let any validator trigger a re-tally
if !Favors::<T>::contains_key((signal_id, for_network), account) {
Favors::<T>::set((signal_id, for_network), account, Some(()));
Self::deposit_event(Event::SignalFavored { signal_id, by: account, for_network });
}
// Set the validator as in favor
Favors::<T>::set((signal, for_network), validator, Some(()));
// TODO: Event
// Check if the network is in favor
// tally_for_network expects the network to be active, which is implied by being in the
// latest decided set
let network_in_favor = Self::tally_for_network(signal_id, for_network);
let network_in_favor = Self::tally_for_network(signal, for_network);
// If this network is in favor, check if enough networks are
// We could optimize this by only running the following code when the network is *newly* in
// favor
// Re-running the following code ensures that if networks' allocated stakes change relative
// to each other, any new votes will cause a re-tally
if network_in_favor {
if network_in_favor && Self::tally_for_all_networks(signal) {
// If enough are, lock in the signal
if Self::tally_for_all_networks(signal_id) {
match signal_id {
SignalId::Retirement(signal_id) => {
LockedInRetirement::<T>::set(Some((
signal_id,
frame_system::Pallet::<T>::block_number() +
T::RetirementLockInDuration::get().into(),
)));
Self::deposit_event(Event::RetirementSignalLockedIn { signal_id });
}
SignalId::Halt(network) => {
InInstructions::<T>::halt(network)?;
}
match signal {
Signal::Retire { signal_id } => {
LockedInRetirement::<T>::set(Some((
signal_id,
frame_system::Pallet::<T>::block_number() + T::RetirementLockInDuration::get()
)));
// TODO: Event
}
Signal::Halt(network) => {
Halted::<T>::set(network, Some(()));
// TODO: Event
}
}
}
@@ -407,75 +394,110 @@ pub mod pallet {
/// Revoke favor into an abstaining position.
#[pallet::call_index(3)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn revoke_favor(
origin: OriginFor<T>,
signal_id: SignalId,
signal: Signal,
for_network: NetworkId,
) -> DispatchResult {
if matches!(&signal_id, SignalId::Retirement(_)) && LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
match signal {
Signal::Retire { .. } => {
if LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
}
}
Signal::Halt { .. } => {}
}
// Doesn't check the signal exists due to later checking the favor exists
// While the signal may have been revoked, making this pointless, it's not worth the storage
// read on every call to check
// Since revoke will re-tally, this does technically mean a network will become in-favor of a
// revoked signal. Since revoke won't re-tally for all networks/lock-in, this is also fine
Self::revoke_favor_internal(ensure_signed(origin)?, signal_id, for_network)
let validator = ensure_signed(origin)?;
Self::revoke_favor_internal(validator, signal, for_network)
}
/// Emit an event standing against the signal.
///
/// While disapprovals aren't tracked explicitly, this is used to at least label a validator's
/// opinion and allow better collection of data.
///
/// If the origin is currently in favor of the signal, their favor will be revoked.
#[pallet::call_index(4)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn stand_against(
origin: OriginFor<T>,
signal_id: SignalId,
signal: Signal,
for_network: NetworkId,
) -> DispatchResult {
if LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
match signal {
Signal::Retire { .. } => {
if LockedInRetirement::<T>::exists() {
Err::<(), _>(Error::<T>::RetirementSignalLockedIn)?;
}
}
Signal::Halt { .. } => {}
}
let account = ensure_signed(origin)?;
let validator = ensure_signed(origin)?;
// If currently in favor, revoke the favor
if Favors::<T>::contains_key((signal_id, for_network), account) {
Self::revoke_favor_internal(account, signal_id, for_network)?;
if Favors::<T>::contains_key((signal, for_network), validator) {
Self::revoke_favor_internal(validator, signal, for_network)?;
} else {
// Check this Signal exists (which would've been implied by Favors for it existing)
if let SignalId::Retirement(signal_id) = signal_id {
if RegisteredRetirementSignals::<T>::get(signal_id).is_none() {
Err::<(), _>(Error::<T>::NonExistentRetirementSignal)?;
// Check this Signal exists (which would've been implied by `Favors` for it existing)
match signal {
Signal::Retire { signal_id } => {
if RegisteredRetirementSignals::<T>::get(signal_id).is_none() {
Err::<(), _>(Error::<T>::NonExistentRetirementSignal)?;
}
}
Signal::Halt { .. } => {}
}
}
// Emit an event that we're against the signal
// No actual effects happen besides this
Self::deposit_event(Event::<T>::AgainstSignal { signal_id, who: account, for_network });
// Emit the event
// TODO: Event
Ok(())
}
}
#[pallet::hooks]
impl<T: Config> Hooks<BlockNumberFor<T>> for Pallet<T> {
fn on_initialize(current_number: BlockNumberFor<T>) -> Weight {
// If this is the block at which a locked-in signal has been set for long enough, panic
// This will prevent this block from executing and halt the chain
if let Some((signal, block_number)) = LockedInRetirement::<T>::get() {
if block_number == current_number {
panic!(
"locked-in signal {} has been set for too long",
sp_core::hexdisplay::HexDisplay::from(&signal),
);
}
}
Weight::zero() // TODO
}
/* TODO
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {
RetirementSignalRegistered {
signal: [u8; 32],
in_favor_of: [u8; 32],
registrant: T::AccountId,
},
RetirementSignalRevoked {
signal_id: [u8; 32],
},
SignalFavored {
signal_id: Signal,
by: T::AccountId,
for_network: NetworkId,
},
SetInFavor {
signal_id: Signal,
set: ValidatorSet,
},
RetirementSignalLockedIn {
signal_id: [u8; 32],
},
SetNoLongerInFavor {
signal_id: Signal,
set: ValidatorSet,
},
FavorRevoked {
signal_id: Signal,
by: T::AccountId,
for_network: NetworkId,
},
AgainstSignal {
signal_id: Signal,
who: T::AccountId,
for_network: NetworkId,
},
}
*/
}
pub use pallet::*;

View File

@@ -1,6 +1,6 @@
use sp_core::{Encode, sr25519::Public};
use serai_primitives::{constants::MAX_KEY_SHARES_PER_SET, network_id::NetworkId, balance::Amount};
use serai_primitives::{network_id::NetworkId, balance::Amount, validator_sets::KeyShares};
use frame_support::storage::{StorageMap, StoragePrefixedMap};
@@ -63,7 +63,7 @@ pub(crate) trait Allocations {
) -> impl Iterator<Item = (Public, Amount)>;
/// Calculate the expected key shares for a network, per the current allocations.
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64;
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> KeyShares;
}
/// Reverses the lexicographic order of a given byte array.
@@ -149,17 +149,16 @@ impl<Storage: AllocationsStorage> Allocations for Storage {
.filter(move |(_key, allocation)| *allocation >= minimum_allocation)
}
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> u64 {
fn expected_key_shares(network: NetworkId, allocation_per_key_share: Amount) -> KeyShares {
let mut total_key_shares = 0;
for (_, amount) in Self::iter_allocations(network, allocation_per_key_share) {
let key_shares = amount.0 / allocation_per_key_share.0;
total_key_shares += key_shares;
total_key_shares += KeyShares::from_allocation(amount, allocation_per_key_share).0;
if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
if total_key_shares >= KeyShares::MAX_PER_SET {
break;
}
}
total_key_shares
KeyShares::saturating_from(total_key_shares)
}
}

View File

@@ -3,6 +3,7 @@
#![cfg_attr(not(feature = "std"), no_std)]
extern crate alloc;
use alloc::vec::Vec;
mod embedded_elliptic_curve_keys;
use embedded_elliptic_curve_keys::*;
@@ -72,7 +73,7 @@ impl<T: pallet::Config> GetValidatorCount for MembershipProof<T> {
}
*/
#[expect(clippy::ignored_unit_patterns, clippy::cast_possible_truncation)]
#[expect(clippy::cast_possible_truncation)]
#[frame_support::pallet]
mod pallet {
use sp_core::sr25519::Public;
@@ -81,7 +82,12 @@ mod pallet {
use frame_support::pallet_prelude::*;
use serai_primitives::{
crypto::KeyPair, network_id::*, coin::*, balance::*, validator_sets::*, address::SeraiAddress,
crypto::KeyPair,
network_id::*,
coin::*,
balance::*,
validator_sets::{Session, ExternalValidatorSet, ValidatorSet, KeyShares as KeySharesStruct},
address::SeraiAddress,
};
use coins_pallet::Pallet as Coins;
@@ -89,9 +95,7 @@ mod pallet {
use super::*;
#[pallet::config]
pub trait Config: frame_system::Config + coins_pallet::Config {
type RuntimeEvent: IsType<<Self as frame_system::Config>::RuntimeEvent> + From<Event<Self>>;
pub trait Config: frame_system::Config + coins_pallet::Config<coins_pallet::CoinsInstance> {
// type ShouldEndSession: ShouldEndSession<BlockNumberFor<Self>>;
}
@@ -199,10 +203,12 @@ mod pallet {
type CurrentSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;
#[pallet::storage]
type LatestDecidedSession<T: Config> = StorageMap<_, Identity, NetworkId, Session, OptionQuery>;
#[pallet::storage]
type KeyShares<T: Config> = StorageMap<_, Identity, ValidatorSet, KeySharesStruct, OptionQuery>;
// This has to use `Identity` per the documentation of `SessionsStorage`
#[pallet::storage]
type SelectedValidators<T: Config> =
StorageMap<_, Identity, SelectedValidatorsKey, u64, OptionQuery>;
StorageMap<_, Identity, SelectedValidatorsKey, KeySharesStruct, OptionQuery>;
#[pallet::storage]
type TotalAllocatedStake<T: Config> = StorageMap<_, Identity, NetworkId, Amount, OptionQuery>;
#[pallet::storage]
@@ -214,15 +220,12 @@ mod pallet {
type AllocationPerKeyShare = AllocationPerKeyShare<T>;
type CurrentSession = CurrentSession<T>;
type LatestDecidedSession = LatestDecidedSession<T>;
type KeyShares = KeyShares<T>;
type SelectedValidators = SelectedValidators<T>;
type TotalAllocatedStake = TotalAllocatedStake<T>;
type DelayedDeallocations = DelayedDeallocations<T>;
}
#[pallet::event]
#[pallet::generate_deposit(pub(super) fn deposit_event)]
pub enum Event<T: Config> {}
/*
/// The generated key pair for a given validator set instance.
#[pallet::storage]
@@ -347,6 +350,45 @@ mod pallet {
SeraiAddress::system(b"ValidatorSets").into()
}
/// The current session for a network.
pub fn current_session(network: NetworkId) -> Option<Session> {
Abstractions::<T>::current_session(network)
}
/// The latest decided session for a network.
pub fn latest_decided_session(network: NetworkId) -> Option<Session> {
Abstractions::<T>::latest_decided_session(network)
}
/// The amount of key shares a validator set has.
///
/// Returns `None` for historic sessions which we no longer have the data for.
pub fn key_shares(set: ValidatorSet) -> Option<KeySharesStruct> {
Abstractions::<T>::key_shares(set)
}
/// If a validator is present within the specified validator set.
///
/// This MAY return `false` for _any_ historic session, even if the validator _was_ present.
pub fn in_validator_set(set: ValidatorSet, validator: Public) -> bool {
Abstractions::<T>::in_validator_set(set, validator)
}
/// The key shares possessed by a validator, within a validator set.
///
/// This MAY return `None` for _any_ historic session, even if the validator _was_ present.
pub fn key_shares_possessed_by_validator(
set: ValidatorSet,
validator: Public,
) -> Option<KeySharesStruct> {
Abstractions::<T>::key_shares_possessed_by_validator(set, validator)
}
/// The stake for the current validator set.
pub fn stake_for_current_validator_set(network: NetworkId) -> Option<Amount> {
Abstractions::<T>::stake_for_current_validator_set(network)
}
/*
// is_bft returns if the network is able to survive any single node becoming byzantine.
fn is_bft(network: NetworkId) -> bool {
@@ -725,7 +767,7 @@ mod pallet {
impl<T: Config> Pallet<T> {
/*
#[pallet::call_index(0)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Operational))] // TODO
pub fn set_keys(
origin: OriginFor<T>,
network: ExternalNetworkId,
@@ -758,7 +800,7 @@ mod pallet {
}
#[pallet::call_index(1)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Operational))] // TODO
pub fn report_slashes(
origin: OriginFor<T>,
network: ExternalNetworkId,
@@ -787,7 +829,7 @@ mod pallet {
*/
#[pallet::call_index(2)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn set_embedded_elliptic_curve_keys(
origin: OriginFor<T>,
keys: serai_primitives::crypto::SignedEmbeddedEllipticCurveKeys,
@@ -801,50 +843,42 @@ mod pallet {
}
#[pallet::call_index(3)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn allocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let validator = ensure_signed(origin)?;
Coins::<T>::transfer_fn(validator, Self::account(), Balance { coin: Coin::Serai, amount })?;
Coins::<T, coins_pallet::CoinsInstance>::transfer_fn(validator, Self::account(), Balance { coin: Coin::Serai, amount })?;
Abstractions::<T>::increase_allocation(network, validator, amount, false)
.map_err(Error::<T>::AllocationError)?;
Ok(())
}
#[pallet::call_index(4)]
#[pallet::weight(0)] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn deallocate(origin: OriginFor<T>, network: NetworkId, amount: Amount) -> DispatchResult {
let account = ensure_signed(origin)?;
let deallocation_timeline = Abstractions::<T>::decrease_allocation(network, account, amount)
.map_err(Error::<T>::DeallocationError)?;
if matches!(deallocation_timeline, DeallocationTimeline::Immediate) {
Coins::<T>::transfer_fn(Self::account(), account, Balance { coin: Coin::Serai, amount })?;
Coins::<T, coins_pallet::CoinsInstance>::transfer_fn(Self::account(), account, Balance { coin: Coin::Serai, amount })?;
}
Ok(())
}
/*
#[pallet::call_index(5)]
#[pallet::weight((0, DispatchClass::Operational))] // TODO
#[pallet::weight((0, DispatchClass::Normal))] // TODO
pub fn claim_deallocation(
origin: OriginFor<T>,
network: NetworkId,
session: Session,
) -> DispatchResult {
let account = ensure_signed(origin)?;
let Some(amount) = Self::take_deallocatable_amount(network, session, account) else {
Err(Error::<T>::NonExistentDeallocation)?
};
Coins::<T>::transfer_fn(
Self::account(),
account,
Balance { coin: Coin::Serai, amount },
)?;
Self::deposit_event(Event::DeallocationClaimed { validator: account, network, session });
let amount = Abstractions::<T>::claim_delayed_deallocation(account, network, session)
.map_err(Error::<T>::DeallocationError)?;
Coins::<T, coins_pallet::CoinsInstance>::transfer_fn(Self::account(), account, Balance { coin: Coin::Serai, amount })?;
Ok(())
}
*/
}
/*

View File

@@ -1,10 +1,10 @@
use alloc::vec::Vec;
use sp_core::{Encode, Decode, ConstU32, sr25519::Public, bounded::BoundedVec};
use serai_primitives::{
constants::{MAX_KEY_SHARES_PER_SET, MAX_KEY_SHARES_PER_SET_U32},
network_id::NetworkId,
balance::Amount,
validator_sets::{Session, ValidatorSet, amortize_excess_key_shares},
validator_sets::{KeyShares as KeySharesStruct, Session, ValidatorSet},
};
use frame_support::storage::{StorageValue, StorageMap, StorageDoubleMap, StoragePrefixedMap};
@@ -12,7 +12,8 @@ use frame_support::storage::{StorageValue, StorageMap, StorageDoubleMap, Storage
use crate::{embedded_elliptic_curve_keys::EmbeddedEllipticCurveKeys, allocations::Allocations};
/// The list of genesis validators.
pub(crate) type GenesisValidators = BoundedVec<Public, ConstU32<{ MAX_KEY_SHARES_PER_SET_U32 }>>;
pub(crate) type GenesisValidators =
BoundedVec<Public, ConstU32<{ KeySharesStruct::MAX_PER_SET_U32 }>>;
/// The key for the SelectedValidators map.
pub(crate) type SelectedValidatorsKey = (ValidatorSet, [u8; 16], Public);
@@ -38,14 +39,23 @@ pub(crate) trait SessionsStorage: EmbeddedEllipticCurveKeys + Allocations {
/// This is opaque and to be exclusively read/write by `Sessions`.
type LatestDecidedSession: StorageMap<NetworkId, Session, Query = Option<Session>>;
/// The amount of key shares a validator set has.
///
/// This is opaque and to be exclusively read/write by `Sessions`.
type KeyShares: StorageMap<ValidatorSet, KeySharesStruct, Query = Option<KeySharesStruct>>;
/// The selected validators for a set.
///
/// This MUST be instantiated with a map using `Identity` for its hasher.
///
/// This is opaque and to be exclusively read/write by `Sessions`.
// The value is how many key shares the validator has.
type SelectedValidators: StorageMap<SelectedValidatorsKey, u64, Query = Option<u64>>
+ StoragePrefixedMap<u64>;
#[rustfmt::skip]
type SelectedValidators: StorageMap<
SelectedValidatorsKey,
KeySharesStruct,
Query = Option<KeySharesStruct>
> + StoragePrefixedMap<KeySharesStruct>;
/// The total allocated stake for a network.
///
@@ -64,9 +74,9 @@ fn selected_validators_key(set: ValidatorSet, key: Public) -> SelectedValidators
(set, hash, key)
}
fn selected_validators<Storage: StoragePrefixedMap<u64>>(
fn selected_validators<Storage: StoragePrefixedMap<KeySharesStruct>>(
set: ValidatorSet,
) -> impl Iterator<Item = (Public, u64)> {
) -> impl Iterator<Item = (Public, KeySharesStruct)> {
let mut prefix = Storage::final_prefix().to_vec();
prefix.extend(&set.encode());
frame_support::storage::PrefixIterator::<_, ()>::new(
@@ -77,13 +87,13 @@ fn selected_validators<Storage: StoragePrefixedMap<u64>>(
// Recover the validator's key from the storage key
<[u8; 32]>::try_from(&key[(key.len() - 32) ..]).unwrap().into(),
// Decode the key shares from the value
u64::decode(&mut key_shares).unwrap(),
KeySharesStruct::decode(&mut key_shares).unwrap(),
))
},
)
}
fn clear_selected_validators<Storage: StoragePrefixedMap<u64>>(set: ValidatorSet) {
fn clear_selected_validators<Storage: StoragePrefixedMap<KeySharesStruct>>(set: ValidatorSet) {
let mut prefix = Storage::final_prefix().to_vec();
prefix.extend(&set.encode());
assert!(matches!(
@@ -123,6 +133,10 @@ pub enum DeallocationError {
NotEnoughAllocated,
/// The remaining allocation was non-zero and would be less than a key share.
RemainingAllocationLessThanKeyShare,
/// The delay has yet to be satisfied.
DelayNotSatisfied,
/// No delayed deallocation was present.
NoDelayedDeallocation,
}
pub(crate) trait Sessions {
@@ -164,6 +178,43 @@ pub(crate) trait Sessions {
validator: Public,
amount: Amount,
) -> Result<DeallocationTimeline, DeallocationError>;
/// Claim a delayed deallocation.
///
/// This does not perform any transfers of coins/tokens. It solely performs the associated
/// book-keeping.
fn claim_delayed_deallocation(
validator: Public,
network: NetworkId,
session: Session,
) -> Result<Amount, DeallocationError>;
/// The currently active session for a network.
fn current_session(network: NetworkId) -> Option<Session>;
/// The latest decided session for a network.
fn latest_decided_session(network: NetworkId) -> Option<Session>;
/// The amount of key shares a validator has.
///
/// Returns `None` for historic sessions which we no longer have the data for.
fn key_shares(set: ValidatorSet) -> Option<KeySharesStruct>;
/// If a validator is present within the specified validator set.
///
/// This MAY return `false` for _any_ historic session, even if the validator _was_ present.
fn in_validator_set(set: ValidatorSet, validator: Public) -> bool;
/// The key shares possessed by a validator, within a validator set.
///
/// This MAY return `None` for _any_ historic session, even if the validator _was_ present.
fn key_shares_possessed_by_validator(
set: ValidatorSet,
validator: Public,
) -> Option<KeySharesStruct>;
/// The stake for the current validator set.
fn stake_for_current_validator_set(network: NetworkId) -> Option<Amount>;
}
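// Illustrative only, not part of this change: a hedged sketch combining the new read-only
// accessors to check membership in the latest decided set; the helper itself is hypothetical.
fn in_latest_decided_set<S: Sessions>(network: NetworkId, validator: Public) -> bool {
  match S::latest_decided_session(network) {
    Some(session) => S::in_validator_set(ValidatorSet { network, session }, validator),
    None => false,
  }
}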
impl<Storage: SessionsStorage> Sessions for Storage {
@@ -188,45 +239,40 @@ impl<Storage: SessionsStorage> Sessions for Storage {
}
}
let mut selected_validators = Vec::with_capacity(usize::from(MAX_KEY_SHARES_PER_SET / 2));
let mut selected_validators = Vec::with_capacity(usize::from(KeySharesStruct::MAX_PER_SET / 2));
let mut total_key_shares = 0;
if let Some(allocation_per_key_share) = Storage::AllocationPerKeyShare::get(network) {
for (validator, amount) in Self::iter_allocations(network, allocation_per_key_share) {
// If this allocation is absurd, causing the key-share count to not fit within a u16, bound it to the max
let key_shares = amount.0 / allocation_per_key_share.0;
let key_shares = KeySharesStruct::from_allocation(amount, allocation_per_key_share);
selected_validators.push((validator, key_shares));
// We're tracking key shares as a u64 yet the max allowed is a u16, so this won't overflow
total_key_shares += key_shares;
if total_key_shares >= u64::from(MAX_KEY_SHARES_PER_SET) {
total_key_shares += key_shares.0;
if total_key_shares >= KeySharesStruct::MAX_PER_SET {
break;
}
}
}
// Perform amortization if we've exceeded the maximum amount of key shares
// This is guaranteed not to cause any validators to have zero key shares, as we'd only be over
// if the last-added (worst) validator had multiple key shares, meaning everyone has more shares
// than we'll amortize here
amortize_excess_key_shares(selected_validators.as_mut_slice());
{
let new_len = KeySharesStruct::amortize_excess(selected_validators.as_mut_slice());
selected_validators.truncate(new_len);
}
if include_genesis_validators {
let mut genesis_validators = Storage::GenesisValidators::get()
.expect("genesis validators wasn't set")
.into_iter()
.map(|validator| (validator, 1))
.map(|validator| (validator, KeySharesStruct::ONE))
.collect::<Vec<_>>();
let genesis_validator_key_shares = u64::try_from(genesis_validators.len()).unwrap();
while (total_key_shares + genesis_validator_key_shares) > u64::from(MAX_KEY_SHARES_PER_SET) {
let genesis_validator_key_shares = u16::try_from(genesis_validators.len()).unwrap();
total_key_shares += genesis_validator_key_shares;
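// Make room for the genesis validators by evicting the lowest-ranked selected validators
// until the total is back within the per-set cap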
while total_key_shares > KeySharesStruct::MAX_PER_SET {
let (_key, key_shares) = selected_validators.pop().unwrap();
total_key_shares -= key_shares;
total_key_shares -= key_shares.0;
}
selected_validators.append(&mut genesis_validators);
total_key_shares += genesis_validator_key_shares;
}
// We kept this accurate but don't actually further read from it
let _ = total_key_shares;
let latest_decided_session = Storage::LatestDecidedSession::mutate(network, |session| {
let next_session = session.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
*session = Some(next_session);
@@ -234,6 +280,10 @@ impl<Storage: SessionsStorage> Sessions for Storage {
});
let latest_decided_set = ValidatorSet { network, session: latest_decided_session };
Storage::KeyShares::insert(
latest_decided_set,
KeySharesStruct::try_from(total_key_shares).expect("amortization failure"),
);
for (key, key_shares) in selected_validators {
Storage::SelectedValidators::insert(
selected_validators_key(latest_decided_set, key),
@@ -271,10 +321,9 @@ impl<Storage: SessionsStorage> Sessions for Storage {
// Clean-up the historic set's storage, if one exists
if let Some(historic_session) = current.0.checked_sub(2).map(Session) {
clear_selected_validators::<Storage::SelectedValidators>(ValidatorSet {
network,
session: historic_session,
});
let historic_set = ValidatorSet { network, session: historic_session };
Storage::KeyShares::remove(historic_set);
clear_selected_validators::<Storage::SelectedValidators>(historic_set);
}
}
@@ -308,26 +357,28 @@ impl<Storage: SessionsStorage> Sessions for Storage {
{
// Check the validator set's current expected key shares
let expected_key_shares = Self::expected_key_shares(network, allocation_per_key_share);
// Check if the top validator in this set may be faulty under this f
let top_validator_may_be_faulty = if let Some(top_validator) =
// Check if the top validator in this set may be faulty without causing a halt under this f
let currently_tolerates_single_point_of_failure = if let Some(top_validator) =
Self::iter_allocations(network, allocation_per_key_share).next()
{
let (_key, amount) = top_validator;
let key_shares = amount.0 / allocation_per_key_share.0;
key_shares <= (expected_key_shares / 3)
let key_shares = KeySharesStruct::from_allocation(amount, allocation_per_key_share);
key_shares.0 <= (expected_key_shares.0 / 3)
} else {
// If there are no validators, we claim the top validator may not be faulty so the
// following check doesn't run
false
};
if top_validator_may_be_faulty {
let old_key_shares = old_allocation.0 / allocation_per_key_share.0;
let new_key_shares = new_allocation.0 / allocation_per_key_share.0;
// If the set currently tolerates the fault of the top validator, don't let that change
if currently_tolerates_single_point_of_failure {
let old_key_shares =
KeySharesStruct::from_allocation(old_allocation, allocation_per_key_share);
let new_key_shares =
KeySharesStruct::from_allocation(new_allocation, allocation_per_key_share);
// Update the amount of expected key shares per the key shares added
let expected_key_shares = (expected_key_shares + (new_key_shares - old_key_shares))
.min(u64::from(MAX_KEY_SHARES_PER_SET));
let expected_key_shares = KeySharesStruct::saturating_from(
expected_key_shares.0 + (new_key_shares.0 - old_key_shares.0),
);
// If the new key shares exceed the fault tolerance, don't allow the allocation
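// (for example, with 150 expected key shares, any single validator may hold at most 50)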
if new_key_shares > (expected_key_shares / 3) {
if new_key_shares.0 > (expected_key_shares.0 / 3) {
Err(AllocationError::IntroducesSinglePointOfFailure)?
}
}
@@ -432,4 +483,45 @@ impl<Storage: SessionsStorage> Sessions for Storage {
// immediately handle the deallocation
Ok(DeallocationTimeline::Immediate)
}
fn claim_delayed_deallocation(
validator: Public,
network: NetworkId,
session: Session,
) -> Result<Amount, DeallocationError> {
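// Note: `Option` orders `None` before any `Some(_)`, so this comparison also rejects the
// claim with `DelayNotSatisfied` when the network has no current session at all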
if Storage::CurrentSession::get(network).map(|session| session.0) <
Some(session).map(|session| session.0)
{
Err(DeallocationError::DelayNotSatisfied)?;
}
Storage::DelayedDeallocations::take(validator, session)
.ok_or(DeallocationError::NoDelayedDeallocation)
}
fn current_session(network: NetworkId) -> Option<Session> {
Storage::CurrentSession::get(network)
}
fn latest_decided_session(network: NetworkId) -> Option<Session> {
Storage::LatestDecidedSession::get(network)
}
fn key_shares(set: ValidatorSet) -> Option<KeySharesStruct> {
Storage::KeyShares::get(set)
}
fn in_validator_set(set: ValidatorSet, validator: Public) -> bool {
Storage::SelectedValidators::contains_key(selected_validators_key(set, validator))
}
fn key_shares_possessed_by_validator(
set: ValidatorSet,
validator: Public,
) -> Option<KeySharesStruct> {
Storage::SelectedValidators::get(selected_validators_key(set, validator))
}
fn stake_for_current_validator_set(network: NetworkId) -> Option<Amount> {
Storage::TotalAllocatedStake::get(network)
}
}

View File

@@ -21,7 +21,7 @@ std-shims = { path = "../../common/std-shims", default-features = false }
flexible-transcript = { path = "../../crypto/transcript", default-features = false, features = ["recommended", "merlin"] }
multiexp = { path = "../../crypto/multiexp", default-features = false, features = ["batch"], optional = true }
multiexp = { path = "../../crypto/multiexp", default-features = false }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = false }
minimal-ed448 = { path = "../../crypto/ed448", default-features = false }
@@ -36,9 +36,13 @@ secq256k1 = { path = "../../crypto/secq256k1", default-features = false }
embedwards25519 = { path = "../../crypto/embedwards25519", default-features = false }
dkg = { path = "../../crypto/dkg", default-features = false, optional = true }
dkg-dealer = { path = "../../crypto/dkg/dealer", default-features = false, optional = true }
dkg-recovery = { path = "../../crypto/dkg/recovery", default-features = false, optional = true }
dkg-musig = { path = "../../crypto/dkg/musig", default-features = false, optional = true }
dkg-evrf = { path = "../../crypto/dkg/evrf", default-features = false, features = ["secp256k1", "ed25519"], optional = true }
# modular-frost = { path = "../../crypto/frost", default-features = false }
# frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false }
modular-frost = { path = "../../crypto/frost", default-features = false, optional = true }
frost-schnorrkel = { path = "../../crypto/schnorrkel", default-features = false, optional = true }
bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["hazmat"], optional = true }
@@ -46,7 +50,8 @@ bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, fea
alloc = [
"std-shims/alloc",
"multiexp",
"multiexp/alloc",
"multiexp/batch",
"dalek-ff-group/alloc",
"minimal-ed448/alloc",
@@ -61,7 +66,13 @@ alloc = [
"embedwards25519/alloc",
"dkg",
"dkg-dealer",
"dkg-recovery",
"dkg-musig",
"dkg-evrf",
"modular-frost",
"frost-schnorrkel",
"bitcoin-serai",
]

View File

@@ -21,12 +21,13 @@ pub mod alloc {
pub use multiexp;
pub use dkg;
pub use dkg_dealer;
pub use dkg_recovery;
pub use dkg_musig;
pub use dkg_evrf;
pub use bitcoin_serai;
/*
pub use modular_frost;
pub use frost_schnorrkel;
*/
pub use bitcoin_serai;
}
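// Illustrative only, not part of this change: a hedged sketch of how feature-gated code can
// reach these re-exports; the import below is hypothetical.
#[cfg(feature = "alloc")]
#[allow(unused_imports)]
use self::alloc::{dkg_musig, modular_frost};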