From 5487844b9e288d315af092d401d84df11f23cfcb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 25 Feb 2024 18:37:15 -0500 Subject: [PATCH 001/126] clippy && cargo update --- Cargo.lock | 389 +++++++++--------- .../client/tests/common/validator_sets.rs | 4 +- substrate/client/tests/validator_sets.rs | 6 +- 3 files changed, 196 insertions(+), 203 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5d4e75f9..692bdf2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -48,9 +48,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -73,9 +73,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" dependencies = [ "cfg-if", "getrandom", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e2e1ebcb11de5c03c67de28a7df593d32191b44939c482e97702baaaa6ab6a5" +checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" dependencies = [ "anstyle", "anstyle-parse", @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.79" +version = "1.0.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080e9890a082662b09c1ad45f567faeeb47f22b5fb23895fbe1e651e718e25ca" +checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" [[package]] name = "approx" @@ -298,7 +298,7 @@ checksum = 
"5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -309,7 +309,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -355,7 +355,7 @@ checksum = "823b8bb275161044e2ac7a25879cb3e2480cb403e3943022c7c769c599b756aa" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -461,7 +461,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -689,7 +689,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "syn_derive", ] @@ -716,9 +716,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", "serde", @@ -735,9 +735,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" [[package]] name = "byte-slice-cast" @@ -747,9 +747,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.14.2" +version = "1.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea31d69bda4949c1c1562c1e6f042a1caefac98cdc8a298260a2ff41c1e2d42b" +checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" [[package]] name = "byteorder" @@ -788,9 +788,9 @@ dependencies = [ [[package]] name = "cargo-platform" 
-version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ceed8ef69d8518a5dda55c07425450b58a4e1946f4951eab6d7191ee86c2443d" +checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" dependencies = [ "serde", ] @@ -803,7 +803,7 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.21", + "semver 1.0.22", "serde", "serde_json", "thiserror", @@ -811,11 +811,10 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.83" +version = "1.0.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" dependencies = [ - "jobserver", "libc", ] @@ -830,9 +829,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.6" +version = "0.15.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6100bc57b6209840798d95cb2775684849d332f7bd788db2a8c8caf7ef82a41a" +checksum = "fa50868b64a9a6fda9d593ce778849ea8715cd2a3d2cc17ffdb4a2f2f2f1961d" dependencies = [ "smallvec", ] @@ -875,9 +874,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.33" +version = "0.4.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f13690e35a5e4ace198e7beea2895d29f3a9cc55015fcebe6336bd2010af9eb" +checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" dependencies = [ "android-tzdata", "iana-time-zone", @@ -885,7 +884,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.3", ] [[package]] @@ -948,9 +947,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.4.18" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"1e578d6ec4194633722ccf9544794b71b1385c3c027efe0c55db226fc880865c" +checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" dependencies = [ "clap_builder", "clap_derive", @@ -958,9 +957,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.18" +version = "4.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4df4df40ec50c46000231c914968278b1eb05098cf8f1b3a518a95030e71d1c7" +checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" dependencies = [ "anstream", "anstyle", @@ -970,21 +969,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.4.7" +version = "4.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "clap_lex" -version = "0.6.0" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "codespan-reporting" @@ -1217,9 +1216,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" dependencies = [ "cfg-if", ] @@ -1314,14 +1313,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "cxx" -version = "1.0.115" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8de00f15a6fa069c99b88c5c78c4541d0e7899a33b86f7480e23df2431fce0bc" +checksum = "0c15f3b597018782655a05d417f28bac009f6eb60f4b6703eb818998c1aaa16a" dependencies = [ "cc", "cxxbridge-flags", @@ -1331,9 +1330,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.115" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a71e1e631fa2f2f5f92e8b0d860a00c198c6771623a6cefcc863e3554f0d8d6" +checksum = "81699747d109bba60bd6f87e7cb24b626824b8427b32f199b95c7faa06ee3dc9" dependencies = [ "cc", "codespan-reporting", @@ -1341,24 +1340,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "cxxbridge-flags" -version = "1.0.115" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f3fed61d56ba497c4efef9144dfdbaa25aa58f2f6b3a7cf441d4591c583745c" +checksum = "7a7eb4c4fd18505f5a935f9c2ee77780350dcdb56da7cd037634e806141c5c43" [[package]] name = "cxxbridge-macro" -version = "1.0.115" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8908e380a8efd42150c017b0cfa31509fc49b6d47f7cb6b33e93ffb8f4e3661e" +checksum = "5d914fcc6452d133236ee067a9538be25ba6a644a450e1a6c617da84bf029854" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1544,7 +1543,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1705,9 +1704,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" [[package]] name = "elliptic-curve" @@ -1777,7 +1776,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 
2.0.48", + "syn 2.0.50", ] [[package]] @@ -1920,7 +1919,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.48", + "syn 2.0.50", "toml 0.7.8", "walkdir", ] @@ -1938,7 +1937,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -1964,7 +1963,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.48", + "syn 2.0.50", "tempfile", "thiserror", "tiny-keccak", @@ -2053,7 +2052,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2327,7 +2326,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2339,7 +2338,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2349,7 +2348,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2508,7 +2507,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -2546,9 +2545,9 @@ dependencies = [ [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ "gloo-timers", "send_wrapper 0.4.0", @@ -2719,7 +2718,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.2", + "indexmap 2.2.3", "slab", "tokio", "tokio-util", @@ -2783,9 +2782,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.5" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d0c62115964e08cb8039170eb33c1d0e2388a256930279edca206fff675f82c3" +checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" [[package]] name = "hex" @@ -2964,9 +2963,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5aa53871fc917b1a9ed87b683a5d86db645e23acb32c2e0785a353e522fb75" +checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" dependencies = [ "bytes", "futures-channel", @@ -2976,6 +2975,7 @@ dependencies = [ "httparse", "itoa", "pin-project-lite 0.2.13", + "smallvec", "tokio", "want", ] @@ -2988,7 +2988,7 @@ checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", "http 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-util", "rustls 0.22.2", "rustls-native-certs", @@ -3009,7 +3009,7 @@ dependencies = [ "futures-util", "http 1.0.0", "http-body 1.0.0", - "hyper 1.1.0", + "hyper 1.2.0", "pin-project-lite 0.2.13", "socket2 0.5.5", "tokio", @@ -3180,9 +3180,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.2" +version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "824b2ae422412366ba479e8111fd301f7b5faece8149317bb81925979a53f520" +checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -3260,15 +3260,6 @@ version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" -[[package]] -name = "jobserver" -version = "0.1.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c37f63953c4c63420ed5fd3d6d398c719489b9f872b9fa683262f8edd363c7d" -dependencies = [ - "libc", -] - [[package]] name = "js-sys" version = "0.3.68" @@ -3843,7 +3834,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", 
"quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -4042,9 +4033,9 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2c024b41519440580066ba82aab04092b333e09066a5eb86c7c4890df31f22" +checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ "hashbrown 0.14.3", ] @@ -4096,7 +4087,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -4110,7 +4101,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -4121,7 +4112,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -4132,7 +4123,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -4539,9 +4530,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.3" +version = "0.32.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307ed9b18cc2423f29e83f84fd23a8e73628727990181f18641a8b5dc2ab1caa" +checksum = "4541eb06dce09c0241ebbaab7102f0a01a0c8994afed2e5d0d66775016e25ac2" dependencies = [ "approx", "matrixmultiply", @@ -4771,7 +4762,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -5216,7 +5207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.2", + "indexmap 2.2.3", ] [[package]] @@ -5246,7 +5237,7 @@ checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 
2.0.50", ] [[package]] @@ -5279,9 +5270,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.29" +version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2900ede94e305130c13ddd391e0ab7cbaeb783945ae07a279c268cb05109c6cb" +checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" @@ -5291,9 +5282,9 @@ checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" [[package]] name = "polling" -version = "3.4.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30054e72317ab98eddd8561db0f6524df3367636884b7b21b703e4b280a84a14" +checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" dependencies = [ "cfg-if", "concurrent-queue", @@ -5385,7 +5376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -5469,7 +5460,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -5515,7 +5506,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -5816,7 +5807,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -5948,16 +5939,17 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", 
"getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6061,7 +6053,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.21", + "semver 1.0.22", ] [[package]] @@ -6093,7 +6085,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ "log", - "ring 0.17.7", + "ring 0.17.8", "rustls-webpki 0.101.7", "sct", ] @@ -6104,7 +6096,7 @@ version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.2", "subtle", @@ -6126,9 +6118,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" dependencies = [ "base64 0.21.7", "rustls-pki-types", @@ -6136,9 +6128,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a716eb65e3158e90e17cd93d855216e27bde02745ab842f2cab4a39dba1bacf" +checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" [[package]] name = "rustls-webpki" @@ -6146,7 +6138,7 @@ version = "0.101.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b6275d1ee7a1cd780b64aca7726599a1dbc893b1e64144529e55c3c2f745765" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -6156,7 +6148,7 @@ version = "0.102.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "rustls-pki-types", "untrusted 0.9.0", ] @@ -6180,9 +6172,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f98d2aa92eebf49b69786be48e4477826b256916e84a57ff2a4f21923b48eb4c" +checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" [[package]] name = "safe_arch" @@ -6306,7 +6298,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -7067,7 +7059,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -7230,7 +7222,7 @@ version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da046153aa2352493d6cb7da4b6e5c0c057d8a1d0a9aa8560baffdd945acd414" dependencies = [ - "ring 0.17.7", + "ring 0.17.8", "untrusted 0.9.0", ] @@ -7312,9 +7304,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.21" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97ed7a9823b74f99c7742f5336af7be5ecd3eeafcb1507d1fa93347b1d589b0" +checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" dependencies = [ "serde", ] @@ -7895,9 +7887,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.196" +version = "1.0.197" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "870026e60fa08c69f064aa766c10f10b1d62db9ccd4d0abb206472bee0ce3b32" +checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" dependencies = [ "serde_derive", ] @@ -7913,20 +7905,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.196" +version = "1.0.197" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c85360c95e7d137454dc81d9a4ed2b8efd8fbe19cee57357b32b9771fccb67" +checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "serde_json" -version = "1.0.113" +version = "1.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69801b70b1c3dac963ecb03a364ba0ceda9cf60c71cfe475e99864759c8b8a79" +checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" dependencies = [ "itoa", "ryu", @@ -7941,7 +7933,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -7967,16 +7959,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.0" +version = "3.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b0ed1662c5a68664f45b76d18deb0e234aff37207086803165c961eb695e981" +checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" dependencies = [ "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", + "serde_derive", "serde_json", "time", ] @@ -8079,7 +8072,7 @@ version = "0.1.0" dependencies = [ "base64ct", "http-body-util", - "hyper 1.1.0", + "hyper 1.2.0", "hyper-rustls", "hyper-util", "tokio", @@ -8143,7 +8136,7 @@ dependencies = [ "chacha20poly1305", "curve25519-dalek", "rand_core", - "ring 0.17.7", + "ring 0.17.8", "rustc_version", "sha2", "subtle", @@ -8217,7 +8210,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -8413,7 +8406,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -8432,7 +8425,7 @@ source = 
"git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -8604,7 +8597,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -8757,7 +8750,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -8894,9 +8887,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.10.0" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" +checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" [[package]] name = "strum" @@ -8936,7 +8929,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -9024,9 +9017,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.48" +version = "2.0.50" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f3531638e407dfc0814761abb7c00a5b54992b849452a0646b7f65c9f770f3f" +checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" dependencies = [ "proc-macro2", "quote", @@ -9042,7 +9035,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -9086,9 +9079,9 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "target-lexicon" -version = "0.12.13" +version = "0.12.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69758bda2e78f098e4ccb393021a0963bb3442eac05f135c30f61b7370bbafae" +checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "tempfile" @@ -9133,29 +9126,29 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.56" +version = 
"1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54378c645627613241d077a3a79db965db602882668f9136ac42af9ecb730ad" +checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.56" +version = "1.0.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa0faa943b50f3db30a20aa7e265dbc66076993efed8463e8de414e5d06d3471" +checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -9279,7 +9272,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -9356,7 +9349,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "serde", "serde_spanned", "toml_datetime", @@ -9369,7 +9362,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.2", + "indexmap 2.2.3", "toml_datetime", "winnow", ] @@ -9440,7 +9433,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -9699,9 +9692,9 @@ checksum = 
"3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" [[package]] name = "unicode-normalization" -version = "0.1.22" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" +checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" dependencies = [ "tinyvec", ] @@ -9845,7 +9838,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "wasm-bindgen-shared", ] @@ -9879,7 +9872,7 @@ checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9969,8 +9962,8 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 2.2.2", - "semver 1.0.21", + "indexmap 2.2.3", + "semver 1.0.22", ] [[package]] @@ -9984,7 +9977,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.2.2", + "indexmap 2.2.3", "libc", "log", "object 0.31.1", @@ -10083,7 +10076,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.2.2", + "indexmap 2.2.3", "log", "object 0.31.1", "serde", @@ -10150,7 +10143,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.2.2", + "indexmap 2.2.3", "libc", "log", "mach", @@ -10188,7 +10181,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -10309,7 +10302,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.3", ] [[package]] @@ -10329,17 +10322,17 @@ 
dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.3", + "windows_aarch64_msvc 0.52.3", + "windows_i686_gnu 0.52.3", + "windows_i686_msvc 0.52.3", + "windows_x86_64_gnu 0.52.3", + "windows_x86_64_gnullvm 0.52.3", + "windows_x86_64_msvc 0.52.3", ] [[package]] @@ -10350,9 +10343,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" [[package]] name = "windows_aarch64_msvc" @@ -10362,9 +10355,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" [[package]] name = "windows_i686_gnu" @@ -10374,9 +10367,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" [[package]] name = "windows_i686_msvc" @@ -10386,9 +10379,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" [[package]] name = "windows_x86_64_gnu" @@ -10398,9 +10391,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" [[package]] name = "windows_x86_64_gnullvm" @@ -10410,9 +10403,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" [[package]] name = "windows_x86_64_msvc" @@ -10422,15 +10415,15 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" [[package]] name = "winnow" -version = "0.5.39" +version = 
"0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5389a154b01683d28c77f8f68f49dea75f0a4da32557a58f68ee51ebba472d29" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] @@ -10565,7 +10558,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] @@ -10585,7 +10578,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.48", + "syn 2.0.50", ] [[package]] diff --git a/substrate/client/tests/common/validator_sets.rs b/substrate/client/tests/common/validator_sets.rs index e5ec464c..b7257a1c 100644 --- a/substrate/client/tests/common/validator_sets.rs +++ b/substrate/client/tests/common/validator_sets.rs @@ -73,7 +73,7 @@ pub async fn allocate_stake( nonce: u32, ) -> [u8; 32] { // get the call - let tx = serai.sign(&pair, SeraiValidatorSets::allocate(network, amount), nonce, 0); + let tx = serai.sign(pair, SeraiValidatorSets::allocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } @@ -86,6 +86,6 @@ pub async fn deallocate_stake( nonce: u32, ) -> [u8; 32] { // get the call - let tx = serai.sign(&pair, SeraiValidatorSets::deallocate(network, amount), nonce, 0); + let tx = serai.sign(pair, SeraiValidatorSets::deallocate(network, amount), nonce, 0); publish_tx(serai, &tx).await } diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 2ab8c423..fc284f64 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -176,7 +176,7 @@ async fn validator_set_rotation() { // we start the chain with 4 default participants that has a single key share each participants.sort(); - verify_session_and_active_validators(&serai, network, 0, &participants).await; + verify_session_and_active_validators(&serai, 
network, 0, participants).await; // add 1 participant & verify let hash = @@ -188,7 +188,7 @@ async fn validator_set_rotation() { &serai, network, get_active_session(&serai, network, hash).await, - &participants, + participants, ) .await; @@ -199,7 +199,7 @@ async fn validator_set_rotation() { participants.swap_remove(participants.iter().position(|k| *k == pair2.public()).unwrap()); let active_session = get_active_session(&serai, network, hash).await; participants.sort(); - verify_session_and_active_validators(&serai, network, active_session, &participants).await; + verify_session_and_active_validators(&serai, network, active_session, participants).await; // check pending deallocations let pending = serai From 1096ddb7ea538d2ac1ec15d418ee0d61ead146f7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Feb 2024 10:46:16 -0500 Subject: [PATCH 002/126] Add bootnodes --- Cargo.lock | 1 + substrate/node/Cargo.toml | 2 + substrate/node/src/chain_spec.rs | 244 ++++++++++++++++++++++++++++++- substrate/node/src/command.rs | 3 +- 4 files changed, 241 insertions(+), 9 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 692bdf2f..1e81e7c7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7624,6 +7624,7 @@ dependencies = [ "futures-util", "hex", "jsonrpsee", + "libp2p", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index e35bc0ea..bcc21b56 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -26,6 +26,8 @@ hex = "0.4" schnorrkel = "0.11" +libp2p = "0.52" + sp-core = { git = "https://github.com/serai-dex/substrate" } sp-keystore = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 042f5178..ec8384da 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,6 +1,7 @@ use 
core::marker::PhantomData; +use std::collections::HashSet; -use sp_core::Pair as PairTrait; +use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; use sc_service::ChainType; @@ -15,7 +16,7 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } -fn testnet_genesis( +fn devnet_genesis( wasm_binary: &[u8], validators: &[&'static str], endowed_accounts: Vec, @@ -64,6 +65,57 @@ fn testnet_genesis( } } +fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { + let validators = validators + .into_iter() + .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap()) + .collect::>(); + + assert_eq!(validators.iter().collect::>().len(), validators.len()); + + RuntimeGenesisConfig { + system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, + + transaction_payment: Default::default(), + + coins: CoinsConfig { + accounts: validators + .iter() + .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) + .collect(), + _ignore: Default::default(), + }, + + dex: DexConfig { + pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero], + _ignore: Default::default(), + }, + + validator_sets: ValidatorSetsConfig { + networks: serai_runtime::primitives::NETWORKS + .iter() + .map(|network| match network { + NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))), + }) + .collect(), + participants: validators.clone(), + }, + signals: SignalsConfig::default(), + babe: BabeConfig { + authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + }, + grandpa: GrandpaConfig { + 
authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), + _config: PhantomData, + }, + } +} + pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?; @@ -74,7 +126,7 @@ pub fn development_config() -> Result { "devnet", ChainType::Development, || { - testnet_genesis( + devnet_genesis( wasm_binary, &["Alice"], vec![ @@ -92,7 +144,7 @@ pub fn development_config() -> Result { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-devnet"), // Fork ID None, // Properties @@ -102,8 +154,8 @@ pub fn development_config() -> Result { )) } -pub fn testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; +pub fn local_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or("Local wasm not available")?; Ok(ChainSpec::from_genesis( // Name @@ -112,7 +164,7 @@ pub fn testnet_config() -> Result { "local", ChainType::Local, || { - testnet_genesis( + devnet_genesis( wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ @@ -130,7 +182,183 @@ pub fn testnet_config() -> Result { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-local"), + // Fork ID + None, + // Properties + None, + // Extensions + None, + )) +} + +pub fn testnet_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; + + let bootnode_multiaddrs: Vec = vec![ + "/ip6/2604:180:f1::70/tcp/30333".parse().unwrap(), + "/ip4/103.18.20.202/tcp/30333".parse().unwrap(), + "/ip4/37.60.255.101/tcp/30333".parse().unwrap(), + "/ip4/23.227.173.218/tcp/30333".parse().unwrap(), + "/ip4/65.21.156.202/tcp/30333".parse().unwrap(), + "/ip4/174.3.203.20/tcp/30333".parse().unwrap(), + "/ip4/51.195.60.217/tcp/30333".parse().unwrap(), + ]; + // Transforms the above Multiaddrs into MultiaddrWithPeerIds + // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in + // time and this fine for a testnet + let bootnodes = 
|| async { + #[rustfmt::skip] + use libp2p::{ + Transport as TransportTrait, OutboundUpgrade, tcp::tokio::Transport, noise::Config + }; + let mut tasks = vec![]; + for multiaddr in bootnode_multiaddrs { + tasks.push(tokio::time::timeout( + core::time::Duration::from_secs(30), + tokio::task::spawn(async { + let Ok(transport) = Transport::default().dial(multiaddr.clone()) else { None? }; + let Ok(transport) = transport.await else { None? }; + // Uses a random key pair as we only care about their ID + let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; + let Ok(result) = noise.upgrade_outbound(transport, "/ipfs/id/1.0.0").await else { None? }; + let peer_id = result.0; + Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id }) + }), + )); + } + + let mut res = vec![]; + for task in tasks { + if let Ok(Ok(Some(bootnode))) = task.await { + res.push(bootnode); + } + } + res + }; + + Ok(ChainSpec::from_genesis( + // Name + "Test Network 0", + // ID + "testnet-0", + ChainType::Live, + || { + testnet_genesis( + wasm_binary, + vec![ + // Kayaba + "4cef4080d00c6ff5ad93d61d1ca631cc10f8c9bd733e8c0c873a85b5fbe5c625", + // CommunityStaking + "587723d333049d9f4e6f027bbd701d603544a422329ea4e1027d60f7947e1074", + // Adorid + "28800b36a7e92d8c210668ccff4a759d0b179f09178818dc7d7037a057ca8e61", + // SHossain + "6e30ec71b331d73992307fa7c53719ff238666d7d895487a1b691cc1e4481344", + // Yangu + "c692a906f9c63b7e4d12ad3cde204c6715b9a96b5b8ce565794917b7eaaa5f08", + // StormyCloud + "b0ebef6d712b3eb0f01e69a80519e55feff4be8b226fa64d84691e4b3ca2fb38", + // rlking1255 + "82592430fe65e353510d3c1018cebc9806290e2d9098a94a1190f120f471c52b", + // Ghalleb + "48f903ed592638cee1c7f239a6ac14cbb0224a3153cff0f85eb0873113cf163f", + // monerobull + "56a2e3b410cb87bdb8125ae19d76a7be042de49693dc27f03e7a0dcc72b42f6c", + "322a0a63102e4b4ed727a968a6bdcfd1a71af6ed03664d3db8a8ba285e199019", + // vdo + "1c87bbcd666099abc1ee2ec3f065abd073c237f95c4d0658b945e9d66d67622d", + 
// t-900 + "6a9d5a3ca9422baec670e47238decf4a8515f2de0060b0a566a56dfd72686e52", + // tappokone + "36acb4be05513bed670ef3b43dc3a0fdfde8dc45339f81c80b53b2289dc3730c", + // untraceable + "46894302ff717b73def8eaa180e5f162e845ef3d0d8aef44fefc8df4c342271f", + // kim0 + "3e6ed40b3fecd2adf0ba70d1d59b5e07813ee997a0d8400bd2f3bd4444b1ba13", + // Helios + "56a0e89cffe57337e9e232e41dca0ad7306a17fa0ca63fbac048190fdd45d511", + // hbs + "805a64c49a50adaf2281b54a90cc2ab96410bc4a5faed93b5d5d97c448fba457", + // ElectricityMachine + "3e1d8fcfd4887f4c2eb28fda3c8a857c870e6a70ed60ea92ebe11049c3905002", + // FlyR9 + "2e114afcb26055e7cd337c2d0145e52365142bb850370777a24f83dfa6442c0e", + // boog900 + "aad4faf130e4d8fc2279ffbd1c166994b581f29148a4331d204f314b7d4c2001", + // KeepKey + "a8ba046fa30cd9b734560a89e96315bb15e072596c5929d9e81db1c470c28830", + // ripplemcgee1 + "02e076222b59189f3e4c24f7cbf66c7c24d33e6b38153022fc61075c619e7b65", + // krytie + "82815723c498d2aaaead050e63b979bb49a94a00c97b971c22340dffeaa36829", + // akil + "1caffa33b0ea1c7ed95c8450c0baf57baf9e1c1f43af3e28a722ef6d3d4db27e", + // JimmyT + "4ee69d489677f915c08328ece5138705d67a40ea598da47b724c10ec89a0253e", + // Sleipnir + "0e87d766c9acec45b39445579cd3f40c8a4b42e9a34049bdbef0da83d000410e", + "c2f96300a956e949883a5e8952270fb8193154a68533d0dd6b10076224e30167", + "7a66312c53dfb153e842456f4e9a38dcda7e1788a3366df3e54125e29821f870", + // username12345678901 + "76434119e3c38885e6cda1167571ad2cec46e129a9156fe79cbac66b314e8762", + // sgp + "565fe4384ef416f3a29e2d4e9c47fdae0f04c2fc8afb4eb10ad41c519589a04e", + // jberman + "b6e23eec7dbdb2bf72a087e335b44464cedfcc11c669033d6e520b3bc8de1650", + // Eumaios + "9ec7b5edf854f6285205468ed7402e40e5bed8238dc226dd4fd718a40efdce44", + // pigeons + "66c71ebf040542ab467def0ad935ec30ea693953d4322b3b168f6f4e9fcacb63", + // joe_land1 + "94e25d8247b2f0e718bee169213052c693b78743dd91f403398a8837c34e0e6a", + // detherminal + "0852729a8653454e176b8f7a372eb51abccc2b91f548ddaea3e4bc8e35c89452", + // 0x221f + 
"5aa02a2ff0ca8b22b68cb5e6de1c6790db0b8d2eba80e267aae8ab44eb9cc834", + // Seth For Privacy + "f8ebbdb8ff2a77527528577bad6fd3297017f7b35a0613ba31d8af8e7e78cd7b", + // ludo + "40352580f976f4b69a924034f8a63cf025f64894ff65796750fdccf4646f980f", + // lemon_respector + "ce4a4cd996e4601a0226f3c8d9c9cae84519a1a7277b4822e1694b4a8c3ef10b", + // tuxsudo + "c6804a561d07d77c2806844a59c24bb9472df16043767721aae0caa20e82391e", + // Awakeninghumanity.eth + "5046c9f55a65e08df86c132c142f055db0376563fabc190f47a6851e0ff2af2b", + // freQniK + "42cc47732664ffefe8cca0e675015924c0f778840e3c58e39c5db48913b1727a", + // ART3MIS.CLOUD + "5c1793880b0c06a5ce232288c7789cf4451ab20a8da49b84c88789965bc67356", + // Rucknium + "8cd62eedcda504b3204b5593120863b4316cf84205f6d1cd4652877d724b2151", + // PotR + "b29ffbb4a4c0f14eb8c22fabaaacb43f92a62214ff45f0b4f50b7031c3a61a5a", + // michnovka + "98db8174ec40046b1bae39cad69ea0000d67e120524d46bc298d167407410618", + // helpinghand + "fe563aa039c3499ca379765e63f708cc3bce82145cdc2abb7dbcc94d52eec539", + // toplel + "4243da92918333bfc46f4d17ddeda0c3420d920231627dca1b6049f2f13cac6d", + // clamking + "941a6efa9e4dee6c3015cc42339fe56f43c2230133787746828befcee957cb1f", + // worksmarter + "c4f2f6ffead84fcaa2e3c894d57c342a24c461eab5d1d17cae3d1a9e61d73e46", + // kgminer + "8eca72a4bf684d7c4a20a34048003b504a046bce1289d3ae79a3b4422afaf808", + // Benny + "74b4f2d2347a4426c536e6ba48efa14b989b05f03c0ea9b1c67b23696c1a831d", + // Argo + "4025bbbe9c9be72769a27e5e6a3749782f4c9b2a47624bdcb0bfbd29f5e2056a", + ], + ) + }, + // Bootnodes + tokio::runtime::Handle::current().block_on(bootnodes()), + // Telemetry + None, + // Protocol ID + Some("serai-testnet-0"), // Fork ID None, // Properties diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index 3588f95f..a57874bc 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -40,7 +40,8 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { "dev" | 
"devnet" => Ok(Box::new(chain_spec::development_config()?)), - "local" => Ok(Box::new(chain_spec::testnet_config()?)), + "local" => Ok(Box::new(chain_spec::local_config()?)), + "testnet" => Ok(Box::new(chain_spec::testnet_config()?)), _ => panic!("Unknown network ID"), } } From b427f4b8abed2d92d292f2a84e47b746f9bd82c1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 26 Feb 2024 10:47:42 -0500 Subject: [PATCH 003/126] Revert "Add bootnodes" This reverts commit 1096ddb7ea538d2ac1ec15d418ee0d61ead146f7. This commit was intended for the testnet branch alone. --- Cargo.lock | 1 - substrate/node/Cargo.toml | 2 - substrate/node/src/chain_spec.rs | 244 +------------------------------ substrate/node/src/command.rs | 3 +- 4 files changed, 9 insertions(+), 241 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1e81e7c7..692bdf2f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7624,7 +7624,6 @@ dependencies = [ "futures-util", "hex", "jsonrpsee", - "libp2p", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index bcc21b56..e35bc0ea 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -26,8 +26,6 @@ hex = "0.4" schnorrkel = "0.11" -libp2p = "0.52" - sp-core = { git = "https://github.com/serai-dex/substrate" } sp-keystore = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index ec8384da..042f5178 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,7 +1,6 @@ use core::marker::PhantomData; -use std::collections::HashSet; -use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; +use sp_core::Pair as PairTrait; use sc_service::ChainType; @@ -16,7 +15,7 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } -fn devnet_genesis( +fn 
testnet_genesis( wasm_binary: &[u8], validators: &[&'static str], endowed_accounts: Vec, @@ -65,57 +64,6 @@ fn devnet_genesis( } } -fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { - let validators = validators - .into_iter() - .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap()) - .collect::>(); - - assert_eq!(validators.iter().collect::>().len(), validators.len()); - - RuntimeGenesisConfig { - system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, - - transaction_payment: Default::default(), - - coins: CoinsConfig { - accounts: validators - .iter() - .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(1 << 60) })) - .collect(), - _ignore: Default::default(), - }, - - dex: DexConfig { - pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero], - _ignore: Default::default(), - }, - - validator_sets: ValidatorSetsConfig { - networks: serai_runtime::primitives::NETWORKS - .iter() - .map(|network| match network { - NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), - NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))), - NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))), - NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))), - }) - .collect(), - participants: validators.clone(), - }, - signals: SignalsConfig::default(), - babe: BabeConfig { - authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), - epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), - _config: PhantomData, - }, - grandpa: GrandpaConfig { - authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), - _config: PhantomData, - }, - } -} - pub fn development_config() -> Result { let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?; @@ -126,7 +74,7 @@ pub fn development_config() -> Result { "devnet", ChainType::Development, 
|| { - devnet_genesis( + testnet_genesis( wasm_binary, &["Alice"], vec![ @@ -144,7 +92,7 @@ pub fn development_config() -> Result { // Telemetry None, // Protocol ID - Some("serai-devnet"), + Some("serai"), // Fork ID None, // Properties @@ -154,8 +102,8 @@ pub fn development_config() -> Result { )) } -pub fn local_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Local wasm not available")?; +pub fn testnet_config() -> Result { + let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; Ok(ChainSpec::from_genesis( // Name @@ -164,7 +112,7 @@ pub fn local_config() -> Result { "local", ChainType::Local, || { - devnet_genesis( + testnet_genesis( wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ @@ -182,183 +130,7 @@ pub fn local_config() -> Result { // Telemetry None, // Protocol ID - Some("serai-local"), - // Fork ID - None, - // Properties - None, - // Extensions - None, - )) -} - -pub fn testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; - - let bootnode_multiaddrs: Vec = vec![ - "/ip6/2604:180:f1::70/tcp/30333".parse().unwrap(), - "/ip4/103.18.20.202/tcp/30333".parse().unwrap(), - "/ip4/37.60.255.101/tcp/30333".parse().unwrap(), - "/ip4/23.227.173.218/tcp/30333".parse().unwrap(), - "/ip4/65.21.156.202/tcp/30333".parse().unwrap(), - "/ip4/174.3.203.20/tcp/30333".parse().unwrap(), - "/ip4/51.195.60.217/tcp/30333".parse().unwrap(), - ]; - // Transforms the above Multiaddrs into MultiaddrWithPeerIds - // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in - // time and this fine for a testnet - let bootnodes = || async { - #[rustfmt::skip] - use libp2p::{ - Transport as TransportTrait, OutboundUpgrade, tcp::tokio::Transport, noise::Config - }; - let mut tasks = vec![]; - for multiaddr in bootnode_multiaddrs { - tasks.push(tokio::time::timeout( - core::time::Duration::from_secs(30), - tokio::task::spawn(async { - let Ok(transport) = 
Transport::default().dial(multiaddr.clone()) else { None? }; - let Ok(transport) = transport.await else { None? }; - // Uses a random key pair as we only care about their ID - let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; - let Ok(result) = noise.upgrade_outbound(transport, "/ipfs/id/1.0.0").await else { None? }; - let peer_id = result.0; - Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id }) - }), - )); - } - - let mut res = vec![]; - for task in tasks { - if let Ok(Ok(Some(bootnode))) = task.await { - res.push(bootnode); - } - } - res - }; - - Ok(ChainSpec::from_genesis( - // Name - "Test Network 0", - // ID - "testnet-0", - ChainType::Live, - || { - testnet_genesis( - wasm_binary, - vec![ - // Kayaba - "4cef4080d00c6ff5ad93d61d1ca631cc10f8c9bd733e8c0c873a85b5fbe5c625", - // CommunityStaking - "587723d333049d9f4e6f027bbd701d603544a422329ea4e1027d60f7947e1074", - // Adorid - "28800b36a7e92d8c210668ccff4a759d0b179f09178818dc7d7037a057ca8e61", - // SHossain - "6e30ec71b331d73992307fa7c53719ff238666d7d895487a1b691cc1e4481344", - // Yangu - "c692a906f9c63b7e4d12ad3cde204c6715b9a96b5b8ce565794917b7eaaa5f08", - // StormyCloud - "b0ebef6d712b3eb0f01e69a80519e55feff4be8b226fa64d84691e4b3ca2fb38", - // rlking1255 - "82592430fe65e353510d3c1018cebc9806290e2d9098a94a1190f120f471c52b", - // Ghalleb - "48f903ed592638cee1c7f239a6ac14cbb0224a3153cff0f85eb0873113cf163f", - // monerobull - "56a2e3b410cb87bdb8125ae19d76a7be042de49693dc27f03e7a0dcc72b42f6c", - "322a0a63102e4b4ed727a968a6bdcfd1a71af6ed03664d3db8a8ba285e199019", - // vdo - "1c87bbcd666099abc1ee2ec3f065abd073c237f95c4d0658b945e9d66d67622d", - // t-900 - "6a9d5a3ca9422baec670e47238decf4a8515f2de0060b0a566a56dfd72686e52", - // tappokone - "36acb4be05513bed670ef3b43dc3a0fdfde8dc45339f81c80b53b2289dc3730c", - // untraceable - "46894302ff717b73def8eaa180e5f162e845ef3d0d8aef44fefc8df4c342271f", - // kim0 - 
"3e6ed40b3fecd2adf0ba70d1d59b5e07813ee997a0d8400bd2f3bd4444b1ba13", - // Helios - "56a0e89cffe57337e9e232e41dca0ad7306a17fa0ca63fbac048190fdd45d511", - // hbs - "805a64c49a50adaf2281b54a90cc2ab96410bc4a5faed93b5d5d97c448fba457", - // ElectricityMachine - "3e1d8fcfd4887f4c2eb28fda3c8a857c870e6a70ed60ea92ebe11049c3905002", - // FlyR9 - "2e114afcb26055e7cd337c2d0145e52365142bb850370777a24f83dfa6442c0e", - // boog900 - "aad4faf130e4d8fc2279ffbd1c166994b581f29148a4331d204f314b7d4c2001", - // KeepKey - "a8ba046fa30cd9b734560a89e96315bb15e072596c5929d9e81db1c470c28830", - // ripplemcgee1 - "02e076222b59189f3e4c24f7cbf66c7c24d33e6b38153022fc61075c619e7b65", - // krytie - "82815723c498d2aaaead050e63b979bb49a94a00c97b971c22340dffeaa36829", - // akil - "1caffa33b0ea1c7ed95c8450c0baf57baf9e1c1f43af3e28a722ef6d3d4db27e", - // JimmyT - "4ee69d489677f915c08328ece5138705d67a40ea598da47b724c10ec89a0253e", - // Sleipnir - "0e87d766c9acec45b39445579cd3f40c8a4b42e9a34049bdbef0da83d000410e", - "c2f96300a956e949883a5e8952270fb8193154a68533d0dd6b10076224e30167", - "7a66312c53dfb153e842456f4e9a38dcda7e1788a3366df3e54125e29821f870", - // username12345678901 - "76434119e3c38885e6cda1167571ad2cec46e129a9156fe79cbac66b314e8762", - // sgp - "565fe4384ef416f3a29e2d4e9c47fdae0f04c2fc8afb4eb10ad41c519589a04e", - // jberman - "b6e23eec7dbdb2bf72a087e335b44464cedfcc11c669033d6e520b3bc8de1650", - // Eumaios - "9ec7b5edf854f6285205468ed7402e40e5bed8238dc226dd4fd718a40efdce44", - // pigeons - "66c71ebf040542ab467def0ad935ec30ea693953d4322b3b168f6f4e9fcacb63", - // joe_land1 - "94e25d8247b2f0e718bee169213052c693b78743dd91f403398a8837c34e0e6a", - // detherminal - "0852729a8653454e176b8f7a372eb51abccc2b91f548ddaea3e4bc8e35c89452", - // 0x221f - "5aa02a2ff0ca8b22b68cb5e6de1c6790db0b8d2eba80e267aae8ab44eb9cc834", - // Seth For Privacy - "f8ebbdb8ff2a77527528577bad6fd3297017f7b35a0613ba31d8af8e7e78cd7b", - // ludo - "40352580f976f4b69a924034f8a63cf025f64894ff65796750fdccf4646f980f", - // lemon_respector - 
"ce4a4cd996e4601a0226f3c8d9c9cae84519a1a7277b4822e1694b4a8c3ef10b", - // tuxsudo - "c6804a561d07d77c2806844a59c24bb9472df16043767721aae0caa20e82391e", - // Awakeninghumanity.eth - "5046c9f55a65e08df86c132c142f055db0376563fabc190f47a6851e0ff2af2b", - // freQniK - "42cc47732664ffefe8cca0e675015924c0f778840e3c58e39c5db48913b1727a", - // ART3MIS.CLOUD - "5c1793880b0c06a5ce232288c7789cf4451ab20a8da49b84c88789965bc67356", - // Rucknium - "8cd62eedcda504b3204b5593120863b4316cf84205f6d1cd4652877d724b2151", - // PotR - "b29ffbb4a4c0f14eb8c22fabaaacb43f92a62214ff45f0b4f50b7031c3a61a5a", - // michnovka - "98db8174ec40046b1bae39cad69ea0000d67e120524d46bc298d167407410618", - // helpinghand - "fe563aa039c3499ca379765e63f708cc3bce82145cdc2abb7dbcc94d52eec539", - // toplel - "4243da92918333bfc46f4d17ddeda0c3420d920231627dca1b6049f2f13cac6d", - // clamking - "941a6efa9e4dee6c3015cc42339fe56f43c2230133787746828befcee957cb1f", - // worksmarter - "c4f2f6ffead84fcaa2e3c894d57c342a24c461eab5d1d17cae3d1a9e61d73e46", - // kgminer - "8eca72a4bf684d7c4a20a34048003b504a046bce1289d3ae79a3b4422afaf808", - // Benny - "74b4f2d2347a4426c536e6ba48efa14b989b05f03c0ea9b1c67b23696c1a831d", - // Argo - "4025bbbe9c9be72769a27e5e6a3749782f4c9b2a47624bdcb0bfbd29f5e2056a", - ], - ) - }, - // Bootnodes - tokio::runtime::Handle::current().block_on(bootnodes()), - // Telemetry - None, - // Protocol ID - Some("serai-testnet-0"), + Some("serai"), // Fork ID None, // Properties diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index a57874bc..3588f95f 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -40,8 +40,7 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { "dev" | "devnet" => Ok(Box::new(chain_spec::development_config()?)), - "local" => Ok(Box::new(chain_spec::local_config()?)), - "testnet" => Ok(Box::new(chain_spec::testnet_config()?)), + "local" => Ok(Box::new(chain_spec::testnet_config()?)), _ => 
panic!("Unknown network ID"), } } From 5629c94b8bacfbb1aadee5820403206402cfa1aa Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 2 Mar 2024 17:15:16 -0500 Subject: [PATCH 004/126] Reconcile the two copies of scalar_vector.rs in monero-serai --- .../src/ringct/bulletproofs/original.rs | 41 ++-- .../plus/aggregate_range_proof.rs | 26 ++- .../src/ringct/bulletproofs/plus/mod.rs | 3 +- .../ringct/bulletproofs/plus/scalar_vector.rs | 114 ---------- .../plus/weighted_inner_product.rs | 21 +- .../src/ringct/bulletproofs/scalar_vector.rs | 194 ++++++++++-------- .../plus/weighted_inner_product.rs | 3 +- 7 files changed, 164 insertions(+), 238 deletions(-) delete mode 100644 coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs diff --git a/coins/monero/src/ringct/bulletproofs/original.rs b/coins/monero/src/ringct/bulletproofs/original.rs index 5e50c02e..0e841080 100644 --- a/coins/monero/src/ringct/bulletproofs/original.rs +++ b/coins/monero/src/ringct/bulletproofs/original.rs @@ -9,7 +9,7 @@ use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as D use group::{ff::Field, Group}; use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint}; -use multiexp::BatchVerifier; +use multiexp::{BatchVerifier, multiexp}; use crate::{Commitment, ringct::bulletproofs::core::*}; @@ -17,7 +17,20 @@ include!(concat!(env!("OUT_DIR"), "/generators.rs")); static IP12_CELL: OnceLock = OnceLock::new(); pub(crate) fn IP12() -> Scalar { - *IP12_CELL.get_or_init(|| inner_product(&ScalarVector(vec![Scalar::ONE; N]), TWO_N())) + *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N())) +} + +pub(crate) fn hadamard_fold( + l: &[EdwardsPoint], + r: &[EdwardsPoint], + a: Scalar, + b: Scalar, +) -> Vec { + let mut res = Vec::with_capacity(l.len() / 2); + for i in 0 .. 
l.len() { + res.push(multiexp(&[(a, l[i]), (b, r[i])])); + } + res } #[derive(Clone, PartialEq, Eq, Debug)] @@ -57,7 +70,7 @@ impl OriginalStruct { let mut cache = hash_to_scalar(&y.to_bytes()); let z = cache; - let l0 = &aL - z; + let l0 = aL - z; let l1 = sL; let mut zero_twos = Vec::with_capacity(MN); @@ -69,12 +82,12 @@ impl OriginalStruct { } let yMN = ScalarVector::powers(y, MN); - let r0 = (&(aR + z) * &yMN) + ScalarVector(zero_twos); - let r1 = yMN * sR; + let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos); + let r1 = yMN * &sR; let (T1, T2, x, mut taux) = { - let t1 = inner_product(&l0, &r1) + inner_product(&l1, &r0); - let t2 = inner_product(&l1, &r1); + let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1); + let t2 = l1.clone().inner_product(&r1); let mut tau1 = Scalar::random(&mut *rng); let mut tau2 = Scalar::random(&mut *rng); @@ -100,10 +113,10 @@ impl OriginalStruct { taux += zpow[i + 2] * gamma; } - let l = &l0 + &(l1 * x); - let r = &r0 + &(r1 * x); + let l = l0 + &(l1 * x); + let r = r0 + &(r1 * x); - let t = inner_product(&l, &r); + let t = l.clone().inner_product(&r); let x_ip = hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]); @@ -126,8 +139,8 @@ impl OriginalStruct { let (aL, aR) = a.split(); let (bL, bR) = b.split(); - let cL = inner_product(&aL, &bR); - let cR = inner_product(&aR, &bL); + let cL = aL.clone().inner_product(&bR); + let cR = aR.clone().inner_product(&bL); let (G_L, G_R) = G_proof.split_at(aL.len()); let (H_L, H_R) = H_proof.split_at(aL.len()); @@ -140,8 +153,8 @@ impl OriginalStruct { let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]); let winv = w.invert().unwrap(); - a = (aL * w) + (aR * winv); - b = (bL * winv) + (bR * w); + a = (aL * w) + &(aR * winv); + b = (bL * winv) + &(bR * w); if a.len() != 1 { G_proof = hadamard_fold(G_L, G_R, winv, w); diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs 
b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs index 859cb1e4..af5c0275 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs @@ -112,7 +112,7 @@ impl AggregateRangeStatement { let mut d = ScalarVector::new(mn); for j in 1 ..= V.len() { z_pow.push(z.pow(Scalar::from(2 * u64::try_from(j).unwrap()))); // TODO: Optimize this - d = d.add_vec(&Self::d_j(j, V.len()).mul(z_pow[j - 1])); + d = d + &(Self::d_j(j, V.len()) * (z_pow[j - 1])); } let mut ascending_y = ScalarVector(vec![y]); @@ -124,7 +124,8 @@ impl AggregateRangeStatement { let mut descending_y = ascending_y.clone(); descending_y.0.reverse(); - let d_descending_y = d.mul_vec(&descending_y); + let d_descending_y = d.clone() * &descending_y; + let d_descending_y_plus_z = d_descending_y + z; let y_mn_plus_one = descending_y[0] * y; @@ -135,9 +136,9 @@ impl AggregateRangeStatement { let neg_z = -z; let mut A_terms = Vec::with_capacity((generators.len() * 2) + 2); - for (i, d_y_z) in d_descending_y.add(z).0.drain(..).enumerate() { + for (i, d_y_z) in d_descending_y_plus_z.0.iter().enumerate() { A_terms.push((neg_z, generators.generator(GeneratorsList::GBold1, i))); - A_terms.push((d_y_z, generators.generator(GeneratorsList::HBold1, i))); + A_terms.push((*d_y_z, generators.generator(GeneratorsList::HBold1, i))); } A_terms.push((y_mn_plus_one, commitment_accum)); A_terms.push(( @@ -145,7 +146,14 @@ impl AggregateRangeStatement { Generators::g(), )); - (y, d_descending_y, y_mn_plus_one, z, ScalarVector(z_pow), A + multiexp_vartime(&A_terms)) + ( + y, + d_descending_y_plus_z, + y_mn_plus_one, + z, + ScalarVector(z_pow), + A + multiexp_vartime(&A_terms), + ) } pub(crate) fn prove( @@ -191,7 +199,7 @@ impl AggregateRangeStatement { a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0); } - let a_r = a_l.sub(Scalar::ONE); + let a_r = a_l.clone() - Scalar::ONE; let alpha = 
Scalar::random(&mut *rng); @@ -209,11 +217,11 @@ impl AggregateRangeStatement { // Multiply by INV_EIGHT per earlier commentary A.0 *= crate::INV_EIGHT(); - let (y, d_descending_y, y_mn_plus_one, z, z_pow, A_hat) = + let (y, d_descending_y_plus_z, y_mn_plus_one, z, z_pow, A_hat) = Self::compute_A_hat(PointVector(V), &generators, &mut transcript, A); - let a_l = a_l.sub(z); - let a_r = a_r.add_vec(&d_descending_y).add(z); + let a_l = a_l - z; + let a_r = a_r + &d_descending_y_plus_z; let mut alpha = alpha; for j in 1 ..= witness.gammas.len() { alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one; diff --git a/coins/monero/src/ringct/bulletproofs/plus/mod.rs b/coins/monero/src/ringct/bulletproofs/plus/mod.rs index 6a2d7b9c..30417821 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/mod.rs @@ -3,8 +3,7 @@ use group::Group; use dalek_ff_group::{Scalar, EdwardsPoint}; -mod scalar_vector; -pub(crate) use scalar_vector::{ScalarVector, weighted_inner_product}; +pub(crate) use crate::ringct::bulletproofs::scalar_vector::ScalarVector; mod point_vector; pub(crate) use point_vector::PointVector; diff --git a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs b/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs deleted file mode 100644 index 7bc0c3f4..00000000 --- a/coins/monero/src/ringct/bulletproofs/plus/scalar_vector.rs +++ /dev/null @@ -1,114 +0,0 @@ -use core::{ - borrow::Borrow, - ops::{Index, IndexMut}, -}; -use std_shims::vec::Vec; - -use zeroize::Zeroize; - -use group::ff::Field; -use dalek_ff_group::Scalar; - -#[derive(Clone, PartialEq, Eq, Debug, Zeroize)] -pub(crate) struct ScalarVector(pub(crate) Vec); - -impl Index for ScalarVector { - type Output = Scalar; - fn index(&self, index: usize) -> &Scalar { - &self.0[index] - } -} - -impl IndexMut for ScalarVector { - fn index_mut(&mut self, index: usize) -> &mut Scalar { - &mut self.0[index] - } -} - -impl ScalarVector { - pub(crate) 
fn new(len: usize) -> Self { - ScalarVector(vec![Scalar::ZERO; len]) - } - - pub(crate) fn add(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val += scalar.borrow(); - } - res - } - - pub(crate) fn sub(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val -= scalar.borrow(); - } - res - } - - pub(crate) fn mul(&self, scalar: impl Borrow) -> Self { - let mut res = self.clone(); - for val in &mut res.0 { - *val *= scalar.borrow(); - } - res - } - - pub(crate) fn add_vec(&self, vector: &Self) -> Self { - debug_assert_eq!(self.len(), vector.len()); - let mut res = self.clone(); - for (i, val) in res.0.iter_mut().enumerate() { - *val += vector.0[i]; - } - res - } - - pub(crate) fn mul_vec(&self, vector: &Self) -> Self { - debug_assert_eq!(self.len(), vector.len()); - let mut res = self.clone(); - for (i, val) in res.0.iter_mut().enumerate() { - *val *= vector.0[i]; - } - res - } - - pub(crate) fn inner_product(&self, vector: &Self) -> Scalar { - self.mul_vec(vector).sum() - } - - pub(crate) fn powers(x: Scalar, len: usize) -> Self { - debug_assert!(len != 0); - - let mut res = Vec::with_capacity(len); - res.push(Scalar::ONE); - res.push(x); - for i in 2 .. 
len { - res.push(res[i - 1] * x); - } - res.truncate(len); - ScalarVector(res) - } - - pub(crate) fn sum(mut self) -> Scalar { - self.0.drain(..).sum() - } - - pub(crate) fn len(&self) -> usize { - self.0.len() - } - - pub(crate) fn split(mut self) -> (Self, Self) { - debug_assert!(self.len() > 1); - let r = self.0.split_off(self.0.len() / 2); - debug_assert_eq!(self.len(), r.len()); - (self, ScalarVector(r)) - } -} - -pub(crate) fn weighted_inner_product( - a: &ScalarVector, - b: &ScalarVector, - y: &ScalarVector, -) -> Scalar { - a.inner_product(&b.mul_vec(y)) -} diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs index 1bc1e85d..09bb6748 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs @@ -4,7 +4,7 @@ use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, ZeroizeOnDrop}; -use multiexp::{multiexp, multiexp_vartime, BatchVerifier}; +use multiexp::{BatchVerifier, multiexp, multiexp_vartime}; use group::{ ff::{Field, PrimeField}, GroupEncoding, @@ -12,8 +12,7 @@ use group::{ use dalek_ff_group::{Scalar, EdwardsPoint}; use crate::ringct::bulletproofs::plus::{ - ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, weighted_inner_product, - transcript::*, + ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*, }; // Figure 1 @@ -219,7 +218,7 @@ impl WipStatement { .zip(g_bold.0.iter().copied()) .chain(witness.b.0.iter().copied().zip(h_bold.0.iter().copied())) .collect::>(); - P_terms.push((weighted_inner_product(&witness.a, &witness.b, &y), g)); + P_terms.push((witness.a.clone().weighted_inner_product(&witness.b, &y), g)); P_terms.push((witness.alpha, h)); debug_assert_eq!(multiexp(&P_terms), P); P_terms.zeroize(); @@ -258,14 +257,13 @@ impl WipStatement { let d_l = Scalar::random(&mut *rng); let d_r = 
Scalar::random(&mut *rng); - let c_l = weighted_inner_product(&a1, &b2, &y); - let c_r = weighted_inner_product(&(a2.mul(y_n_hat)), &b1, &y); + let c_l = a1.clone().weighted_inner_product(&b2, &y); + let c_r = (a2.clone() * y_n_hat).weighted_inner_product(&b1, &y); // TODO: Calculate these with a batch inversion let y_inv_n_hat = y_n_hat.invert().unwrap(); - let mut L_terms = a1 - .mul(y_inv_n_hat) + let mut L_terms = (a1.clone() * y_inv_n_hat) .0 .drain(..) .zip(g_bold2.0.iter().copied()) @@ -277,8 +275,7 @@ impl WipStatement { L_vec.push(L); L_terms.zeroize(); - let mut R_terms = a2 - .mul(y_n_hat) + let mut R_terms = (a2.clone() * y_n_hat) .0 .drain(..) .zip(g_bold1.0.iter().copied()) @@ -294,8 +291,8 @@ impl WipStatement { (e, inv_e, e_square, inv_e_square, g_bold, h_bold) = Self::next_G_H(&mut transcript, g_bold1, g_bold2, h_bold1, h_bold2, L, R, y_inv_n_hat); - a = a1.mul(e).add_vec(&a2.mul(y_n_hat * inv_e)); - b = b1.mul(inv_e).add_vec(&b2.mul(e)); + a = (a1 * e) + &(a2 * (y_n_hat * inv_e)); + b = (b1 * inv_e) + &(b2 * e); alpha += (d_l * e_square) + (d_r * inv_e_square); debug_assert_eq!(g_bold.len(), a.len()); diff --git a/coins/monero/src/ringct/bulletproofs/scalar_vector.rs b/coins/monero/src/ringct/bulletproofs/scalar_vector.rs index 6f94f228..e6288367 100644 --- a/coins/monero/src/ringct/bulletproofs/scalar_vector.rs +++ b/coins/monero/src/ringct/bulletproofs/scalar_vector.rs @@ -1,85 +1,17 @@ -use core::ops::{Add, Sub, Mul, Index}; +use core::{ + borrow::Borrow, + ops::{Index, IndexMut, Add, Sub, Mul}, +}; use std_shims::vec::Vec; use zeroize::{Zeroize, ZeroizeOnDrop}; use group::ff::Field; use dalek_ff_group::{Scalar, EdwardsPoint}; - use multiexp::multiexp; #[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)] pub(crate) struct ScalarVector(pub(crate) Vec); -macro_rules! 
math_op { - ($Op: ident, $op: ident, $f: expr) => { - #[allow(clippy::redundant_closure_call)] - impl $Op for ScalarVector { - type Output = ScalarVector; - fn $op(self, b: Scalar) -> ScalarVector { - ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect()) - } - } - - #[allow(clippy::redundant_closure_call)] - impl $Op for &ScalarVector { - type Output = ScalarVector; - fn $op(self, b: Scalar) -> ScalarVector { - ScalarVector(self.0.iter().map(|a| $f((a, &b))).collect()) - } - } - - #[allow(clippy::redundant_closure_call)] - impl $Op for ScalarVector { - type Output = ScalarVector; - fn $op(self, b: ScalarVector) -> ScalarVector { - debug_assert_eq!(self.len(), b.len()); - ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect()) - } - } - - #[allow(clippy::redundant_closure_call)] - impl $Op<&ScalarVector> for &ScalarVector { - type Output = ScalarVector; - fn $op(self, b: &ScalarVector) -> ScalarVector { - debug_assert_eq!(self.len(), b.len()); - ScalarVector(self.0.iter().zip(b.0.iter()).map($f).collect()) - } - } - }; -} -math_op!(Add, add, |(a, b): (&Scalar, &Scalar)| *a + *b); -math_op!(Sub, sub, |(a, b): (&Scalar, &Scalar)| *a - *b); -math_op!(Mul, mul, |(a, b): (&Scalar, &Scalar)| *a * *b); - -impl ScalarVector { - pub(crate) fn new(len: usize) -> ScalarVector { - ScalarVector(vec![Scalar::ZERO; len]) - } - - pub(crate) fn powers(x: Scalar, len: usize) -> ScalarVector { - debug_assert!(len != 0); - - let mut res = Vec::with_capacity(len); - res.push(Scalar::ONE); - for i in 1 .. 
len { - res.push(res[i - 1] * x); - } - ScalarVector(res) - } - - pub(crate) fn sum(mut self) -> Scalar { - self.0.drain(..).sum() - } - - pub(crate) fn len(&self) -> usize { - self.0.len() - } - - pub(crate) fn split(self) -> (ScalarVector, ScalarVector) { - let (l, r) = self.0.split_at(self.0.len() / 2); - (ScalarVector(l.to_vec()), ScalarVector(r.to_vec())) - } -} impl Index for ScalarVector { type Output = Scalar; @@ -87,28 +19,120 @@ impl Index for ScalarVector { &self.0[index] } } +impl IndexMut for ScalarVector { + fn index_mut(&mut self, index: usize) -> &mut Scalar { + &mut self.0[index] + } +} -pub(crate) fn inner_product(a: &ScalarVector, b: &ScalarVector) -> Scalar { - (a * b).sum() +impl> Add for ScalarVector { + type Output = ScalarVector; + fn add(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s += scalar.borrow(); + } + self + } +} +impl> Sub for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s -= scalar.borrow(); + } + self + } +} +impl> Mul for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, scalar: S) -> ScalarVector { + for s in &mut self.0 { + *s *= scalar.borrow(); + } + self + } +} + +impl Add<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn add(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s += o; + } + self + } +} +impl Sub<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn sub(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s -= o; + } + self + } +} +impl Mul<&ScalarVector> for ScalarVector { + type Output = ScalarVector; + fn mul(mut self, other: &ScalarVector) -> ScalarVector { + debug_assert_eq!(self.len(), other.len()); + for (s, o) in self.0.iter_mut().zip(other.0.iter()) { + *s 
*= o; + } + self + } } impl Mul<&[EdwardsPoint]> for &ScalarVector { type Output = EdwardsPoint; fn mul(self, b: &[EdwardsPoint]) -> EdwardsPoint { debug_assert_eq!(self.len(), b.len()); - multiexp(&self.0.iter().copied().zip(b.iter().copied()).collect::>()) + let mut multiexp_args = self.0.iter().copied().zip(b.iter().copied()).collect::>(); + let res = multiexp(&multiexp_args); + multiexp_args.zeroize(); + res } } -pub(crate) fn hadamard_fold( - l: &[EdwardsPoint], - r: &[EdwardsPoint], - a: Scalar, - b: Scalar, -) -> Vec { - let mut res = Vec::with_capacity(l.len() / 2); - for i in 0 .. l.len() { - res.push(multiexp(&[(a, l[i]), (b, r[i])])); +impl ScalarVector { + pub(crate) fn new(len: usize) -> Self { + ScalarVector(vec![Scalar::ZERO; len]) + } + + pub(crate) fn powers(x: Scalar, len: usize) -> Self { + debug_assert!(len != 0); + + let mut res = Vec::with_capacity(len); + res.push(Scalar::ONE); + res.push(x); + for i in 2 .. len { + res.push(res[i - 1] * x); + } + res.truncate(len); + ScalarVector(res) + } + + pub(crate) fn len(&self) -> usize { + self.0.len() + } + + pub(crate) fn sum(mut self) -> Scalar { + self.0.drain(..).sum() + } + + pub(crate) fn inner_product(self, vector: &Self) -> Scalar { + (self * vector).sum() + } + + pub(crate) fn weighted_inner_product(self, vector: &Self, y: &Self) -> Scalar { + (self * vector * y).sum() + } + + pub(crate) fn split(mut self) -> (Self, Self) { + debug_assert!(self.len() > 1); + let r = self.0.split_off(self.0.len() / 2); + debug_assert_eq!(self.len(), r.len()); + (self, ScalarVector(r)) } - res } diff --git a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs index 7db2ecc8..b0890cf8 100644 --- a/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/tests/bulletproofs/plus/weighted_inner_product.rs @@ -9,7 +9,6 @@ use dalek_ff_group::{Scalar, EdwardsPoint}; use 
crate::ringct::bulletproofs::plus::{ ScalarVector, PointVector, GeneratorsList, Generators, weighted_inner_product::{WipStatement, WipWitness}, - weighted_inner_product, }; #[test] @@ -68,7 +67,7 @@ fn test_weighted_inner_product() { #[allow(non_snake_case)] let P = g_bold.multiexp(&a) + h_bold.multiexp(&b) + - (g * weighted_inner_product(&a, &b, &y_vec)) + + (g * a.clone().weighted_inner_product(&b, &y_vec)) + (h * alpha); let statement = WipStatement::new(generators, P, y); From 06c23368f2522f1f3f4ba93734ffd365c8d059d2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 6 Mar 2024 21:37:40 -0500 Subject: [PATCH 005/126] Mitigate https://github.com/serai-dex/serai/issues/539 by making keystore deterministic --- substrate/node/src/keystore.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/substrate/node/src/keystore.rs b/substrate/node/src/keystore.rs index 333cd9c3..ca00e79b 100644 --- a/substrate/node/src/keystore.rs +++ b/substrate/node/src/keystore.rs @@ -1,5 +1,4 @@ use zeroize::Zeroize; -use rand_core::RngCore; use sp_core::{crypto::*, ed25519, sr25519}; use sp_keystore::*; @@ -13,8 +12,7 @@ impl Keystore { key_hex.zeroize(); assert_eq!(key.len(), 32, "KEY from environment wasn't 32 bytes"); - key.extend([0; 32]); - rand_core::OsRng.fill_bytes(&mut key[32 ..]); + key.extend(sp_core::blake2_256(&key)); let res = Self(sr25519::Pair::from(schnorrkel::SecretKey::from_bytes(&key).unwrap())); key.zeroize(); From 6c8a0bfda6f72b2c35f0dc721e57b453620e3640 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 6 Mar 2024 21:49:55 -0500 Subject: [PATCH 006/126] Limit docker logs to 300MB per container --- orchestration/src/main.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index a3643e7a..1a2c48ca 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -346,6 +346,8 @@ fn start(network: Network, services: HashSet) { let command = 
command.arg("create").arg("--name").arg(&docker_name); let command = command.arg("--network").arg("serai"); let command = command.arg("--restart").arg("always"); + let command = command.arg("--log-opt").arg("max-size=100m"); + let command = command.arg("--log-opt").arg("max-file=3"); let command = match name { "bitcoin" => { if network == Network::Dev { From e266bc2e3260dd529805ce0cdd7234fee6526bc7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Mar 2024 22:40:04 -0500 Subject: [PATCH 007/126] Stop validators from equivocating on reboot Part of https://github.com/serai-dex/serai/issues/345. The lack of full DB persistence does mean enough nodes rebooting at the same time may cause a halt. This will prevent slashes. --- Cargo.lock | 1 + coordinator/tributary/src/lib.rs | 2 +- coordinator/tributary/src/tendermint/mod.rs | 2 + coordinator/tributary/tendermint/Cargo.toml | 2 + coordinator/tributary/tendermint/src/block.rs | 38 +++++++++++++++++-- coordinator/tributary/tendermint/src/ext.rs | 3 ++ coordinator/tributary/tendermint/src/lib.rs | 7 ++++ coordinator/tributary/tendermint/tests/ext.rs | 5 +++ 8 files changed, 55 insertions(+), 5 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 692bdf2f..6622efab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -9105,6 +9105,7 @@ dependencies = [ "hex", "log", "parity-scale-codec", + "serai-db", "thiserror", "tokio", ] diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 5a5df1a7..7f174d72 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -218,7 +218,7 @@ impl Tributary { TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; let TendermintHandle { synced_block, synced_block_result, messages, machine } = - TendermintMachine::new(network.clone(), block_number, start_time, proposal).await; + TendermintMachine::new(db.clone(), network.clone(), block_number, start_time, proposal).await; tokio::spawn(machine.run()); 
Some(Self { diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index dc62c798..d362364c 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -302,6 +302,8 @@ fn assert_target_block_time() { #[async_trait] impl Network for TendermintNetwork { + type Db = D; + type ValidatorId = [u8; 32]; type SignatureScheme = Arc; type Weights = Arc; diff --git a/coordinator/tributary/tendermint/Cargo.toml b/coordinator/tributary/tendermint/Cargo.toml index ba640391..5a290590 100644 --- a/coordinator/tributary/tendermint/Cargo.toml +++ b/coordinator/tributary/tendermint/Cargo.toml @@ -27,5 +27,7 @@ futures-util = { version = "0.3", default-features = false, features = ["std", " futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] } tokio = { version = "1", default-features = false, features = ["time"] } +serai-db = { path = "../../../common/db", version = "0.1", default-features = false } + [dev-dependencies] tokio = { version = "1", features = ["sync", "rt-multi-thread", "macros"] } diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 8136f888..5b2e86ea 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -3,6 +3,9 @@ use std::{ collections::{HashSet, HashMap}, }; +use parity_scale_codec::Encode; +use serai_db::{Get, DbTxn, Db}; + use crate::{ time::CanonicalInstant, ext::{RoundNumber, BlockNumber, Block, Network}, @@ -12,6 +15,8 @@ use crate::{ }; pub(crate) struct BlockData { + db: N::Db, + pub(crate) number: BlockNumber, pub(crate) validator_id: Option, pub(crate) proposal: Option, @@ -32,12 +37,15 @@ pub(crate) struct BlockData { impl BlockData { pub(crate) fn new( + db: N::Db, weights: Arc, number: BlockNumber, validator_id: Option, proposal: Option, ) -> BlockData { BlockData { + db, + number, validator_id, 
proposal, @@ -128,12 +136,34 @@ impl BlockData { // 27, 33, 41, 46, 60, 64 self.round_mut().step = data.step(); - // Only return a message to if we're actually a current validator - self.validator_id.map(|validator_id| Message { + // Only return a message to if we're actually a current validator and haven't prior posted a + // message + let round_number = self.round().number; + let step = data.step(); + let res = self.validator_id.map(|validator_id| Message { sender: validator_id, block: self.number, - round: self.round().number, + round: round_number, data, - }) + }); + + if res.is_some() { + let mut txn = self.db.txn(); + let key = [ + b"tendermint-machine_already_sent_message".as_ref(), + &self.number.0.to_le_bytes(), + &round_number.0.to_le_bytes(), + &step.encode(), + ] + .concat(); + // If we've already sent a message, return + if txn.get(&key).is_some() { + None?; + } + txn.put(&key, []); + txn.commit(); + } + + res } } diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index 3d13a3b3..b3d568a2 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -212,6 +212,9 @@ pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode /// Trait representing the distributed system Tendermint is providing consensus over. #[async_trait] pub trait Network: Sized + Send + Sync { + /// The database used to back this. + type Db: serai_db::Db; + // Type used to identify validators. type ValidatorId: ValidatorId; /// Signature scheme used by validators. diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 163db6fc..8faf6798 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -231,6 +231,8 @@ pub enum SlashEvent { /// A machine executing the Tendermint protocol. 
pub struct TendermintMachine { + db: N::Db, + network: N, signer: ::Signer, validators: N::SignatureScheme, @@ -322,6 +324,7 @@ impl TendermintMachine { // Create the new block self.block = BlockData::new( + self.db.clone(), self.weights.clone(), BlockNumber(self.block.number.0 + 1), self.signer.validator_id().await, @@ -370,6 +373,7 @@ impl TendermintMachine { /// the machine itself. The machine should have `run` called from an asynchronous task. #[allow(clippy::new_ret_no_self)] pub async fn new( + db: N::Db, network: N, last_block: BlockNumber, last_time: u64, @@ -409,6 +413,8 @@ impl TendermintMachine { let validator_id = signer.validator_id().await; // 01-10 let mut machine = TendermintMachine { + db: db.clone(), + network, signer, validators, @@ -420,6 +426,7 @@ impl TendermintMachine { synced_block_result_send, block: BlockData::new( + db, weights, BlockNumber(last_block.0 + 1), validator_id, diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index e3df7e48..f919b003 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -10,6 +10,8 @@ use parity_scale_codec::{Encode, Decode}; use futures_util::sink::SinkExt; use tokio::{sync::RwLock, time::sleep}; +use serai_db::MemDb; + use tendermint_machine::{ ext::*, SignedMessageFor, SyncedBlockSender, SyncedBlockResultReceiver, MessageSender, SlashEvent, TendermintMachine, TendermintHandle, @@ -111,6 +113,8 @@ struct TestNetwork( #[async_trait] impl Network for TestNetwork { + type Db = MemDb; + type ValidatorId = TestValidatorId; type SignatureScheme = TestSignatureScheme; type Weights = TestWeights; @@ -170,6 +174,7 @@ impl TestNetwork { let i = u16::try_from(i).unwrap(); let TendermintHandle { messages, synced_block, synced_block_result, machine } = TendermintMachine::new( + MemDb::new(), TestNetwork(i, arc.clone()), BlockNumber(1), start_time, From 480acfd4305da78c6d5ff6319864ab35197b1812 Mon Sep 17 
00:00:00 2001 From: Luke Parker Date: Thu, 7 Mar 2024 23:00:17 -0500 Subject: [PATCH 008/126] Fix machete --- Cargo.lock | 1 - substrate/node/Cargo.toml | 1 - 2 files changed, 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6622efab..d37e639c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7625,7 +7625,6 @@ dependencies = [ "hex", "jsonrpsee", "pallet-transaction-payment-rpc", - "rand_core", "sc-authority-discovery", "sc-basic-authorship", "sc-cli", diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index e35bc0ea..f66a9705 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -21,7 +21,6 @@ name = "serai-node" [dependencies] zeroize = "1" -rand_core = "0.6" hex = "0.4" schnorrkel = "0.11" From 0d569ff7a3ce75a410bb557b93221133cd53a9f9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 7 Mar 2024 23:21:15 -0500 Subject: [PATCH 009/126] cargo update Resolves the current deny warning. --- Cargo.lock | 358 ++++++++++++++++++++++++++--------------------------- 1 file changed, 179 insertions(+), 179 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d37e639c..dd2cc6f7 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -73,9 +73,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", @@ -125,9 +125,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.12" +version = "0.6.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b09b5178381e0874812a9b157f7fe84982617e48f71f4e3235482775e5b540" +checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" dependencies = [ "anstyle", "anstyle-parse", @@ -298,7 +298,7 @@ checksum = 
"5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -309,7 +309,7 @@ checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -349,13 +349,13 @@ dependencies = [ [[package]] name = "auto_impl" -version = "1.1.2" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "823b8bb275161044e2ac7a25879cb3e2480cb403e3943022c7c769c599b756aa" +checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -461,7 +461,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -689,7 +689,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "syn_derive", ] @@ -735,9 +735,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.3" +version = "3.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ea184aa71bb362a1157c896979544cc23974e08fd265f29ea96b59f0b4a555b" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" [[package]] name = "byte-slice-cast" @@ -874,9 +874,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" dependencies = [ "android-tzdata", "iana-time-zone", @@ -884,7 +884,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -947,9 +947,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.1" +version = 
"4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c918d541ef2913577a0f9566e9ce27cb35b6df072075769e0b26cb5a554520da" +checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" dependencies = [ "clap_builder", "clap_derive", @@ -957,9 +957,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.1" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9f3e7391dad68afb0c2ede1bf619f579a3dc9c2ec67f089baa397123a2f3d1eb" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -976,7 +976,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1030,9 +1030,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-random" -version = "0.1.17" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaf16c9c2c612020bcfd042e170f6e32de9b9d75adb5277cdbbd2e2c8c8299a" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" dependencies = [ "const-random-macro", ] @@ -1313,14 +1313,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] name = "cxx" -version = "1.0.117" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c15f3b597018782655a05d417f28bac009f6eb60f4b6703eb818998c1aaa16a" +checksum = "635179be18797d7e10edb9cd06c859580237750c7351f39ed9b298bfc17544ad" dependencies = [ "cc", "cxxbridge-flags", @@ -1330,9 +1330,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.117" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81699747d109bba60bd6f87e7cb24b626824b8427b32f199b95c7faa06ee3dc9" +checksum = 
"9324397d262f63ef77eb795d900c0d682a34a43ac0932bec049ed73055d52f63" dependencies = [ "cc", "codespan-reporting", @@ -1340,24 +1340,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] name = "cxxbridge-flags" -version = "1.0.117" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a7eb4c4fd18505f5a935f9c2ee77780350dcdb56da7cd037634e806141c5c43" +checksum = "a87ff7342ffaa54b7c61618e0ce2bbcf827eba6d55b923b83d82551acbbecfe5" [[package]] name = "cxxbridge-macro" -version = "1.0.117" +version = "1.0.119" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d914fcc6452d133236ee067a9538be25ba6a644a450e1a6c617da84bf029854" +checksum = "70b5b86cf65fa0626d85720619d80b288013477a91a0389fa8bc716bf4903ad1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1543,7 +1543,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1644,9 +1644,9 @@ dependencies = [ [[package]] name = "dyn-clone" -version = "1.0.16" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "545b22097d44f8a9581187cdf93de7a71e4722bf51200cfaba810865b49a495d" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -1776,7 +1776,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1919,7 +1919,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.50", + "syn 2.0.52", "toml 0.7.8", "walkdir", ] @@ -1937,7 +1937,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -1963,7 +1963,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.50", + "syn 2.0.52", "tempfile", "thiserror", "tiny-keccak", @@ -2052,7 +2052,7 
@@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2326,7 +2326,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2338,7 +2338,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2348,7 +2348,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2507,7 +2507,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -2639,9 +2639,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -2679,7 +2679,7 @@ dependencies = [ "aho-corasick", "bstr", "log", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -2718,7 +2718,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.11", - "indexmap 2.2.3", + "indexmap 2.2.5", "slab", "tokio", "tokio-util", @@ -2782,9 +2782,9 @@ checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" [[package]] name = "hermit-abi" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "379dada1584ad501b383485dd706b8afb7a70fcbc7f4da7d780638a5a6124a60" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -2870,9 +2870,9 @@ dependencies = [ [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", @@ -2897,7 +2897,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", - "http 1.0.0", + "http 1.1.0", ] [[package]] @@ -2908,7 +2908,7 @@ checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" dependencies = [ "bytes", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "pin-project-lite 0.2.13", ] @@ -2970,7 +2970,7 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "httparse", "itoa", @@ -2987,7 +2987,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" dependencies = [ "futures-util", - "http 1.0.0", + "http 1.1.0", "hyper 1.2.0", "hyper-util", "rustls 0.22.2", @@ -3007,11 +3007,11 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "http 1.0.0", + "http 1.1.0", "http-body 1.0.0", "hyper 1.2.0", "pin-project-lite 0.2.13", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower", "tower-service", @@ -3180,9 +3180,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -3229,7 +3229,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 
0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -3262,9 +3262,9 @@ checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -3455,12 +3455,12 @@ checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] name = "libloading" -version = "0.8.1" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c571b676ddfc9a8c12f1f3d3085a7b163966a8fd8098a90640953ce5f6170161" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-sys 0.48.0", + "windows-targets 0.52.4", ] [[package]] @@ -3692,7 +3692,7 @@ dependencies = [ "log", "rand", "smallvec", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "trust-dns-proto 0.22.0", "void", @@ -3778,7 +3778,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustls 0.21.10", - "socket2 0.5.5", + "socket2 0.5.6", "thiserror", "tokio", ] @@ -3834,7 +3834,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -3850,7 +3850,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", ] @@ -4014,9 +4014,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "loom" @@ -4087,7 +4087,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.50", + "syn 2.0.52", ] 
[[package]] @@ -4101,7 +4101,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -4112,7 +4112,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -4123,7 +4123,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -4274,9 +4274,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -4762,7 +4762,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -4803,9 +4803,9 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "open-fastrlp" @@ -5207,7 +5207,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.2.5", ] [[package]] @@ -5222,22 +5222,22 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = 
"b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -5307,9 +5307,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -5376,7 +5376,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -5460,7 +5460,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -5506,7 +5506,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -5654,7 +5654,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.5", + "socket2 0.5.6", "tracing", "windows-sys 0.48.0", ] @@ -5731,9 +5731,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa7237101a77a10773db45d62004a272517633fbcc3df19d96455ede1122e051" +checksum = "e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" dependencies = [ "either", "rayon-core", @@ -5807,7 +5807,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -5831,7 +5831,7 @@ checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", + "regex-automata 0.4.6", "regex-syntax 0.8.2", ] @@ -5846,9 +5846,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", @@ -6118,9 +6118,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c333bb734fcdedcea57de1602543590f545f127dc8b533324318fd492c5c70b" +checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" dependencies = [ "base64 0.21.7", "rustls-pki-types", @@ -6128,9 +6128,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "048a63e5b3ac996d78d402940b5fa47973d2d080c6c6fffa1d0f19c4445310b7" +checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" [[package]] name = "rustls-webpki" @@ -6298,7 +6298,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -7059,7 +7059,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] 
[[package]] @@ -7910,7 +7910,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -7932,7 +7932,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -7966,7 +7966,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_derive", "serde_json", @@ -8153,12 +8153,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -8209,7 +8209,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -8405,7 +8405,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -8424,7 +8424,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -8596,7 +8596,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -8749,7 +8749,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -8928,7 +8928,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -9016,9 +9016,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.52" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" dependencies = [ "proc-macro2", "quote", @@ -9034,7 +9034,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -9084,9 +9084,9 @@ checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", "fastrand", @@ -9141,7 +9141,7 @@ checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -9259,7 +9259,7 @@ dependencies = [ "parking_lot 0.12.1", "pin-project-lite 0.2.13", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.6", "tokio-macros", "windows-sys 0.48.0", ] @@ -9272,7 +9272,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -9349,7 +9349,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "serde", "serde_spanned", "toml_datetime", @@ -9362,7 +9362,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "toml_datetime", "winnow", ] @@ -9433,7 +9433,7 @@ checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -9794,9 +9794,9 @@ checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -9819,9 +9819,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9829,24 +9829,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -9856,9 +9856,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9866,22 +9866,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-encoder" @@ -9962,7 +9962,7 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.5", "semver 1.0.22", ] @@ -9977,7 +9977,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.2.3", + "indexmap 2.2.5", "libc", "log", "object 0.31.1", @@ -10076,7 +10076,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.2.3", + "indexmap 2.2.5", "log", "object 0.31.1", "serde", @@ -10143,7 +10143,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.2.3", + "indexmap 2.2.5", "libc", "log", "mach", @@ -10181,14 +10181,14 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] name = 
"web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -10302,7 +10302,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.3", + "windows-targets 0.52.4", ] [[package]] @@ -10322,17 +10322,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d380ba1dc7187569a8a9e91ed34b8ccfc33123bbacb8c0aed2d1ad7f3ef2dc5f" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.3", - "windows_aarch64_msvc 0.52.3", - "windows_i686_gnu 0.52.3", - "windows_i686_msvc 0.52.3", - "windows_x86_64_gnu 0.52.3", - "windows_x86_64_gnullvm 0.52.3", - "windows_x86_64_msvc 0.52.3", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -10343,9 +10343,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68e5dcfb9413f53afd9c8f86e56a7b4d86d9a2fa26090ea2dc9e40fba56c6ec6" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -10355,9 +10355,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = 
"windows_aarch64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8dab469ebbc45798319e69eebf92308e541ce46760b49b18c6b3fe5e8965b30f" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -10367,9 +10367,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a4e9b6a7cac734a8b4138a4e1044eac3404d8326b6c0f939276560687a033fb" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -10379,9 +10379,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28b0ec9c422ca95ff34a78755cfa6ad4a51371da2a5ace67500cf7ca5f232c58" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -10391,9 +10391,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "704131571ba93e89d7cd43482277d6632589b18ecf4468f591fbae0a8b101614" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -10403,9 +10403,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42079295511643151e98d61c38c0acc444e52dd42ab456f7ccfd5152e8ecf21c" +checksum 
= "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -10415,9 +10415,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.3" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0770833d60a970638e989b3fa9fd2bb1aaadcf88963d1659fd7d9990196ed2d6" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -10558,7 +10558,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] @@ -10578,7 +10578,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.52", ] [[package]] From 454bebaa77c9690d098d747811e68da31f0d5b98 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 8 Mar 2024 01:15:10 -0500 Subject: [PATCH 010/126] Have the TendermintMachine domain-separate by genesis Enbables support for multiple machines over the same DB. 
--- coordinator/tributary/src/lib.rs | 10 +++++++++- coordinator/tributary/tendermint/src/block.rs | 4 ++++ coordinator/tributary/tendermint/src/lib.rs | 5 +++++ coordinator/tributary/tendermint/tests/ext.rs | 1 + 4 files changed, 19 insertions(+), 1 deletion(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 7f174d72..92fb98da 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -218,7 +218,15 @@ impl Tributary { TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; let TendermintHandle { synced_block, synced_block_result, messages, machine } = - TendermintMachine::new(db.clone(), network.clone(), block_number, start_time, proposal).await; + TendermintMachine::new( + db.clone(), + network.clone(), + genesis, + block_number, + start_time, + proposal, + ) + .await; tokio::spawn(machine.run()); Some(Self { diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 5b2e86ea..71dfb3cc 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -16,6 +16,7 @@ use crate::{ pub(crate) struct BlockData { db: N::Db, + genesis: [u8; 32], pub(crate) number: BlockNumber, pub(crate) validator_id: Option, @@ -38,6 +39,7 @@ pub(crate) struct BlockData { impl BlockData { pub(crate) fn new( db: N::Db, + genesis: [u8; 32], weights: Arc, number: BlockNumber, validator_id: Option, @@ -45,6 +47,7 @@ impl BlockData { ) -> BlockData { BlockData { db, + genesis, number, validator_id, @@ -151,6 +154,7 @@ impl BlockData { let mut txn = self.db.txn(); let key = [ b"tendermint-machine_already_sent_message".as_ref(), + &self.genesis, &self.number.0.to_le_bytes(), &round_number.0.to_le_bytes(), &step.encode(), diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 8faf6798..77805677 100644 --- 
a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -232,6 +232,7 @@ pub enum SlashEvent { /// A machine executing the Tendermint protocol. pub struct TendermintMachine { db: N::Db, + genesis: [u8; 32], network: N, signer: ::Signer, @@ -325,6 +326,7 @@ impl TendermintMachine { // Create the new block self.block = BlockData::new( self.db.clone(), + self.genesis, self.weights.clone(), BlockNumber(self.block.number.0 + 1), self.signer.validator_id().await, @@ -375,6 +377,7 @@ impl TendermintMachine { pub async fn new( db: N::Db, network: N, + genesis: [u8; 32], last_block: BlockNumber, last_time: u64, proposal: N::Block, @@ -414,6 +417,7 @@ impl TendermintMachine { // 01-10 let mut machine = TendermintMachine { db: db.clone(), + genesis, network, signer, @@ -427,6 +431,7 @@ impl TendermintMachine { block: BlockData::new( db, + genesis, weights, BlockNumber(last_block.0 + 1), validator_id, diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index f919b003..3b3cf7c3 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -176,6 +176,7 @@ impl TestNetwork { TendermintMachine::new( MemDb::new(), TestNetwork(i, arc.clone()), + [0; 32], BlockNumber(1), start_time, TestBlock { id: 1u32.to_le_bytes(), valid: Ok(()) }, From 10f5ec51ca86bccd18cc1b7b2a9108359a612171 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 8 Mar 2024 09:19:34 -0500 Subject: [PATCH 011/126] Explicitly limit RocksDB logs --- common/db/src/rocks.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index 74fd33ad..01762151 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs @@ -38,5 +38,12 @@ pub fn new_rocksdb(path: &str) -> RocksDB { let mut options = Options::default(); options.create_if_missing(true); options.set_compression_type(DBCompressionType::Lz4); + 
options.set_wal_size_limit_mb(128); + // 1 GB + options.set_max_total_wal_size(1 << 30); + // 128 MB + options.set_max_log_file_size(1 << 27); + options.set_recycle_log_file_num(5); + options.set_keep_log_file_num(5); Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap()) } From 97f433c6947c890af7bd9f9a944cb913439f135f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 01:58:58 -0500 Subject: [PATCH 012/126] Redo how WAL/logs are limited by the DB Adds a patch to the latest rocksdb. --- Cargo.lock | 37 +++++++++++++++++++------------------ Cargo.toml | 3 +++ common/db/Cargo.toml | 2 +- common/db/src/rocks.rs | 21 +++++++++++++-------- patches/rocksdb/Cargo.toml | 26 ++++++++++++++++++++++++++ patches/rocksdb/src/lib.rs | 1 + 6 files changed, 63 insertions(+), 27 deletions(-) create mode 100644 patches/rocksdb/Cargo.toml create mode 100644 patches/rocksdb/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index dd2cc6f7..64516b0b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -445,17 +445,16 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.65.1" +version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfdf7b466f9a4903edc73f95d6d2bcd5baf8ae620638762244d3f60143643cc5" +checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.4.2", "cexpr", "clang-sys", + "itertools", "lazy_static", "lazycell", - "peeking_take_while", - "prettyplease 0.2.16", "proc-macro2", "quote", "regex", @@ -2954,7 +2953,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.4.10", + "socket2 0.5.5", "tokio", "tower-service", "tracing", @@ -3423,7 +3422,7 @@ dependencies = [ "num_cpus", "parking_lot 0.12.1", "regex", - "rocksdb", + "rocksdb 0.21.0", "smallvec", ] @@ -3938,9 +3937,9 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.11.0+8.1.1" +version = "0.16.0+8.10.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3386f101bcb4bd252d8e9d2fb41ec3b0862a15a62b478c355b2982efa469e3e" +checksum = "ce3d60bc059831dc1c83903fb45c103f75db65c5a7bf22272764d9cc683e348c" dependencies = [ "bindgen", "bzip2-sys", @@ -3950,6 +3949,7 @@ dependencies = [ "libz-sys", "lz4-sys", "tikv-jemalloc-sys", + "zstd-sys", ] [[package]] @@ -4759,7 +4759,7 @@ version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 2.0.52", @@ -5179,12 +5179,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "1.1.1" @@ -5986,8 +5980,15 @@ dependencies = [ [[package]] name = "rocksdb" version = "0.21.0" +dependencies = [ + "rocksdb 0.22.0", +] + +[[package]] +name = "rocksdb" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb6f170a4041d50a0ce04b0d2e14916d6ca863ea2e422689a5b694395d299ffe" +checksum = "6bd13e55d6d7b8cd0ea569161127567cd587676c99f4472f779a0279aa60a7a7" dependencies = [ "libc", "librocksdb-sys", @@ -7468,7 +7469,7 @@ name = "serai-db" version = "0.1.0" dependencies = [ "parity-db", - "rocksdb", + "rocksdb 0.21.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 25601c46..bcc344ed 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -3,6 +3,7 @@ resolver = "2" members = [ # Version patches "patches/zstd", + "patches/rocksdb", "patches/proc-macro-crate", # std patches @@ -112,6 +113,8 @@ dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "a # wasmtime pulls in an old version for this zstd = { path = "patches/zstd" } +# Needed for WAL compression 
+rocksdb = { path = "patches/rocksdb" } # proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3 proc-macro-crate = { path = "patches/proc-macro-crate" } diff --git a/common/db/Cargo.toml b/common/db/Cargo.toml index 78d486a1..e422b346 100644 --- a/common/db/Cargo.toml +++ b/common/db/Cargo.toml @@ -18,7 +18,7 @@ workspace = true [dependencies] parity-db = { version = "0.4", default-features = false, optional = true } -rocksdb = { version = "0.21", default-features = false, features = ["lz4"], optional = true } +rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true } [features] parity-db = ["dep:parity-db"] diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index 01762151..046bbfd3 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs @@ -1,6 +1,8 @@ use std::sync::Arc; -use rocksdb::{DBCompressionType, ThreadMode, SingleThreaded, Options, Transaction, TransactionDB}; +use rocksdb::{ + DBCompressionType, ThreadMode, SingleThreaded, LogLevel, Options, Transaction, TransactionDB, +}; use crate::*; @@ -37,13 +39,16 @@ pub type RocksDB = Arc>; pub fn new_rocksdb(path: &str) -> RocksDB { let mut options = Options::default(); options.create_if_missing(true); - options.set_compression_type(DBCompressionType::Lz4); - options.set_wal_size_limit_mb(128); - // 1 GB - options.set_max_total_wal_size(1 << 30); + options.set_compression_type(DBCompressionType::Zstd); + // 128 MB - options.set_max_log_file_size(1 << 27); - options.set_recycle_log_file_num(5); - options.set_keep_log_file_num(5); + options.set_wal_compression_type(DBCompressionType::Zstd); + options.set_max_total_wal_size(128 * 1024 * 1024); + + // 1 MB + options.set_log_level(LogLevel::Warn); + options.set_max_log_file_size(1024 * 1024); + options.set_recycle_log_file_num(1); + Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap()) } diff --git a/patches/rocksdb/Cargo.toml b/patches/rocksdb/Cargo.toml new file 
mode 100644 index 00000000..d9967deb --- /dev/null +++ b/patches/rocksdb/Cargo.toml @@ -0,0 +1,26 @@ +[package] +name = "rocksdb" +version = "0.21.0" +description = "rocksdb which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/rocksdb" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +rocksdb = { version = "0.22", default-features = false } + +[features] +jemalloc = ["rocksdb/jemalloc"] +snappy = ["rocksdb/snappy"] +lz4 = ["rocksdb/lz4"] +zstd = ["rocksdb/zstd"] +zlib = ["rocksdb/zlib"] +bzip2 = ["rocksdb/bzip2"] +default = ["snappy", "lz4", "zstd", "zlib", "bzip2"] diff --git a/patches/rocksdb/src/lib.rs b/patches/rocksdb/src/lib.rs new file mode 100644 index 00000000..bd209ce8 --- /dev/null +++ b/patches/rocksdb/src/lib.rs @@ -0,0 +1 @@ +pub use rocksdb::*; From 2347bf5fd3338f43888db5d11b47a036bb7200b7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 02:19:41 -0500 Subject: [PATCH 013/126] Bound cosign work and ensure it progress forward even when cosigns don't occur Should resolve the DB load observed on testnet. --- coordinator/src/substrate/cosign.rs | 33 +++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index 2443c811..ccdd9ade 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -41,6 +41,7 @@ enum HasEvents { create_db!( SubstrateCosignDb { + ScanCosignFrom: () -> u64, IntendedCosign: () -> (u64, Option), BlockHasEvents: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, @@ -178,7 +179,7 @@ async fn potentially_cosign_block( which should be cosigned). Accordingly, it is necessary to call multiple times even if `latest_number` doesn't change. 
*/ -pub async fn advance_cosign_protocol( +async fn advance_cosign_protocol_inner( db: &mut impl Db, key: &Zeroizing<::F>, serai: &Serai, @@ -227,7 +228,16 @@ pub async fn advance_cosign_protocol( // A list of sets which are cosigning, along with a boolean of if we're in the set let mut cosigning = vec![]; - for block in (last_intended_to_cosign_block + 1) ..= latest_number { + // The consensus rules for this are `last_intended_to_cosign_block + 1` + let scan_start_block = last_intended_to_cosign_block + 1; + // As a practical optimization, we don't re-scan old blocks since old blocks are independent to + // new state + let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(0)); + for block in scan_start_block ..= latest_number { + // This TX is committed, always re-run this loop from immediately before this block + // That allows the below loop to break out on a block it wants to revisit later + ScanCosignFrom::set(&mut txn, &(scan_start_block - 1)); + let actual_block = serai .finalized_block_by_number(block) .await? 
@@ -297,3 +307,22 @@ pub async fn advance_cosign_protocol( Ok(()) } + +pub async fn advance_cosign_protocol( + db: &mut impl Db, + key: &Zeroizing<::F>, + serai: &Serai, + latest_number: u64, +) -> Result<(), SeraiError> { + loop { + let scan_from = ScanCosignFrom::get(db).unwrap_or(0); + // Only scan 1000 blocks at a time to limit a massive txn from forming + let scan_to = latest_number.min(scan_from + 1000); + advance_cosign_protocol_inner(db, key, serai, scan_to).await?; + // If we didn't limit the scan_to, break + if scan_to == latest_number { + break; + } + } + Ok(()) +} From 89b237af7ec22cf9592a20706436af557f1a0575 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 02:44:04 -0500 Subject: [PATCH 014/126] Correct the return value of block_has_events --- coordinator/src/substrate/cosign.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index ccdd9ade..43497ca4 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -109,7 +109,7 @@ async fn block_has_events( let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes }; BlockHasEvents::set(txn, block, &has_events); - Ok(HasEvents::Yes) + Ok(has_events) } Some(code) => Ok(code), } From 68dc872b8829aaf07f8028fa4fcafd0f52273e0e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 03:18:52 -0500 Subject: [PATCH 015/126] sync every txn --- Cargo.lock | 2 +- common/db/src/rocks.rs | 7 +++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 64516b0b..5011cee4 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,7 +2953,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.5.5", + "socket2 0.4.10", "tokio", "tower-service", "tracing", diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index 046bbfd3..c48f6c49 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs 
@@ -1,7 +1,8 @@ use std::sync::Arc; use rocksdb::{ - DBCompressionType, ThreadMode, SingleThreaded, LogLevel, Options, Transaction, TransactionDB, + DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, Transaction, Options, + TransactionDB, }; use crate::*; @@ -31,7 +32,9 @@ impl Get for Arc> { impl Db for Arc> { type Transaction<'a> = Transaction<'a, TransactionDB>; fn txn(&mut self) -> Self::Transaction<'_> { - self.transaction() + let mut opts = WriteOptions::default(); + opts.set_sync(true); + self.transaction_opt(&opts, &Default::default()) } } From 61a81e53e1d0e03eccc963f898d91d68c2b0111f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 03:31:06 -0500 Subject: [PATCH 016/126] Further optimize cosign DB --- coordinator/src/substrate/cosign.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index 43497ca4..7b553d68 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -207,13 +207,19 @@ async fn advance_cosign_protocol_inner( window_end_exclusive = 0; } + // The consensus rules for this are `last_intended_to_cosign_block + 1` + let scan_start_block = last_intended_to_cosign_block + 1; + // As a practical optimization, we don't re-scan old blocks since old blocks are independent to + // new state + let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(0)); + // Check all blocks within the window to see if they should be cosigned // If so, we're skipping them and need to flag them as skipped so that once the window closes, we // do cosign them // We only perform this check if we haven't already marked a block as skipped since the cosign // the skipped block will cause will cosign all other blocks within this window if skipped_block.is_none() { - for b in (last_intended_to_cosign_block + 1) .. 
window_end_exclusive.min(latest_number) { + for b in scan_start_block .. window_end_exclusive.min(latest_number) { if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { skipped_block = Some(b); log::debug!("skipping cosigning {b} due to proximity to prior cosign"); @@ -228,16 +234,7 @@ async fn advance_cosign_protocol_inner( // A list of sets which are cosigning, along with a boolean of if we're in the set let mut cosigning = vec![]; - // The consensus rules for this are `last_intended_to_cosign_block + 1` - let scan_start_block = last_intended_to_cosign_block + 1; - // As a practical optimization, we don't re-scan old blocks since old blocks are independent to - // new state - let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(0)); for block in scan_start_block ..= latest_number { - // This TX is committed, always re-run this loop from immediately before this block - // That allows the below loop to break out on a block it wants to revisit later - ScanCosignFrom::set(&mut txn, &(scan_start_block - 1)); - let actual_block = serai .finalized_block_by_number(block) .await? 
@@ -286,6 +283,9 @@ async fn advance_cosign_protocol_inner( break; } + + // If this TX is committed, always start future scanning from the next block + ScanCosignFrom::set(&mut txn, &(scan_start_block + 1)); } if let Some((number, hash)) = to_cosign { From c93f6bf901ec98510606b0d324a44fa2cc4d56bd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 03:34:31 -0500 Subject: [PATCH 017/126] Replace yield_now with sleep 100 to prevent hammering a task, despite still being over-eager --- coordinator/src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 690fb342..4de23ae0 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -836,8 +836,8 @@ async fn handle_cosigns_and_batch_publication( ) { let mut tributaries = HashMap::new(); 'outer: loop { - // TODO: Create a better async flow for this, as this does still hammer this task - tokio::task::yield_now().await; + // TODO: Create a better async flow for this + tokio::time::sleep(core::time::Duration::from_millis(100)).await; match tributary_event.try_recv() { Ok(event) => match event { From 6374d9987e43c1bbc5dd423307ec59f14f3213a1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 03:48:44 -0500 Subject: [PATCH 018/126] Correct how we save the block to scan from --- coordinator/src/substrate/cosign.rs | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index 7b553d68..6293d5e8 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -136,6 +136,7 @@ async fn potentially_cosign_block( if (block_has_events == HasEvents::No) && (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1)) { + log::debug!("automatically co-signing next block ({block}) since it has no events"); LatestCosignedBlock::set(txn, &block); } @@ -219,7 +220,8 @@ async fn 
advance_cosign_protocol_inner( // We only perform this check if we haven't already marked a block as skipped since the cosign // the skipped block will cause will cosign all other blocks within this window if skipped_block.is_none() { - for b in scan_start_block .. window_end_exclusive.min(latest_number) { + let window_end_inclusive = window_end_exclusive - 1; + for b in scan_start_block ..= window_end_inclusive.min(latest_number) { if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes { skipped_block = Some(b); log::debug!("skipping cosigning {b} due to proximity to prior cosign"); @@ -285,7 +287,7 @@ async fn advance_cosign_protocol_inner( } // If this TX is committed, always start future scanning from the next block - ScanCosignFrom::set(&mut txn, &(scan_start_block + 1)); + ScanCosignFrom::set(&mut txn, &(block + 1)); } if let Some((number, hash)) = to_cosign { From ae0ecf9efea195cdcea8af6687b3a7837fb3562e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 04:26:24 -0500 Subject: [PATCH 019/126] Disable jemalloc for rocksdb 0.22 to fix windows builds --- Cargo.lock | 11 ----------- patches/rocksdb/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5011cee4..9e66c478 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3948,7 +3948,6 @@ dependencies = [ "libc", "libz-sys", "lz4-sys", - "tikv-jemalloc-sys", "zstd-sys", ] @@ -9164,16 +9163,6 @@ dependencies = [ "num_cpus", ] -[[package]] -name = "tikv-jemalloc-sys" -version = "0.5.4+5.3.0-patched" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9402443cb8fd499b6f327e40565234ff34dbda27460c5b47db0db77443dd85d1" -dependencies = [ - "cc", - "libc", -] - [[package]] name = "time" version = "0.3.31" diff --git a/patches/rocksdb/Cargo.toml b/patches/rocksdb/Cargo.toml index d9967deb..3a92fafc 100644 --- a/patches/rocksdb/Cargo.toml +++ b/patches/rocksdb/Cargo.toml @@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"] 
rocksdb = { version = "0.22", default-features = false } [features] -jemalloc = ["rocksdb/jemalloc"] +jemalloc = [] snappy = ["rocksdb/snappy"] lz4 = ["rocksdb/lz4"] zstd = ["rocksdb/zstd"] From 157acc47ca7e690e38a3f5d59ca8a01e4af6e638 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 05:05:43 -0500 Subject: [PATCH 020/126] More aggresive WAL parameters --- common/db/src/rocks.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index c48f6c49..20d39666 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs @@ -44,12 +44,13 @@ pub fn new_rocksdb(path: &str) -> RocksDB { options.create_if_missing(true); options.set_compression_type(DBCompressionType::Zstd); - // 128 MB options.set_wal_compression_type(DBCompressionType::Zstd); - options.set_max_total_wal_size(128 * 1024 * 1024); + // 10 MB + options.set_max_total_wal_size(10 * 1024 * 1024); + options.set_wal_size_limit_mb(10); - // 1 MB options.set_log_level(LogLevel::Warn); + // 1 MB options.set_max_log_file_size(1024 * 1024); options.set_recycle_log_file_num(1); From f7d16b3fc56702d640ff199f6941a226d100d2ae Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 9 Mar 2024 05:37:36 -0500 Subject: [PATCH 021/126] Fix 0 - 1 which caused a panic --- coordinator/src/substrate/cosign.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index 6293d5e8..d885e729 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -205,14 +205,14 @@ async fn advance_cosign_protocol_inner( let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE; // If we've never triggered a cosign, don't skip any cosigns based on proximity if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN { - window_end_exclusive = 0; + window_end_exclusive = 1; } // The consensus rules for this are 
`last_intended_to_cosign_block + 1` let scan_start_block = last_intended_to_cosign_block + 1; // As a practical optimization, we don't re-scan old blocks since old blocks are independent to // new state - let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(0)); + let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1)); // Check all blocks within the window to see if they should be cosigned // If so, we're skipping them and need to flag them as skipped so that once the window closes, we @@ -317,7 +317,7 @@ pub async fn advance_cosign_protocol( latest_number: u64, ) -> Result<(), SeraiError> { loop { - let scan_from = ScanCosignFrom::get(db).unwrap_or(0); + let scan_from = ScanCosignFrom::get(db).unwrap_or(1); // Only scan 1000 blocks at a time to limit a massive txn from forming let scan_to = latest_number.min(scan_from + 1000); advance_cosign_protocol_inner(db, key, serai, scan_to).await?; From ace41c79fd9ff0f2ab1c7d2f3aa078a878db2256 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 01:44:00 -0400 Subject: [PATCH 022/126] Tidy the BlockHasEvents cache --- coordinator/src/substrate/cosign.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index d885e729..aa585f2f 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -43,7 +43,7 @@ create_db!( SubstrateCosignDb { ScanCosignFrom: () -> u64, IntendedCosign: () -> (u64, Option), - BlockHasEvents: (block: u64) -> HasEvents, + BlockHasEventsCache: (block: u64) -> HasEvents, LatestCosignedBlock: () -> u64, } ); @@ -86,7 +86,7 @@ async fn block_has_events( serai: &Serai, block: u64, ) -> Result { - let cached = BlockHasEvents::get(txn, block); + let cached = BlockHasEventsCache::get(txn, block); match cached { None => { let serai = serai.as_of( @@ -108,7 +108,7 @@ async fn block_has_events( let has_events = if 
has_no_events { HasEvents::No } else { HasEvents::Yes }; - BlockHasEvents::set(txn, block, &has_events); + BlockHasEventsCache::set(txn, block, &has_events); Ok(has_events) } Some(code) => Ok(code), @@ -288,6 +288,8 @@ async fn advance_cosign_protocol_inner( // If this TX is committed, always start future scanning from the next block ScanCosignFrom::set(&mut txn, &(block + 1)); + // Since we're scanning *from* the next block, tidy the cache + BlockHasEventsCache::del(&mut txn, &block); } if let Some((number, hash)) = to_cosign { From 0889627e60846dae0537554fa1c4e569dbdc23ce Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 02:20:51 -0400 Subject: [PATCH 023/126] Typo fix for prior commit --- coordinator/src/substrate/cosign.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/src/substrate/cosign.rs b/coordinator/src/substrate/cosign.rs index aa585f2f..00560763 100644 --- a/coordinator/src/substrate/cosign.rs +++ b/coordinator/src/substrate/cosign.rs @@ -289,7 +289,7 @@ async fn advance_cosign_protocol_inner( // If this TX is committed, always start future scanning from the next block ScanCosignFrom::set(&mut txn, &(block + 1)); // Since we're scanning *from* the next block, tidy the cache - BlockHasEventsCache::del(&mut txn, &block); + BlockHasEventsCache::del(&mut txn, block); } if let Some((number, hash)) = to_cosign { From a3a009a7e9875eaf6b9fdfe678fc21e3354ea3d2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 17:55:05 -0400 Subject: [PATCH 024/126] Move docs to spec --- {docs => spec}/DKG Exclusions.md | 0 {docs => spec}/Getting Started.md | 0 {docs => spec}/Serai.md | 0 {docs => spec}/coordinator/Coordinator.md | 0 {docs => spec}/coordinator/Tributary.md | 0 {docs => spec}/cryptography/Distributed Key Generation.md | 0 {docs => spec}/cryptography/FROST.md | 0 {docs => spec}/integrations/Bitcoin.md | 0 {docs => spec}/integrations/Ethereum.md | 0 {docs => spec}/integrations/Instructions.md | 0 {docs 
=> spec}/integrations/Monero.md | 0 {docs => spec}/media/icon.svg | 0 {docs => spec}/policy/Canonical Chain.md | 0 {docs => spec}/processor/Multisig Rotation.md | 0 {docs => spec}/processor/Processor.md | 0 {docs => spec}/processor/Scanning.md | 0 {docs => spec}/processor/UTXO Management.md | 0 {docs => spec}/protocol/Constants.md | 0 {docs => spec}/protocol/In Instructions.md | 0 {docs => spec}/protocol/Validator Sets.md | 0 20 files changed, 0 insertions(+), 0 deletions(-) rename {docs => spec}/DKG Exclusions.md (100%) rename {docs => spec}/Getting Started.md (100%) rename {docs => spec}/Serai.md (100%) rename {docs => spec}/coordinator/Coordinator.md (100%) rename {docs => spec}/coordinator/Tributary.md (100%) rename {docs => spec}/cryptography/Distributed Key Generation.md (100%) rename {docs => spec}/cryptography/FROST.md (100%) rename {docs => spec}/integrations/Bitcoin.md (100%) rename {docs => spec}/integrations/Ethereum.md (100%) rename {docs => spec}/integrations/Instructions.md (100%) rename {docs => spec}/integrations/Monero.md (100%) rename {docs => spec}/media/icon.svg (100%) rename {docs => spec}/policy/Canonical Chain.md (100%) rename {docs => spec}/processor/Multisig Rotation.md (100%) rename {docs => spec}/processor/Processor.md (100%) rename {docs => spec}/processor/Scanning.md (100%) rename {docs => spec}/processor/UTXO Management.md (100%) rename {docs => spec}/protocol/Constants.md (100%) rename {docs => spec}/protocol/In Instructions.md (100%) rename {docs => spec}/protocol/Validator Sets.md (100%) diff --git a/docs/DKG Exclusions.md b/spec/DKG Exclusions.md similarity index 100% rename from docs/DKG Exclusions.md rename to spec/DKG Exclusions.md diff --git a/docs/Getting Started.md b/spec/Getting Started.md similarity index 100% rename from docs/Getting Started.md rename to spec/Getting Started.md diff --git a/docs/Serai.md b/spec/Serai.md similarity index 100% rename from docs/Serai.md rename to spec/Serai.md diff --git 
a/docs/coordinator/Coordinator.md b/spec/coordinator/Coordinator.md similarity index 100% rename from docs/coordinator/Coordinator.md rename to spec/coordinator/Coordinator.md diff --git a/docs/coordinator/Tributary.md b/spec/coordinator/Tributary.md similarity index 100% rename from docs/coordinator/Tributary.md rename to spec/coordinator/Tributary.md diff --git a/docs/cryptography/Distributed Key Generation.md b/spec/cryptography/Distributed Key Generation.md similarity index 100% rename from docs/cryptography/Distributed Key Generation.md rename to spec/cryptography/Distributed Key Generation.md diff --git a/docs/cryptography/FROST.md b/spec/cryptography/FROST.md similarity index 100% rename from docs/cryptography/FROST.md rename to spec/cryptography/FROST.md diff --git a/docs/integrations/Bitcoin.md b/spec/integrations/Bitcoin.md similarity index 100% rename from docs/integrations/Bitcoin.md rename to spec/integrations/Bitcoin.md diff --git a/docs/integrations/Ethereum.md b/spec/integrations/Ethereum.md similarity index 100% rename from docs/integrations/Ethereum.md rename to spec/integrations/Ethereum.md diff --git a/docs/integrations/Instructions.md b/spec/integrations/Instructions.md similarity index 100% rename from docs/integrations/Instructions.md rename to spec/integrations/Instructions.md diff --git a/docs/integrations/Monero.md b/spec/integrations/Monero.md similarity index 100% rename from docs/integrations/Monero.md rename to spec/integrations/Monero.md diff --git a/docs/media/icon.svg b/spec/media/icon.svg similarity index 100% rename from docs/media/icon.svg rename to spec/media/icon.svg diff --git a/docs/policy/Canonical Chain.md b/spec/policy/Canonical Chain.md similarity index 100% rename from docs/policy/Canonical Chain.md rename to spec/policy/Canonical Chain.md diff --git a/docs/processor/Multisig Rotation.md b/spec/processor/Multisig Rotation.md similarity index 100% rename from docs/processor/Multisig Rotation.md rename to 
spec/processor/Multisig Rotation.md diff --git a/docs/processor/Processor.md b/spec/processor/Processor.md similarity index 100% rename from docs/processor/Processor.md rename to spec/processor/Processor.md diff --git a/docs/processor/Scanning.md b/spec/processor/Scanning.md similarity index 100% rename from docs/processor/Scanning.md rename to spec/processor/Scanning.md diff --git a/docs/processor/UTXO Management.md b/spec/processor/UTXO Management.md similarity index 100% rename from docs/processor/UTXO Management.md rename to spec/processor/UTXO Management.md diff --git a/docs/protocol/Constants.md b/spec/protocol/Constants.md similarity index 100% rename from docs/protocol/Constants.md rename to spec/protocol/Constants.md diff --git a/docs/protocol/In Instructions.md b/spec/protocol/In Instructions.md similarity index 100% rename from docs/protocol/In Instructions.md rename to spec/protocol/In Instructions.md diff --git a/docs/protocol/Validator Sets.md b/spec/protocol/Validator Sets.md similarity index 100% rename from docs/protocol/Validator Sets.md rename to spec/protocol/Validator Sets.md From c32d3413ba444f040651fc2efb344ec1f074f282 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 17:55:27 -0400 Subject: [PATCH 025/126] Add just-the-docs based user-facing documentation --- .github/workflows/pages.yml | 90 +++++++++++++++++++++++++++++++++++++ README.md | 5 ++- docs/.gitignore | 7 +++ docs/.ruby-version | 1 + docs/2_amm.md | 5 +++ docs/3_cross_chain.md | 5 +++ docs/4_0_economics.md | 6 +++ docs/4_1_genesis.md | 6 +++ docs/4_2_pre.md | 6 +++ docs/4_3_post.md | 6 +++ docs/5_0_infrastructure.md | 6 +++ docs/5_1_message_queue.md | 6 +++ docs/5_2_processor.md | 6 +++ docs/5_3_coordinator.md | 6 +++ docs/5_4_serai.md | 6 +++ docs/6_0_validator.md | 6 +++ docs/Gemfile | 4 ++ docs/Gemfile.lock | 82 +++++++++++++++++++++++++++++++++ docs/_config.yml | 5 +++ docs/index.md | 7 +++ 20 files changed, 270 insertions(+), 1 deletion(-) create mode 100644 
.github/workflows/pages.yml create mode 100644 docs/.gitignore create mode 100644 docs/.ruby-version create mode 100644 docs/2_amm.md create mode 100644 docs/3_cross_chain.md create mode 100644 docs/4_0_economics.md create mode 100644 docs/4_1_genesis.md create mode 100644 docs/4_2_pre.md create mode 100644 docs/4_3_post.md create mode 100644 docs/5_0_infrastructure.md create mode 100644 docs/5_1_message_queue.md create mode 100644 docs/5_2_processor.md create mode 100644 docs/5_3_coordinator.md create mode 100644 docs/5_4_serai.md create mode 100644 docs/6_0_validator.md create mode 100644 docs/Gemfile create mode 100644 docs/Gemfile.lock create mode 100644 docs/_config.yml create mode 100644 docs/index.md diff --git a/.github/workflows/pages.yml b/.github/workflows/pages.yml new file mode 100644 index 00000000..7c4a1f12 --- /dev/null +++ b/.github/workflows/pages.yml @@ -0,0 +1,90 @@ +# MIT License +# +# Copyright (c) 2022 just-the-docs +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +# This workflow uses actions that are not certified by GitHub. +# They are provided by a third-party and are governed by +# separate terms of service, privacy policy, and support +# documentation. + +# Sample workflow for building and deploying a Jekyll site to GitHub Pages +name: Deploy Jekyll site to Pages + +on: + push: + branches: + - "develop" + paths: + - "docs/**" + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages +permissions: + contents: read + pages: write + id-token: write + +# Allow one concurrent deployment +concurrency: + group: "pages" + cancel-in-progress: true + +jobs: + # Build job + build: + runs-on: ubuntu-latest + defaults: + run: + working-directory: docs + steps: + - name: Checkout + uses: actions/checkout@v3 + - name: Setup Ruby + uses: ruby/setup-ruby@v1 + with: + bundler-cache: true + cache-version: 0 + working-directory: "${{ github.workspace }}/docs" + - name: Setup Pages + id: pages + uses: actions/configure-pages@v3 + - name: Build with Jekyll + run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}" + env: + JEKYLL_ENV: production + - name: Upload artifact + uses: actions/upload-pages-artifact@v1 + with: + path: "docs/_site/" + + # Deployment job + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v2 diff --git a/README.md b/README.md index 8f8c1982..900de39d 100644 --- a/README.md +++ b/README.md @@ -11,7 +11,10 @@ wallet. 
- `audits`: Audits for various parts of Serai. -- `docs`: Documentation on the Serai protocol. +- `spec`: The specification of the Serai protocol, both internally and as + networked. + +- `docs`: User-facing documentation on the Serai protocol. - `common`: Crates containing utilities common to a variety of areas under Serai, none neatly fitting under another category. diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 00000000..be76315d --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,7 @@ +_site/ +.sass-cache/ +.jekyll-cache/ +.jekyll-metadata + +.bundle/ +vendor/ diff --git a/docs/.ruby-version b/docs/.ruby-version new file mode 100644 index 00000000..8c50098d --- /dev/null +++ b/docs/.ruby-version @@ -0,0 +1 @@ +3.1 diff --git a/docs/2_amm.md b/docs/2_amm.md new file mode 100644 index 00000000..e3f14cc2 --- /dev/null +++ b/docs/2_amm.md @@ -0,0 +1,5 @@ +--- +title: Automatic Market Makers +layout: default +nav_order: 2 +--- diff --git a/docs/3_cross_chain.md b/docs/3_cross_chain.md new file mode 100644 index 00000000..212aa5b5 --- /dev/null +++ b/docs/3_cross_chain.md @@ -0,0 +1,5 @@ +--- +title: Cross-Chain Architecture +layout: default +nav_order: 3 +--- diff --git a/docs/4_0_economics.md b/docs/4_0_economics.md new file mode 100644 index 00000000..a77ed2c3 --- /dev/null +++ b/docs/4_0_economics.md @@ -0,0 +1,6 @@ +--- +title: Economics +layout: default +nav_order: 4 +has_children: true +--- diff --git a/docs/4_1_genesis.md b/docs/4_1_genesis.md new file mode 100644 index 00000000..d4dbef04 --- /dev/null +++ b/docs/4_1_genesis.md @@ -0,0 +1,6 @@ +--- +title: Genesis +layout: default +nav_order: 1 +parent: Economics +--- diff --git a/docs/4_2_pre.md b/docs/4_2_pre.md new file mode 100644 index 00000000..d891c51d --- /dev/null +++ b/docs/4_2_pre.md @@ -0,0 +1,6 @@ +--- +title: Pre-Economic Security +layout: default +nav_order: 2 +parent: Economics +--- diff --git a/docs/4_3_post.md b/docs/4_3_post.md new file mode 100644 index 
00000000..4a41bd19 --- /dev/null +++ b/docs/4_3_post.md @@ -0,0 +1,6 @@ +--- +title: Post-Economic Security +layout: default +nav_order: 3 +parent: Economics +--- diff --git a/docs/5_0_infrastructure.md b/docs/5_0_infrastructure.md new file mode 100644 index 00000000..acb2be9f --- /dev/null +++ b/docs/5_0_infrastructure.md @@ -0,0 +1,6 @@ +--- +title: Infrastructure +layout: default +nav_order: 5 +has_children: true +--- diff --git a/docs/5_1_message_queue.md b/docs/5_1_message_queue.md new file mode 100644 index 00000000..b4d7b548 --- /dev/null +++ b/docs/5_1_message_queue.md @@ -0,0 +1,6 @@ +--- +title: Message Queue +layout: default +nav_order: 1 +parent: Infrastructure +--- diff --git a/docs/5_2_processor.md b/docs/5_2_processor.md new file mode 100644 index 00000000..3654bd15 --- /dev/null +++ b/docs/5_2_processor.md @@ -0,0 +1,6 @@ +--- +title: Processor +layout: default +nav_order: 2 +parent: Infrastructure +--- diff --git a/docs/5_3_coordinator.md b/docs/5_3_coordinator.md new file mode 100644 index 00000000..854cafc8 --- /dev/null +++ b/docs/5_3_coordinator.md @@ -0,0 +1,6 @@ +--- +title: Coordinator +layout: default +nav_order: 3 +parent: Infrastructure +--- diff --git a/docs/5_4_serai.md b/docs/5_4_serai.md new file mode 100644 index 00000000..bbd04aa0 --- /dev/null +++ b/docs/5_4_serai.md @@ -0,0 +1,6 @@ +--- +title: Serai +layout: default +nav_order: 4 +parent: Infrastructure +--- diff --git a/docs/6_0_validator.md b/docs/6_0_validator.md new file mode 100644 index 00000000..81db7be3 --- /dev/null +++ b/docs/6_0_validator.md @@ -0,0 +1,6 @@ +--- +title: Running a Validator +layout: default +nav_order: 6 +has_children: true +--- diff --git a/docs/Gemfile b/docs/Gemfile new file mode 100644 index 00000000..0b800b1f --- /dev/null +++ b/docs/Gemfile @@ -0,0 +1,4 @@ +source 'https://rubygems.org' + +gem "jekyll", "~> 4.3.3" +gem "just-the-docs", "0.8.1" diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock new file mode 100644 index 00000000..34e40cd9 --- 
/dev/null +++ b/docs/Gemfile.lock @@ -0,0 +1,82 @@ +GEM + remote: https://rubygems.org/ + specs: + addressable (2.8.6) + public_suffix (>= 2.0.2, < 6.0) + colorator (1.1.0) + concurrent-ruby (1.2.3) + em-websocket (0.5.3) + eventmachine (>= 0.12.9) + http_parser.rb (~> 0) + eventmachine (1.2.7) + ffi (1.16.3) + forwardable-extended (2.6.0) + google-protobuf (3.25.3-x86_64-linux) + http_parser.rb (0.8.0) + i18n (1.14.4) + concurrent-ruby (~> 1.0) + jekyll (4.3.3) + addressable (~> 2.4) + colorator (~> 1.0) + em-websocket (~> 0.5) + i18n (~> 1.0) + jekyll-sass-converter (>= 2.0, < 4.0) + jekyll-watch (~> 2.0) + kramdown (~> 2.3, >= 2.3.1) + kramdown-parser-gfm (~> 1.0) + liquid (~> 4.0) + mercenary (>= 0.3.6, < 0.5) + pathutil (~> 0.9) + rouge (>= 3.0, < 5.0) + safe_yaml (~> 1.0) + terminal-table (>= 1.8, < 4.0) + webrick (~> 1.7) + jekyll-include-cache (0.2.1) + jekyll (>= 3.7, < 5.0) + jekyll-sass-converter (3.0.0) + sass-embedded (~> 1.54) + jekyll-seo-tag (2.8.0) + jekyll (>= 3.8, < 5.0) + jekyll-watch (2.2.1) + listen (~> 3.0) + just-the-docs (0.8.1) + jekyll (>= 3.8.5) + jekyll-include-cache + jekyll-seo-tag (>= 2.0) + rake (>= 12.3.1) + kramdown (2.4.0) + rexml + kramdown-parser-gfm (1.1.0) + kramdown (~> 2.0) + liquid (4.0.4) + listen (3.9.0) + rb-fsevent (~> 0.10, >= 0.10.3) + rb-inotify (~> 0.9, >= 0.9.10) + mercenary (0.4.0) + pathutil (0.16.2) + forwardable-extended (~> 2.6) + public_suffix (5.0.4) + rake (13.1.0) + rb-fsevent (0.11.2) + rb-inotify (0.10.1) + ffi (~> 1.0) + rexml (3.2.6) + rouge (4.2.0) + safe_yaml (1.0.5) + sass-embedded (1.63.6) + google-protobuf (~> 3.23) + rake (>= 13.0.0) + terminal-table (3.0.2) + unicode-display_width (>= 1.1.1, < 3) + unicode-display_width (2.5.0) + webrick (1.8.1) + +PLATFORMS + x86_64-linux + +DEPENDENCIES + jekyll (~> 4.3.3) + just-the-docs (= 0.8.1) + +BUNDLED WITH + 2.2.5 diff --git a/docs/_config.yml b/docs/_config.yml new file mode 100644 index 00000000..45909735 --- /dev/null +++ b/docs/_config.yml @@ -0,0 
+1,5 @@ +title: Serai Documentation +description: Documentation for the Serai protocol. +theme: just-the-docs + +url: https://serai-dex.github.io/serai diff --git a/docs/index.md b/docs/index.md new file mode 100644 index 00000000..99aa9831 --- /dev/null +++ b/docs/index.md @@ -0,0 +1,7 @@ +--- +title: Home +layout: home +nav_order: 1 +--- + +Documentation for Serai. From d1be9eaa2d6152a4d32236206679b2064cb20b2a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 18:02:54 -0400 Subject: [PATCH 026/126] Change baseurl to /docs --- docs/_config.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/_config.yml b/docs/_config.yml index 45909735..6822eeac 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -2,4 +2,5 @@ title: Serai Documentation description: Documentation for the Serai protocol. theme: just-the-docs -url: https://serai-dex.github.io/serai +url: https://serai.exchange/docs +baseurl: /docs From 442d8c02fce9345a348dded42d38eef645a43482 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 20:00:01 -0400 Subject: [PATCH 027/126] Add docs, correct URL --- README.md | 2 +- docs/2_amm.md | 5 ---- docs/5_1_message_queue.md | 6 ----- docs/5_2_processor.md | 6 ----- docs/5_3_coordinator.md | 6 ----- docs/_config.yml | 3 +-- docs/amm/index.md | 17 +++++++++++++ .../index.md} | 0 docs/{4_1_genesis.md => economics/genesis.md} | 0 docs/{4_0_economics.md => economics/index.md} | 0 docs/{4_3_post.md => economics/post.md} | 0 docs/{4_2_pre.md => economics/pre.md} | 0 docs/index.md | 6 ++++- docs/infrastructure/coordinator.md | 19 ++++++++++++++ .../index.md} | 0 docs/infrastructure/message_queue.md | 25 +++++++++++++++++++ docs/infrastructure/processor.md | 19 ++++++++++++++ .../{5_4_serai.md => infrastructure/serai.md} | 0 docs/integrating/index.md | 6 +++++ docs/{6_0_validator.md => validator/index.md} | 2 +- 20 files changed, 94 insertions(+), 28 deletions(-) delete mode 100644 docs/2_amm.md delete mode 100644 
docs/5_1_message_queue.md delete mode 100644 docs/5_2_processor.md delete mode 100644 docs/5_3_coordinator.md create mode 100644 docs/amm/index.md rename docs/{3_cross_chain.md => cross_chain/index.md} (100%) rename docs/{4_1_genesis.md => economics/genesis.md} (100%) rename docs/{4_0_economics.md => economics/index.md} (100%) rename docs/{4_3_post.md => economics/post.md} (100%) rename docs/{4_2_pre.md => economics/pre.md} (100%) create mode 100644 docs/infrastructure/coordinator.md rename docs/{5_0_infrastructure.md => infrastructure/index.md} (100%) create mode 100644 docs/infrastructure/message_queue.md create mode 100644 docs/infrastructure/processor.md rename docs/{5_4_serai.md => infrastructure/serai.md} (100%) create mode 100644 docs/integrating/index.md rename docs/{6_0_validator.md => validator/index.md} (84%) diff --git a/README.md b/README.md index 900de39d..4a8ac4d5 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading experience. Funds are stored in an economically secured threshold-multisig wallet. 
-[Getting Started](docs/Getting%20Started.md) +[Getting Started](spec/Getting%20Started.md) ### Layout diff --git a/docs/2_amm.md b/docs/2_amm.md deleted file mode 100644 index e3f14cc2..00000000 --- a/docs/2_amm.md +++ /dev/null @@ -1,5 +0,0 @@ ---- -title: Automatic Market Makers -layout: default -nav_order: 2 ---- diff --git a/docs/5_1_message_queue.md b/docs/5_1_message_queue.md deleted file mode 100644 index b4d7b548..00000000 --- a/docs/5_1_message_queue.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Message Queue -layout: default -nav_order: 1 -parent: Infrastructure ---- diff --git a/docs/5_2_processor.md b/docs/5_2_processor.md deleted file mode 100644 index 3654bd15..00000000 --- a/docs/5_2_processor.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Processor -layout: default -nav_order: 2 -parent: Infrastructure ---- diff --git a/docs/5_3_coordinator.md b/docs/5_3_coordinator.md deleted file mode 100644 index 854cafc8..00000000 --- a/docs/5_3_coordinator.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Coordinator -layout: default -nav_order: 3 -parent: Infrastructure ---- diff --git a/docs/_config.yml b/docs/_config.yml index 6822eeac..4db46720 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -2,5 +2,4 @@ title: Serai Documentation description: Documentation for the Serai protocol. theme: just-the-docs -url: https://serai.exchange/docs -baseurl: /docs +url: https://docs.serai.exchange diff --git a/docs/amm/index.md b/docs/amm/index.md new file mode 100644 index 00000000..ef48ceea --- /dev/null +++ b/docs/amm/index.md @@ -0,0 +1,17 @@ +--- +title: Automatic Market Makers +layout: default +nav_order: 2 +--- + +*text on how AMMs work* + +Serai uses a symmetric liquidity pool with the `xy=k` formula. + +Concentrated liquidity would presumably offer less slippage on swaps, and there are +[discussions to evolve to a concentrated liquidity/order book environment](https://github.com/serai-dex/serai/issues/420). 
+Unfortunately, it effectively requires active management of provided liquidity. +This disenfranchises small liquidity providers who may not have the knowledge +and resources necessary to perform such management. Since Serai is expected to +have a community-bootstrapped start, starting with concentrated liquidity would +accordingly be contradictory. diff --git a/docs/3_cross_chain.md b/docs/cross_chain/index.md similarity index 100% rename from docs/3_cross_chain.md rename to docs/cross_chain/index.md diff --git a/docs/4_1_genesis.md b/docs/economics/genesis.md similarity index 100% rename from docs/4_1_genesis.md rename to docs/economics/genesis.md diff --git a/docs/4_0_economics.md b/docs/economics/index.md similarity index 100% rename from docs/4_0_economics.md rename to docs/economics/index.md diff --git a/docs/4_3_post.md b/docs/economics/post.md similarity index 100% rename from docs/4_3_post.md rename to docs/economics/post.md diff --git a/docs/4_2_pre.md b/docs/economics/pre.md similarity index 100% rename from docs/4_2_pre.md rename to docs/economics/pre.md diff --git a/docs/index.md b/docs/index.md index 99aa9831..2246e51c 100644 --- a/docs/index.md +++ b/docs/index.md @@ -4,4 +4,8 @@ layout: home nav_order: 1 --- -Documentation for Serai. +Serai is a cross-chain decentralized exchange, integrating Bitcoin, Ethereum, +and Monero. + +This documentation site is still under active development and may have missing +sections, errors, and typos. diff --git a/docs/infrastructure/coordinator.md b/docs/infrastructure/coordinator.md new file mode 100644 index 00000000..2d69e096 --- /dev/null +++ b/docs/infrastructure/coordinator.md @@ -0,0 +1,19 @@ +--- +title: Coordinator +layout: default +nav_order: 3 +parent: Infrastructure +--- + +The coordinator is a local service which communicates with other validators' +coordinators. 
It provides a verifiable broadcast layer for various consensus +messages, such as agreement on external blockchains, key generation and signing +protocols, and the latest Serai block. + +The verifiable broadcast layer is implemented via a blockchain, referred to as a +Tributary, which is agreed upon using Tendermint consensus. This consensus is +not as offered by Tendermint Core/CometBFT, as used in the Cosmos SDK +(historically/presently), yet by our own implementation designed to be used as a +library and not as another daemon. Tributaries are ephemeral, only used by the +current validators, and deleted upon the next epoch. All of the results from it +are verifiable via the external network and the Serai blockchain alone. diff --git a/docs/5_0_infrastructure.md b/docs/infrastructure/index.md similarity index 100% rename from docs/5_0_infrastructure.md rename to docs/infrastructure/index.md diff --git a/docs/infrastructure/message_queue.md b/docs/infrastructure/message_queue.md new file mode 100644 index 00000000..fa8e0f8e --- /dev/null +++ b/docs/infrastructure/message_queue.md @@ -0,0 +1,25 @@ +--- +title: Message Queue +layout: default +nav_order: 1 +parent: Infrastructure +--- + +The Message Queue is a microservice to authenticate and relay messages between +services. It offers just three functions: + +1) Queue a message. +2) Receive the next message. +3) Acknowledge a message, removing it from the queue. + +This ensures messages are delivered between services, with their order +preserved. This also ensures that if a service reboots while handling a message, +it'll still handle the message once rebooted (and the message will not be lost). + +The Message Queue also aims to offer increased liveness and performance. +If services directly communicated, the rate at which one service could operate +would always be bottlenecked by the service it communicates with. 
If the +receiving service ever went offline, the sending service wouldn't be able to +deliver messages until the receiver came back online, halting its own work. By +defining a dedicated microservice, with a lack of complex logic, it's much less +likely to go offline or suffer from degraded performance. diff --git a/docs/infrastructure/processor.md b/docs/infrastructure/processor.md new file mode 100644 index 00000000..fb542cad --- /dev/null +++ b/docs/infrastructure/processor.md @@ -0,0 +1,19 @@ +--- +title: Processor +layout: default +nav_order: 2 +parent: Infrastructure +--- + +The processor performs several important tasks with regards to the external +network. Each of them are documented in the following sections. + +# Key Generation + +# Scanning + +# Signing Batches + +# Planning Transactions + +# Cosigning diff --git a/docs/5_4_serai.md b/docs/infrastructure/serai.md similarity index 100% rename from docs/5_4_serai.md rename to docs/infrastructure/serai.md diff --git a/docs/integrating/index.md b/docs/integrating/index.md new file mode 100644 index 00000000..764441e2 --- /dev/null +++ b/docs/integrating/index.md @@ -0,0 +1,6 @@ +--- +title: Integrating with Serai +layout: default +nav_order: 6 +has_children: true +--- diff --git a/docs/6_0_validator.md b/docs/validator/index.md similarity index 84% rename from docs/6_0_validator.md rename to docs/validator/index.md index 81db7be3..a652fd16 100644 --- a/docs/6_0_validator.md +++ b/docs/validator/index.md @@ -1,6 +1,6 @@ --- title: Running a Validator layout: default -nav_order: 6 +nav_order: 7 has_children: true --- From 233164cefd70b362752cbcc673f08865f9d3aabb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Mon, 11 Mar 2024 23:51:44 -0400 Subject: [PATCH 028/126] Flesh out docs more --- docs/_config.yml | 9 +++++++ docs/amm/index.md | 2 ++ docs/cross_chain/index.md | 2 ++ docs/economics/index.md | 39 ++++++++++++++++++++++++++++ docs/evolutions/index.md | 6 +++++ docs/index.md | 29 ++++++++++++++++++--- 
docs/infrastructure/coordinator.md | 2 ++ docs/infrastructure/index.md | 2 +- docs/infrastructure/message_queue.md | 4 +++ docs/infrastructure/processor.md | 12 +++++---- docs/integrating/index.md | 2 +- docs/validator/index.md | 2 +- 12 files changed, 99 insertions(+), 12 deletions(-) create mode 100644 docs/evolutions/index.md diff --git a/docs/_config.yml b/docs/_config.yml index 4db46720..75c8f131 100644 --- a/docs/_config.yml +++ b/docs/_config.yml @@ -3,3 +3,12 @@ description: Documentation for the Serai protocol. theme: just-the-docs url: https://docs.serai.exchange + +callouts: + warning: + title: Warning + color: red + + definition: + title: Definition + color: blue diff --git a/docs/amm/index.md b/docs/amm/index.md index ef48ceea..678f6ee9 100644 --- a/docs/amm/index.md +++ b/docs/amm/index.md @@ -4,6 +4,8 @@ layout: default nav_order: 2 --- +# Automatic Market Makers + *text on how AMMs work* Serai uses a symmetric liquidity pool with the `xy=k` formula. diff --git a/docs/cross_chain/index.md b/docs/cross_chain/index.md index 212aa5b5..a7b6a480 100644 --- a/docs/cross_chain/index.md +++ b/docs/cross_chain/index.md @@ -3,3 +3,5 @@ title: Cross-Chain Architecture layout: default nav_order: 3 --- + +# Cross-Chain Architecture diff --git a/docs/economics/index.md b/docs/economics/index.md index a77ed2c3..c8d3bde4 100644 --- a/docs/economics/index.md +++ b/docs/economics/index.md @@ -4,3 +4,42 @@ layout: default nav_order: 4 has_children: true --- + +# Economics + +Serai's economics change depending on which of three eras is currently +occurring. + +## Genesis Era + +The network starts with the "Genesis" era, where the goal of the network is to +attract the liquidity necessary to facilitate swaps. This period will last for +30 days and will let anyone add liquidity to the protocol. Only with its +conclusion will SRI start being distributed. + +After the Genesis era, the network enters the "Pre-Economic Security" era. 
+ +## Pre-Economic Security + +{: .definition-title } +> Definition: Economic Security +> +> Economic security is derived from it being unprofitable to misbehave. +> This is by the economic penalty which is presumed to occur upon misbehavior +> exceeding the value which would presumably be gained. +> Accordingly, rational actors would behave properly, causing the protocol to +> maintain its integrity. +> +> For Serai specifically, the stake required to produce unintended signatures +> must exceed the value accessible via producing unintended signatures. + +With liquidity provided, and swaps enabled, the goal is to have validators stake +sufficiently for economic security to be achieved. This is primarily via +offering freshly minted, staked SRI to would-be validators who decide to swap +external coins for their stake. + +## Post-Economic Security + +Having achieved economic security, the protocol changes its economics one last +time (barring future upgrades to the protocol) to a 'normal' state of +operations. diff --git a/docs/evolutions/index.md b/docs/evolutions/index.md new file mode 100644 index 00000000..f24d634f --- /dev/null +++ b/docs/evolutions/index.md @@ -0,0 +1,6 @@ +--- +title: Evolutions +layout: default +nav_order: 5 +has_children: true +--- diff --git a/docs/index.md b/docs/index.md index 2246e51c..c7685d13 100644 --- a/docs/index.md +++ b/docs/index.md @@ -4,8 +4,29 @@ layout: home nav_order: 1 --- -Serai is a cross-chain decentralized exchange, integrating Bitcoin, Ethereum, -and Monero. - +{: .warning } This documentation site is still under active development and may have missing -sections, errors, and typos. +sections, errors, and typos. Even once this documentation site is 'complete', it +may become out-of-date (as Serai is an evolving protocol yet to release) or have +minor errors. + +# Serai + +Serai is a fairly launched cross-chain decentralized exchange, integrating +Bitcoin (BTC), Ethereum (ETH, DAI), and Monero (XMR). 
+ +The Serai mainnet has yet to launch, and until then, all details are subject to +change. + +Prior to the Serai mainnet launching, SRI, Serai's native coin, will not +exist. As a fairly launched project, SRI will have no ICO, no IEO, no presale, +no developers' tax/fund, and no airdrop for out-of-mainnet activity. + +Out-of-mainnet activity includes: + +- Being a community member (such as on Discord or on Twitter) +- Participating in testnets +- Contributing to the GitHub + +None of these will be awarded any airdrop. All distributions of SRI will happen +on-chain per the protocols' defined rules, based on on-chain activity. diff --git a/docs/infrastructure/coordinator.md b/docs/infrastructure/coordinator.md index 2d69e096..cf6acaca 100644 --- a/docs/infrastructure/coordinator.md +++ b/docs/infrastructure/coordinator.md @@ -5,6 +5,8 @@ nav_order: 3 parent: Infrastructure --- +# Coordinator + The coordinator is a local service which communicates with other validators' coordinators. It provides a verifiable broadcast layer for various consensus messages, such as agreement on external blockchains, key generation and signing diff --git a/docs/infrastructure/index.md b/docs/infrastructure/index.md index acb2be9f..2db1a791 100644 --- a/docs/infrastructure/index.md +++ b/docs/infrastructure/index.md @@ -1,6 +1,6 @@ --- title: Infrastructure layout: default -nav_order: 5 +nav_order: 6 has_children: true --- diff --git a/docs/infrastructure/message_queue.md b/docs/infrastructure/message_queue.md index fa8e0f8e..4ce21277 100644 --- a/docs/infrastructure/message_queue.md +++ b/docs/infrastructure/message_queue.md @@ -5,11 +5,15 @@ nav_order: 1 parent: Infrastructure --- +# Message Queue + The Message Queue is a microservice to authenticate and relay messages between services. It offers just three functions: 1) Queue a message. + 2) Receive the next message. + 3) Acknowledge a message, removing it from the queue. 
This ensures messages are delivered between services, with their order diff --git a/docs/infrastructure/processor.md b/docs/infrastructure/processor.md index fb542cad..ca49120e 100644 --- a/docs/infrastructure/processor.md +++ b/docs/infrastructure/processor.md @@ -5,15 +5,17 @@ nav_order: 2 parent: Infrastructure --- +# Processor + The processor performs several important tasks with regards to the external network. Each of them are documented in the following sections. -# Key Generation +## Key Generation -# Scanning +## Scanning -# Signing Batches +## Signing Batches -# Planning Transactions +## Planning Transactions -# Cosigning +## Cosigning diff --git a/docs/integrating/index.md b/docs/integrating/index.md index 764441e2..58a6ea06 100644 --- a/docs/integrating/index.md +++ b/docs/integrating/index.md @@ -1,6 +1,6 @@ --- title: Integrating with Serai layout: default -nav_order: 6 +nav_order: 7 has_children: true --- diff --git a/docs/validator/index.md b/docs/validator/index.md index a652fd16..753d5976 100644 --- a/docs/validator/index.md +++ b/docs/validator/index.md @@ -1,6 +1,6 @@ --- title: Running a Validator layout: default -nav_order: 7 +nav_order: 8 has_children: true --- From 9662d94bf99135fafc9eb140670c4d62891cc24b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 12 Mar 2024 00:56:06 -0400 Subject: [PATCH 029/126] Document the signals pallet in the user-facing docs --- docs/evolutions/index.md | 6 ----- docs/protocol_changes/index.md | 42 ++++++++++++++++++++++++++++++++ docs/protocol_changes/signals.md | 0 3 files changed, 42 insertions(+), 6 deletions(-) delete mode 100644 docs/evolutions/index.md create mode 100644 docs/protocol_changes/index.md create mode 100644 docs/protocol_changes/signals.md diff --git a/docs/evolutions/index.md b/docs/evolutions/index.md deleted file mode 100644 index f24d634f..00000000 --- a/docs/evolutions/index.md +++ /dev/null @@ -1,6 +0,0 @@ ---- -title: Evolutions -layout: default -nav_order: 5 -has_children: true 
---- diff --git a/docs/protocol_changes/index.md b/docs/protocol_changes/index.md new file mode 100644 index 00000000..692a0084 --- /dev/null +++ b/docs/protocol_changes/index.md @@ -0,0 +1,42 @@ +--- +title: Protocol Changes +layout: default +nav_order: 5 +--- + +The protocol has no central authority nor organization nor actors (such as +liquidity providers/validators) who can compel new protocol rules. The Serai +protocol is as-written with all granted functionality and declared rules +present. + +Validators are explicitly granted the ability to signal for two things to occur: + +1) Halt another validator set. + +This will presumably occur if another validator set turns malicious and is the +expected incident response in order to apply an economic penalty of ideally +greater value than damage wreaked. Halting a validator set prevents further +publication of `Batch`s, preventing improper actions on the Serai blockchain, +and preventing validators from unstaking (as unstaking only occurs once future +validator sets have accepted responsibility, and accepting responsibility +requires `Batch` publication). This effectively burns the malicious validators' +stake. + +2) Retire the protocol. + +A supermajority of validators may favor a signal (an opaque 32-byte ID). A +common signal gaining sufficient favor will cause the protocol to stop producing +blocks in two weeks. + +Nodes will presumably, as individual entities, hard fork to new consensus rules. +These rules presumably will remove the rule to stop producing blocks in two +weeks, they may declare new validators, and they may declare new functionality +entirely. + +While nodes individually hard fork, across every hard fork the state of the +various `sriXYZ` coins (such as `sriBTC`, `sriETH`, `sriDAI`, and `sriXMR`) +remains intact (unless the new rules modify such state). 
These coins can still +be burned with instructions (unless the new rules prevent that) and if a +validator set doesn't send `XYZ` as expected, they can be halted (effectively +burning their `SRI` stake). Accordingly, every node decides if and how to further +participate, with the abilities and powers they declare themselves to have. diff --git a/docs/protocol_changes/signals.md b/docs/protocol_changes/signals.md new file mode 100644 index 00000000..e69de29b From 4a6496a90ba6c0beb7c6ac78525f6cfb5850db1f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 12 Mar 2024 00:59:51 -0400 Subject: [PATCH 030/126] Add slightly nicer formatting re: Protocol Changes doc --- docs/protocol_changes/index.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/protocol_changes/index.md b/docs/protocol_changes/index.md index 692a0084..263e2cd7 100644 --- a/docs/protocol_changes/index.md +++ b/docs/protocol_changes/index.md @@ -4,6 +4,8 @@ layout: default nav_order: 5 --- +# Protocol Changes + The protocol has no central authority nor organization nor actors (such as liquidity providers/validators) who can compel new protocol rules. The Serai protocol is as-written with all granted functionality and declared rules @@ -11,7 +13,7 @@ present. Validators are explicitly granted the ability to signal for two things to occur: -1) Halt another validator set. +### 1) Halt another validator set. This will presumably occur if another validator set turns malicious and is the expected incident response in order to apply an economic penalty of ideally greater value than damage wreaked. Halting a validator set prevents further @@ -22,7 +24,7 @@ validator sets have accepted responsibility, and accepting responsibility requires `Batch` publication). This effectively burns the malicious validators' stake. -2) Retire the protocol. +### 2) Retire the protocol. A supermajority of validators may favor a signal (an opaque 32-byte ID). 
A common signal gaining sufficient favor will cause the protocol to stop producing From 13b147cbf663152ce409ec8376de0e6d0411dbe4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 20 Mar 2024 08:23:23 -0400 Subject: [PATCH 031/126] Reduce coordinator tests contention re: cosign messages --- tests/coordinator/src/lib.rs | 318 +++++++++++++-------------- tests/coordinator/src/tests/batch.rs | 2 +- 2 files changed, 158 insertions(+), 162 deletions(-) diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index 0c197a92..0541c4fd 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -5,7 +5,10 @@ use std::{ time::Duration, }; -use tokio::{task::AbortHandle, sync::Mutex as AsyncMutex}; +use tokio::{ + task::AbortHandle, + sync::{Mutex as AsyncMutex, mpsc}, +}; use rand_core::{RngCore, OsRng}; @@ -96,7 +99,6 @@ pub struct Handles { pub(crate) message_queue: String, } -#[derive(Clone)] pub struct Processor { network: NetworkId, @@ -104,7 +106,8 @@ pub struct Processor { #[allow(unused)] handles: Handles, - queue: Arc>, + msgs: mpsc::UnboundedReceiver, + queue_for_sending: MessageQueue, abort_handle: Option>, substrate_key: Arc::F>>>>, @@ -145,156 +148,173 @@ impl Processor { // The Serai RPC may or may not be started // Assume it is and continue, so if it's a few seconds late, it's still within tolerance + // Create the queue + let mut queue = ( + 0, + Arc::new(MessageQueue::new( + Service::Processor(network), + message_queue_rpc.clone(), + Zeroizing::new(processor_key), + )), + ); + + let (msg_send, msg_recv) = mpsc::unbounded_channel(); + + let substrate_key = Arc::new(AsyncMutex::new(None)); let mut res = Processor { network, serai_rpc, handles, - queue: Arc::new(AsyncMutex::new(( - 0, - 0, - MessageQueue::new( - Service::Processor(network), - message_queue_rpc, - Zeroizing::new(processor_key), - ), - ))), + queue_for_sending: MessageQueue::new( + Service::Processor(network), + message_queue_rpc, + 
Zeroizing::new(processor_key), + ), + msgs: msg_recv, abort_handle: None, - substrate_key: Arc::new(AsyncMutex::new(None)), + substrate_key: substrate_key.clone(), }; - // Handle any cosigns which come up - res.abort_handle = Some(Arc::new( - tokio::spawn({ - let mut res = res.clone(); - async move { - loop { - tokio::task::yield_now().await; + // Spawn a task to handle cosigns and forward messages as appropriate + let abort_handle = tokio::spawn({ + async move { + loop { + // Get new messages + let (next_recv_id, queue) = &mut queue; + let msg = queue.next(Service::Coordinator).await; + assert_eq!(msg.from, Service::Coordinator); + assert_eq!(msg.id, *next_recv_id); + queue.ack(Service::Coordinator, msg.id).await; + *next_recv_id += 1; - let msg = { - let mut queue_lock = res.queue.lock().await; - let (_, next_recv_id, queue) = &mut *queue_lock; - let Ok(msg) = - tokio::time::timeout(Duration::from_secs(1), queue.next(Service::Coordinator)) - .await - else { - continue; - }; - assert_eq!(msg.from, Service::Coordinator); - assert_eq!(msg.id, *next_recv_id); + let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - // Remove any BatchReattempts clogging the pipe - // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet - // leave it for the tests - if matches!( - msg_msg, - messages::CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::BatchReattempt { .. 
} - ) - ) { - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - continue; - } - if !is_cosign_message(&msg_msg) { - continue; - }; - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - msg_msg - }; + // Remove any BatchReattempts clogging the pipe + // TODO: Set up a wrapper around serai-client so we aren't throwing this away yet + // leave it for the tests + if matches!( + msg_msg, + messages::CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::BatchReattempt { .. } + ) + ) { + continue; + } - struct CurrentCosign { - block_number: u64, - block: [u8; 32], - } - static CURRENT_COSIGN: OnceLock>> = OnceLock::new(); - let mut current_cosign = - CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await; - match msg { - // If this is a CosignSubstrateBlock, reset the CurrentCosign - // While technically, each processor should individually track the current cosign, - // this is fine for current testing purposes - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { - id, - block_number, + if !is_cosign_message(&msg_msg) { + msg_send.send(msg_msg).unwrap(); + continue; + } + let msg = msg_msg; + + let send_message = |msg: ProcessorMessage| async move { + queue + .queue( + Metadata { + from: Service::Processor(network), + to: Service::Coordinator, + intent: msg.intent(), }, - ) => { - let SubstrateSignId { - id: SubstrateSignableId::CosigningSubstrateBlock(block), .. 
- } = id - else { - panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") - }; + borsh::to_vec(&msg).unwrap(), + ) + .await; + }; - let new_cosign = CurrentCosign { block_number, block }; - if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { - *current_cosign = Some(new_cosign); + struct CurrentCosign { + block_number: u64, + block: [u8; 32], + } + static CURRENT_COSIGN: OnceLock>> = OnceLock::new(); + let mut current_cosign = + CURRENT_COSIGN.get_or_init(|| AsyncMutex::new(None)).lock().await; + match msg { + // If this is a CosignSubstrateBlock, reset the CurrentCosign + // While technically, each processor should individually track the current cosign, + // this is fine for current testing purposes + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::CosignSubstrateBlock { id, block_number }, + ) => { + let SubstrateSignId { + id: SubstrateSignableId::CosigningSubstrateBlock(block), .. + } = id + else { + panic!("CosignSubstrateBlock didn't have CosigningSubstrateBlock ID") + }; + + let new_cosign = CurrentCosign { block_number, block }; + if current_cosign.is_none() || (current_cosign.as_ref().unwrap().block != block) { + *current_cosign = Some(new_cosign); + } + send_message( + messages::coordinator::ProcessorMessage::CosignPreprocess { + id: id.clone(), + preprocesses: vec![[raw_i; 64]], } - res - .send_message(messages::coordinator::ProcessorMessage::CosignPreprocess { - id: id.clone(), - preprocesses: vec![[raw_i; 64]], - }) - .await; - } - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. }, - ) => { - // TODO: Assert the ID matches CURRENT_COSIGN - // TODO: Verify the received preprocesses - res - .send_message(messages::coordinator::ProcessorMessage::SubstrateShare { - id, - shares: vec![[raw_i; 32]], - }) - .await; - } - CoordinatorMessage::Coordinator( - messages::coordinator::CoordinatorMessage::SubstrateShares { .. 
}, - ) => { - // TODO: Assert the ID matches CURRENT_COSIGN - // TODO: Verify the shares - - let block_number = current_cosign.as_ref().unwrap().block_number; - let block = current_cosign.as_ref().unwrap().block; - - let substrate_key = res.substrate_key.lock().await.clone().unwrap(); - - // Expand to a key pair as Schnorrkel expects - // It's the private key + 32-bytes of entropy for nonces + the public key - let mut schnorrkel_key_pair = [0; 96]; - schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); - OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); - schnorrkel_key_pair[64 ..].copy_from_slice( - &(::generator() * *substrate_key).to_bytes(), - ); - let signature = Signature( - schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) - .unwrap() - .sign_simple(b"substrate", &cosign_block_msg(block_number, block)) - .to_bytes(), - ); - - res - .send_message(messages::coordinator::ProcessorMessage::CosignedBlock { - block_number, - block, - signature: signature.0.to_vec(), - }) - .await; - } - _ => panic!("unexpected message passed is_cosign_message"), + .into(), + ) + .await; } + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SubstratePreprocesses { id, .. }, + ) => { + // TODO: Assert the ID matches CURRENT_COSIGN + // TODO: Verify the received preprocesses + send_message( + messages::coordinator::ProcessorMessage::SubstrateShare { + id, + shares: vec![[raw_i; 32]], + } + .into(), + ) + .await; + } + CoordinatorMessage::Coordinator( + messages::coordinator::CoordinatorMessage::SubstrateShares { .. 
}, + ) => { + // TODO: Assert the ID matches CURRENT_COSIGN + // TODO: Verify the shares + + let block_number = current_cosign.as_ref().unwrap().block_number; + let block = current_cosign.as_ref().unwrap().block; + + let substrate_key = substrate_key.lock().await.clone().unwrap(); + + // Expand to a key pair as Schnorrkel expects + // It's the private key + 32-bytes of entropy for nonces + the public key + let mut schnorrkel_key_pair = [0; 96]; + schnorrkel_key_pair[.. 32].copy_from_slice(&substrate_key.to_repr()); + OsRng.fill_bytes(&mut schnorrkel_key_pair[32 .. 64]); + schnorrkel_key_pair[64 ..].copy_from_slice( + &(::generator() * *substrate_key).to_bytes(), + ); + let signature = Signature( + schnorrkel::keys::Keypair::from_bytes(&schnorrkel_key_pair) + .unwrap() + .sign_simple(b"substrate", &cosign_block_msg(block_number, block)) + .to_bytes(), + ); + + send_message( + messages::coordinator::ProcessorMessage::CosignedBlock { + block_number, + block, + signature: signature.0.to_vec(), + } + .into(), + ) + .await; + } + _ => panic!("unexpected message passed is_cosign_message"), } } - }) - .abort_handle(), - )); + } + }) + .abort_handle(); + + res.abort_handle = Some(Arc::new(abort_handle)); res } @@ -307,9 +327,8 @@ impl Processor { pub async fn send_message(&mut self, msg: impl Into) { let msg: ProcessorMessage = msg.into(); - let mut queue_lock = self.queue.lock().await; - let (next_send_id, _, queue) = &mut *queue_lock; - queue + self + .queue_for_sending .queue( Metadata { from: Service::Processor(self.network), @@ -319,36 +338,13 @@ impl Processor { borsh::to_vec(&msg).unwrap(), ) .await; - *next_send_id += 1; - } - - async fn recv_message_inner(&mut self) -> CoordinatorMessage { - loop { - tokio::task::yield_now().await; - - let mut queue_lock = self.queue.lock().await; - let (_, next_recv_id, queue) = &mut *queue_lock; - let msg = queue.next(Service::Coordinator).await; - assert_eq!(msg.from, Service::Coordinator); - assert_eq!(msg.id, *next_recv_id); - 
- // If this is a cosign message, let the cosign task handle it - let msg_msg = borsh::from_slice(&msg.msg).unwrap(); - if is_cosign_message(&msg_msg) { - continue; - } - - queue.ack(Service::Coordinator, msg.id).await; - *next_recv_id += 1; - return msg_msg; - } } /// Receive a message from the coordinator as a processor. pub async fn recv_message(&mut self) -> CoordinatorMessage { // Set a timeout of 20 minutes to allow effectively any protocol to occur without a fear of // an arbitrary timeout cutting it short - tokio::time::timeout(Duration::from_secs(20 * 60), self.recv_message_inner()).await.unwrap() + tokio::time::timeout(Duration::from_secs(20 * 60), self.msgs.recv()).await.unwrap().unwrap() } pub async fn set_substrate_key( diff --git a/tests/coordinator/src/tests/batch.rs b/tests/coordinator/src/tests/batch.rs index 67bafa24..ebba957b 100644 --- a/tests/coordinator/src/tests/batch.rs +++ b/tests/coordinator/src/tests/batch.rs @@ -245,7 +245,7 @@ pub async fn batch( ) ); - // Send the ack as expected, though it shouldn't trigger any observable behavior + // Send the ack as expected processor .send_message(messages::ProcessorMessage::Coordinator( messages::coordinator::ProcessorMessage::SubstrateBlockAck { From 1f2b9376f9583b65e7237e73252d889e1a2c6368 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 20 Mar 2024 21:53:57 -0400 Subject: [PATCH 032/126] zstd 0.13 --- Cargo.lock | 24 +++++++++++++++++++++--- patches/zstd/Cargo.toml | 2 +- 2 files changed, 22 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e66c478..9c5d097e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2953,7 +2953,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.4.10", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -10575,7 +10575,7 @@ dependencies = [ name = "zstd" version = "0.11.2+zstd.1.5.2" dependencies = [ - "zstd 0.12.4", + "zstd 0.13.0", ] [[package]] @@ -10584,7 +10584,16 @@ version = "0.12.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" dependencies = [ - "zstd-safe", + "zstd-safe 6.0.6", +] + +[[package]] +name = "zstd" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +dependencies = [ + "zstd-safe 7.0.0", ] [[package]] @@ -10597,6 +10606,15 @@ dependencies = [ "zstd-sys", ] +[[package]] +name = "zstd-safe" +version = "7.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.9+zstd.1.5.5" diff --git a/patches/zstd/Cargo.toml b/patches/zstd/Cargo.toml index f7bf11d6..0d1368e4 100644 --- a/patches/zstd/Cargo.toml +++ b/patches/zstd/Cargo.toml @@ -14,4 +14,4 @@ all-features = true rustdoc-args = ["--cfg", "docsrs"] [dependencies] -zstd = "0.12" +zstd = "0.13" From c706d8664a3417b7fbe8baba36d15d6cab7b24c2 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 20 Mar 2024 21:44:53 -0400 Subject: [PATCH 033/126] Use OptimisticTransactionDb Exposes flush calls. Adds safety, at the cost of a panic risk, as multiple TXNs simultaneously writing to a key will now cause a panic. This should be fine and the safety is appreciated. 
--- common/db/src/rocks.rs | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/common/db/src/rocks.rs b/common/db/src/rocks.rs index 20d39666..6a724563 100644 --- a/common/db/src/rocks.rs +++ b/common/db/src/rocks.rs @@ -1,44 +1,51 @@ use std::sync::Arc; use rocksdb::{ - DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, Transaction, Options, - TransactionDB, + DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions, + Transaction as RocksTransaction, Options, OptimisticTransactionDB, }; use crate::*; -impl Get for Transaction<'_, TransactionDB> { +pub struct Transaction<'a, T: ThreadMode>( + RocksTransaction<'a, OptimisticTransactionDB>, + &'a OptimisticTransactionDB, +); + +impl Get for Transaction<'_, T> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { - self.get(key).expect("couldn't read from RocksDB via transaction") + self.0.get(key).expect("couldn't read from RocksDB via transaction") } } -impl DbTxn for Transaction<'_, TransactionDB> { +impl DbTxn for Transaction<'_, T> { fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) { - Transaction::put(self, key, value).expect("couldn't write to RocksDB via transaction") + self.0.put(key, value).expect("couldn't write to RocksDB via transaction") } fn del(&mut self, key: impl AsRef<[u8]>) { - self.delete(key).expect("couldn't delete from RocksDB via transaction") + self.0.delete(key).expect("couldn't delete from RocksDB via transaction") } fn commit(self) { - Transaction::commit(self).expect("couldn't commit to RocksDB via transaction") + self.0.commit().expect("couldn't commit to RocksDB via transaction"); + self.1.flush_wal(true).expect("couldn't flush RocksDB WAL"); + self.1.flush().expect("couldn't flush RocksDB"); } } -impl Get for Arc> { +impl Get for Arc> { fn get(&self, key: impl AsRef<[u8]>) -> Option> { - TransactionDB::get(self, key).expect("couldn't read from RocksDB") + 
OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB") } } -impl Db for Arc> { - type Transaction<'a> = Transaction<'a, TransactionDB>; +impl Db for Arc> { + type Transaction<'a> = Transaction<'a, T>; fn txn(&mut self) -> Self::Transaction<'_> { let mut opts = WriteOptions::default(); opts.set_sync(true); - self.transaction_opt(&opts, &Default::default()) + Transaction(self.transaction_opt(&opts, &Default::default()), &**self) } } -pub type RocksDB = Arc>; +pub type RocksDB = Arc>; pub fn new_rocksdb(path: &str) -> RocksDB { let mut options = Options::default(); options.create_if_missing(true); @@ -54,5 +61,5 @@ pub fn new_rocksdb(path: &str) -> RocksDB { options.set_max_log_file_size(1024 * 1024); options.set_recycle_log_file_num(1); - Arc::new(TransactionDB::open(&options, &Default::default(), path).unwrap()) + Arc::new(OptimisticTransactionDB::open(&options, path).unwrap()) } From 84cee06ac17f780a322d1b5182670c23349343b7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 21 Mar 2024 20:05:34 -0400 Subject: [PATCH 034/126] Rust 1.77 --- orchestration/runtime/Dockerfile | 2 +- orchestration/src/main.rs | 2 +- rust-toolchain.toml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/orchestration/runtime/Dockerfile b/orchestration/runtime/Dockerfile index 4df69842..21da0a75 100644 --- a/orchestration/runtime/Dockerfile +++ b/orchestration/runtime/Dockerfile @@ -1,4 +1,4 @@ -FROM --platform=linux/amd64 rust:1.76.0-slim-bookworm as builder +FROM --platform=linux/amd64 rust:1.77.0-slim-bookworm as builder # Move to a Debian package snapshot RUN rm -rf /etc/apt/sources.list.d/debian.sources && \ diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 1a2c48ca..0ec5913f 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -129,7 +129,7 @@ fn build_serai_service(release: bool, features: &str, package: &str) -> String { format!( r#" -FROM rust:1.76-slim-bookworm as builder +FROM 
rust:1.77-slim-bookworm as builder COPY --from=mimalloc-debian libmimalloc.so /usr/lib RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload diff --git a/rust-toolchain.toml b/rust-toolchain.toml index 227b9c21..77a0cea2 100644 --- a/rust-toolchain.toml +++ b/rust-toolchain.toml @@ -1,5 +1,5 @@ [toolchain] -channel = "1.76" +channel = "1.77" targets = ["wasm32-unknown-unknown"] profile = "minimal" components = ["rust-src", "rustfmt", "clippy"] From fab7a0a7cbaad0f529b3b0f092ec0c5d268a1c2d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 02:19:09 -0400 Subject: [PATCH 035/126] Use the deterministically built wasm Has the Dockerfile output to a volume. Has the node use the wasm from the volume, if it exists. --- orchestration/runtime/Dockerfile | 14 +++-- orchestration/src/main.rs | 82 +++++++++++++++++++++++++++++ orchestration/src/serai.rs | 2 +- substrate/node/src/chain_spec.rs | 32 ++++++----- substrate/node/src/command.rs | 4 +- substrate/signals/pallet/src/lib.rs | 1 + 6 files changed, 117 insertions(+), 18 deletions(-) diff --git a/orchestration/runtime/Dockerfile b/orchestration/runtime/Dockerfile index 21da0a75..2801f070 100644 --- a/orchestration/runtime/Dockerfile +++ b/orchestration/runtime/Dockerfile @@ -1,17 +1,20 @@ -FROM --platform=linux/amd64 rust:1.77.0-slim-bookworm as builder +# rust:1.77.0-slim-bookworm as of March 22nd, 2024 (GMT) +FROM --platform=linux/amd64 rust@sha256:e785e4aa81f87bc1ee02fa2026ffbc491e0410bdaf6652cea74884373f452664 as deterministic # Move to a Debian package snapshot RUN rm -rf /etc/apt/sources.list.d/debian.sources && \ rm -rf /var/lib/apt/lists/* && \ - echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240201T000000Z bookworm main" > /etc/apt/sources.list && \ + echo "deb [arch=amd64] http://snapshot.debian.org/archive/debian/20240301T000000Z bookworm main" > /etc/apt/sources.list && \ apt update # Install dependencies -RUN apt install clang -y +RUN apt update && apt upgrade && apt 
install clang -y # Add the wasm toolchain RUN rustup target add wasm32-unknown-unknown +FROM deterministic + # Add files for build ADD patches /serai/patches ADD common /serai/common @@ -30,3 +33,8 @@ ADD Cargo.lock /serai ADD AGPL-3.0 /serai WORKDIR /serai + +# Build the runtime, copying it to the volume if it exists +CMD cargo build --release -p serai-runtime && \ + mkdir -p /volume && \ + cp /serai/target/release/wbuild/serai-runtime/serai_runtime.wasm /volume/serai.wasm diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 0ec5913f..a2533c49 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -325,6 +325,87 @@ fn start(network: Network, services: HashSet) { _ => panic!("starting unrecognized service"), }; + // If we're building the Serai service, first build the runtime + let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label()); + if name == "serai" { + // Check if it's built by checking if the volume has the expected runtime file + let built = || { + if let Ok(path) = Command::new("docker") + .arg("volume") + .arg("inspect") + .arg("-f") + .arg("{{ .Mountpoint }}") + .arg(&serai_runtime_volume) + .output() + { + if let Ok(path) = String::from_utf8(path.stdout) { + if let Ok(iter) = std::fs::read_dir(PathBuf::from(path.trim())) { + for item in iter.flatten() { + if item.file_name() == "serai.wasm" { + return true; + } + } + } + } + } + false + }; + + if !built() { + let mut repo_path = env::current_exe().unwrap(); + repo_path.pop(); + if repo_path.as_path().ends_with("deps") { + repo_path.pop(); + } + assert!(repo_path.as_path().ends_with("debug") || repo_path.as_path().ends_with("release")); + repo_path.pop(); + assert!(repo_path.as_path().ends_with("target")); + repo_path.pop(); + + // Build the image to build the runtime + if !Command::new("docker") + .current_dir(&repo_path) + .arg("build") + .arg("-f") + .arg("orchestration/runtime/Dockerfile") + .arg(".") + .arg("-t") + 
.arg(format!("serai-{}-runtime-img", network.label())) + .spawn() + .unwrap() + .wait() + .unwrap() + .success() + { + panic!("failed to build runtime image"); + } + + // Run the image, building the runtime + println!("Building the Serai runtime"); + let container_name = format!("serai-{}-runtime", network.label()); + let _ = + Command::new("docker").arg("rm").arg("-f").arg(&container_name).spawn().unwrap().wait(); + let _ = Command::new("docker") + .arg("run") + .arg("--name") + .arg(container_name) + .arg("--volume") + .arg(format!("{serai_runtime_volume}:/volume")) + .arg(format!("serai-{}-runtime-img", network.label())) + .spawn(); + + // Wait until its built + let mut ticks = 0; + while !built() { + std::thread::sleep(core::time::Duration::from_secs(60)); + ticks += 1; + if ticks > 6 * 60 { + panic!("couldn't build the runtime after 6 hours") + } + } + } + } + // Build it println!("Building {service}"); docker::build(&orchestration_path(network), network, name); @@ -367,6 +448,7 @@ fn start(network: Network, services: HashSet) { assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev"); command.arg("-p").arg("18082:18082") } + "serai" => command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")), _ => command, }; assert!( diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index a3382acb..74fa78e6 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -21,7 +21,7 @@ EXPOSE 30333 9615 9933 9944 ADD /orchestration/{}/serai/run.sh / CMD ["/run.sh"] "#, - network.label() + network.label(), ); let run = os(Os::Debian, "", "serai") + &run_serai; diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 042f5178..b630c00b 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -15,6 +15,14 @@ fn account_from_name(name: &'static str) -> PublicKey { insecure_pair_from_name(name).public() } +fn wasm_binary() -> Vec { + // TODO: Accept a config of 
runtime path + if let Ok(binary) = std::fs::read("/runtime/serai.wasm") { + return binary; + } + WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() +} + fn testnet_genesis( wasm_binary: &[u8], validators: &[&'static str], @@ -64,18 +72,18 @@ fn testnet_genesis( } } -pub fn development_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Development wasm not available")?; +pub fn development_config() -> ChainSpec { + let wasm_binary = wasm_binary(); - Ok(ChainSpec::from_genesis( + ChainSpec::from_genesis( // Name "Development Network", // ID "devnet", ChainType::Development, - || { + move || { testnet_genesis( - wasm_binary, + &wasm_binary, &["Alice"], vec![ account_from_name("Alice"), @@ -99,21 +107,21 @@ pub fn development_config() -> Result { None, // Extensions None, - )) + ) } -pub fn testnet_config() -> Result { - let wasm_binary = WASM_BINARY.ok_or("Testnet wasm not available")?; +pub fn testnet_config() -> ChainSpec { + let wasm_binary = wasm_binary(); - Ok(ChainSpec::from_genesis( + ChainSpec::from_genesis( // Name "Local Test Network", // ID "local", ChainType::Local, - || { + move || { testnet_genesis( - wasm_binary, + &wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ account_from_name("Alice"), @@ -137,5 +145,5 @@ pub fn testnet_config() -> Result { None, // Extensions None, - )) + ) } diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index 3588f95f..2f7ea0f7 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -39,8 +39,8 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { - "dev" | "devnet" => Ok(Box::new(chain_spec::development_config()?)), - "local" => Ok(Box::new(chain_spec::testnet_config()?)), + "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())), + "local" => Ok(Box::new(chain_spec::testnet_config())), _ => panic!("Unknown network ID"), } } diff --git a/substrate/signals/pallet/src/lib.rs 
b/substrate/signals/pallet/src/lib.rs index 3fad27c9..54d6086a 100644 --- a/substrate/signals/pallet/src/lib.rs +++ b/substrate/signals/pallet/src/lib.rs @@ -142,6 +142,7 @@ pub mod pallet { } // 80% threshold + // TODO: Use 34% for halting a set (not 80%) const REQUIREMENT_NUMERATOR: u64 = 4; const REQUIREMENT_DIVISOR: u64 = 5; From e0259f2fe59b516184537aa72929c441ab9ca48b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 16:06:04 -0400 Subject: [PATCH 036/126] Add TODO re: Monero --- coins/monero/src/ringct/clsag/multisig.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 9cb930ce..85748b78 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -199,6 +199,7 @@ impl Algorithm for ClsagMultisig { l: Participant, addendum: ClsagAddendum, ) -> Result<(), FrostError> { + // TODO: This check is faulty if two shares are additive inverses of each other if self.image.is_identity().into() { self.transcript.domain_separate(b"CLSAG"); self.input().transcript(&mut self.transcript); From 2f07d04d881731869ae0ed8f13dc3dcc275035b6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 16:06:26 -0400 Subject: [PATCH 037/126] Extend timeout for rebroadcast of consensus messages in coordinator --- coordinator/tributary/src/lib.rs | 2 +- coordinator/tributary/src/tendermint/mod.rs | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 92fb98da..81b4fc17 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -207,7 +207,7 @@ impl Tributary { for msg in to_rebroadcast { p2p.broadcast(genesis, msg).await; } - tokio::time::sleep(core::time::Duration::from_secs(1)).await; + tokio::time::sleep(core::time::Duration::from_secs(60)).await; } } }) diff --git a/coordinator/tributary/src/tendermint/mod.rs 
b/coordinator/tributary/src/tendermint/mod.rs index d362364c..40d01380 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -331,14 +331,12 @@ impl Network for TendermintNetwork // until the block it's trying to build is complete // If the P2P layer drops a message before all nodes obtained access, or a node had an // intermittent failure, this will ensure reconcilliation - // Resolves halts caused by timing discrepancies, which technically are violations of - // Tendermint as a BFT protocol, and shouldn't occur yet have in low-powered testing - // environments // This is atrocious if there's no content-based deduplication protocol for messages actively // being gossiped // LibP2p, as used by Serai, is configured to content-based deduplicate let mut to_broadcast = vec![TENDERMINT_MESSAGE]; to_broadcast.extend(msg.encode()); + // TODO: Prune messages from old rounds which are no longer necessary self.to_rebroadcast.write().await.push(to_broadcast.clone()); self.p2p.broadcast(self.genesis, to_broadcast).await } From 6658d95c85c04da9a417301ec0f96e4ecf83080f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 16:06:56 -0400 Subject: [PATCH 038/126] Extend orchestration as actually needed for testnet Contains various bug fixes. 
--- orchestration/src/coordinator.rs | 7 +++++-- orchestration/src/main.rs | 19 ++++++++++++++++--- orchestration/src/message_queue.rs | 2 +- orchestration/src/processor.rs | 4 ++-- orchestration/src/serai.rs | 20 ++++++++++++++++---- orchestration/testnet/serai/run.sh | 2 +- 6 files changed, 41 insertions(+), 13 deletions(-) diff --git a/orchestration/src/coordinator.rs b/orchestration/src/coordinator.rs index a8556a00..9995dbbf 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -11,7 +11,7 @@ pub fn coordinator( orchestration_path: &Path, network: Network, coordinator_key: Zeroizing<::F>, - serai_key: Zeroizing<::F>, + serai_key: &Zeroizing<::F>, ) { let db = network.db(); let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" }; @@ -27,13 +27,16 @@ pub fn coordinator( RUN apt install -y ca-certificates "#; + #[rustfmt::skip] + const DEFAULT_RUST_LOG: &str = "info,serai_coordinator=debug,tributary_chain=debug,tendermint=debug,libp2p_gossipsub::behaviour=error"; + let env_vars = [ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())), ("DB_PATH", "./coordinator-db".to_string()), ("SERAI_KEY", hex::encode(serai_key.to_repr())), ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())), - ("RUST_LOG", "serai_coordinator=debug,tributary_chain=debug,tendermint=debug".to_string()), + ("RUST_LOG", DEFAULT_RUST_LOG.to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index a2533c49..548aca8b 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -276,9 +276,9 @@ fn dockerfiles(network: Network) { Zeroizing::new(::F::from_repr(*serai_key_repr).unwrap()) }; - coordinator(&orchestration_path, network, coordinator_key.0, serai_key); + coordinator(&orchestration_path, network, coordinator_key.0, 
&serai_key); - serai(&orchestration_path, network); + serai(&orchestration_path, network, &serai_key); } fn key_gen(network: Network) { @@ -448,7 +448,20 @@ fn start(network: Network, services: HashSet) { assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev"); command.arg("-p").arg("18082:18082") } - "serai" => command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")), + "coordinator" => { + if network != Network::Dev { + command.arg("-p").arg("30563:30563") + } else { + command + } + } + "serai" => { + let mut command = command; + if network != Network::Dev { + command = command.arg("-p").arg("30333:30333"); + } + command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")) + } _ => command, }; assert!( diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index ef6bdcbf..3e47571c 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -21,7 +21,7 @@ pub fn message_queue( ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())), ("MONERO_KEY", hex::encode(monero_key.to_bytes())), ("DB_PATH", "./message-queue-db".to_string()), - ("RUST_LOG", "serai_message_queue=trace".to_string()), + ("RUST_LOG", "info,serai_message_queue=trace".to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index e2afde09..3d76a6c9 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -40,7 +40,7 @@ RUN apt install -y ca-certificates }; let env_vars = [ - ("MESSAGE_QUEUE_RPC", format!("serai-{}-message_queue", network.label())), + ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), ("ENTROPY", hex::encode(entropy.as_ref())), ("NETWORK", coin.to_string()), @@ -48,7 +48,7 @@ RUN apt install -y ca-certificates ("NETWORK_RPC_HOSTNAME", hostname), ("NETWORK_RPC_PORT", 
format!("{port}")), ("DB_PATH", "./processor-db".to_string()), - ("RUST_LOG", "serai_processor=debug".to_string()), + ("RUST_LOG", "info,serai_processor=debug".to_string()), ]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index 74fa78e6..1487b70d 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -1,14 +1,26 @@ use std::{path::Path}; +use zeroize::Zeroizing; +use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; + use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; -pub fn serai(orchestration_path: &Path, network: Network) { +pub fn serai( + orchestration_path: &Path, + network: Network, + serai_key: &Zeroizing<::F>, +) { // Always builds in release for performance reasons let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node"); let setup_fast_epoch = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node"); - // TODO: Review the ports exposed here + let env_vars = [("KEY", hex::encode(serai_key.to_repr()))]; + let mut env_vars_str = String::new(); + for (env_var, value) in env_vars { + env_vars_str += &format!(r#"{env_var}=${{{env_var}}}:="{value}"}} "#); + } + let run_serai = format!( r#" # Copy the Serai binary and relevant license @@ -16,10 +28,10 @@ COPY --from=builder --chown=serai /serai/bin/serai-node /bin/ COPY --from=builder --chown=serai /serai/AGPL-3.0 . 
# Run the Serai node -EXPOSE 30333 9615 9933 9944 +EXPOSE 30333 9944 ADD /orchestration/{}/serai/run.sh / -CMD ["/run.sh"] +CMD {env_vars_str} "/run.sh" "#, network.label(), ); diff --git a/orchestration/testnet/serai/run.sh b/orchestration/testnet/serai/run.sh index 2bb8d868..7400ff50 100755 --- a/orchestration/testnet/serai/run.sh +++ b/orchestration/testnet/serai/run.sh @@ -1,3 +1,3 @@ #!/bin/sh -exit 1 +serai-node --unsafe-rpc-external --rpc-cors all --chain testnet --validator From e861859deca64af978017b3d2de335d4b2f8cd0e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 16:18:01 -0400 Subject: [PATCH 039/126] Update EpochDuration in runtime --- substrate/runtime/src/lib.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index 10340567..9a534a72 100644 --- a/substrate/runtime/src/lib.rs +++ b/substrate/runtime/src/lib.rs @@ -314,12 +314,10 @@ pub type ReportLongevity = ::EpochDuration; impl babe::Config for Runtime { #[cfg(feature = "fast-epoch")] - #[allow(clippy::identity_op)] - type EpochDuration = ConstU64<{ DAYS / (24 * 60 * 2) }>; // 30 seconds + type EpochDuration = ConstU64<{ MINUTES / 2 }>; // 30 seconds #[cfg(not(feature = "fast-epoch"))] - #[allow(clippy::identity_op)] - type EpochDuration = ConstU64<{ DAYS }>; + type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>; type ExpectedBlockTime = ConstU64<{ TARGET_BLOCK_TIME * 1000 }>; type EpochChangeTrigger = babe::ExternalTrigger; From bdf5a66e95dcf2a2e43d4aee09f44d299d19a4fb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 17:11:44 -0400 Subject: [PATCH 040/126] Correct Serai key provision --- orchestration/src/serai.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index 1487b70d..77d098b6 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -18,7 +18,7 @@ pub fn serai( let env_vars = [("KEY", 
hex::encode(serai_key.to_repr()))]; let mut env_vars_str = String::new(); for (env_var, value) in env_vars { - env_vars_str += &format!(r#"{env_var}=${{{env_var}}}:="{value}"}} "#); + env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); } let run_serai = format!( From 08c7c1b413768c8ba3d22113c06576303f534122 Mon Sep 17 00:00:00 2001 From: j-berman Date: Fri, 22 Mar 2024 14:26:42 -0700 Subject: [PATCH 041/126] monero: reference updated PR in fee test comment --- coins/monero/tests/wallet2_compatibility.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coins/monero/tests/wallet2_compatibility.rs b/coins/monero/tests/wallet2_compatibility.rs index 2002f3bd..c6b58978 100644 --- a/coins/monero/tests/wallet2_compatibility.rs +++ b/coins/monero/tests/wallet2_compatibility.rs @@ -88,7 +88,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) { .unwrap(); let tx_hash = hex::decode(tx.tx_hash).unwrap().try_into().unwrap(); - // TODO: Needs https://github.com/monero-project/monero/pull/8882 + // TODO: Needs https://github.com/monero-project/monero/pull/9260 // let fee_rate = daemon_rpc // .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant) // .await @@ -107,7 +107,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) { let tx = daemon_rpc.get_transaction(tx_hash).await.unwrap(); let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0); - // TODO: Needs https://github.com/monero-project/monero/pull/8882 + // TODO: Needs https://github.com/monero-project/monero/pull/9260 // runner::check_weight_and_fee(&tx, fee_rate); match spec { From e5afcda76bc709f0decc8184dc2a1c849c5ce7fb Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 22:34:18 -0400 Subject: [PATCH 042/126] Explicitly use "" for KEY within the tests Causes the provided keystore to be used over our keystore. 
--- substrate/node/src/keystore.rs | 3 +++ tests/coordinator/src/lib.rs | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/substrate/node/src/keystore.rs b/substrate/node/src/keystore.rs index ca00e79b..f7b9110f 100644 --- a/substrate/node/src/keystore.rs +++ b/substrate/node/src/keystore.rs @@ -8,6 +8,9 @@ pub struct Keystore(sr25519::Pair); impl Keystore { pub fn from_env() -> Option { let mut key_hex = serai_env::var("KEY")?; + if key_hex.is_empty() { + None?; + } let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex"); key_hex.zeroize(); diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index 0541c4fd..d09f4487 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -66,7 +66,9 @@ pub fn serai_composition(name: &str) -> TestBodySpecification { TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) - .replace_env([("SERAI_NAME".to_string(), name.to_lowercase())].into()) + .replace_env( + [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), String::new())].into(), + ) .set_publish_all_ports(true) } From af9b1ad5f91777dc38cfe7dee4d3b488ef9a4dc9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 23:18:53 -0400 Subject: [PATCH 043/126] Initial pruning of backlogged consensus messages --- coordinator/tributary/src/lib.rs | 4 +- coordinator/tributary/src/tendermint/mod.rs | 51 ++++++++----------- coordinator/tributary/src/tests/mod.rs | 3 ++ coordinator/tributary/src/tests/tendermint.rs | 28 ++++++++++ 4 files changed, 54 insertions(+), 32 deletions(-) create mode 100644 coordinator/tributary/src/tests/tendermint.rs diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 81b4fc17..99deb588 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -1,5 +1,5 @@ use core::{marker::PhantomData, fmt::Debug}; -use std::{sync::Arc, io}; +use 
std::{sync::Arc, io, collections::VecDeque}; use async_trait::async_trait; @@ -194,7 +194,7 @@ impl Tributary { ); let blockchain = Arc::new(RwLock::new(blockchain)); - let to_rebroadcast = Arc::new(RwLock::new(vec![])); + let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new())); // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the // P2P layer let p2p_meta_task_handle = Arc::new( diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index 40d01380..df8f7219 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -1,5 +1,8 @@ use core::ops::Deref; -use std::{sync::Arc, collections::HashMap}; +use std::{ + sync::Arc, + collections::{VecDeque, HashMap}, +}; use async_trait::async_trait; @@ -268,7 +271,7 @@ pub struct TendermintNetwork { pub(crate) validators: Arc, pub(crate) blockchain: Arc>>, - pub(crate) to_rebroadcast: Arc>>>, + pub(crate) to_rebroadcast: Arc>>>, pub(crate) p2p: P, } @@ -277,29 +280,6 @@ pub const BLOCK_PROCESSING_TIME: u32 = 999; pub const LATENCY_TIME: u32 = 1667; pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME); -#[test] -fn assert_target_block_time() { - use serai_db::MemDb; - - #[derive(Clone, Debug)] - pub struct DummyP2p; - - #[async_trait::async_trait] - impl P2p for DummyP2p { - async fn broadcast(&self, _: [u8; 32], _: Vec) { - unimplemented!() - } - } - - // Type paremeters don't matter here since we only need to call the block_time() - // and it only relies on the constants of the trait implementation. block_time() is in seconds, - // TARGET_BLOCK_TIME is in milliseconds. 
- assert_eq!( - as Network>::block_time(), - TARGET_BLOCK_TIME / 1000 - ) -} - #[async_trait] impl Network for TendermintNetwork { type Db = D; @@ -327,6 +307,9 @@ impl Network for TendermintNetwork } async fn broadcast(&mut self, msg: SignedMessageFor) { + let mut to_broadcast = vec![TENDERMINT_MESSAGE]; + to_broadcast.extend(msg.encode()); + // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second // until the block it's trying to build is complete // If the P2P layer drops a message before all nodes obtained access, or a node had an @@ -334,10 +317,18 @@ impl Network for TendermintNetwork // This is atrocious if there's no content-based deduplication protocol for messages actively // being gossiped // LibP2p, as used by Serai, is configured to content-based deduplicate - let mut to_broadcast = vec![TENDERMINT_MESSAGE]; - to_broadcast.extend(msg.encode()); - // TODO: Prune messages from old rounds which are no longer necessary - self.to_rebroadcast.write().await.push(to_broadcast.clone()); + { + let mut to_rebroadcast_lock = self.to_rebroadcast.write().await; + to_rebroadcast_lock.push_back(to_broadcast.clone()); + // We should have, ideally, 3 * validators messages within a round + // Therefore, this should keep the most recent 2-rounds + // TODO: This isn't perfect. 
Each participant should just rebroadcast their latest round of + // messages + while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) { + to_rebroadcast_lock.pop_front(); + } + } + self.p2p.broadcast(self.genesis, to_broadcast).await } @@ -443,7 +434,7 @@ impl Network for TendermintNetwork } // Since we've added a valid block, clear to_rebroadcast - *self.to_rebroadcast.write().await = vec![]; + *self.to_rebroadcast.write().await = VecDeque::new(); Some(TendermintBlock( self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), diff --git a/coordinator/tributary/src/tests/mod.rs b/coordinator/tributary/src/tests/mod.rs index 7c75ac36..dcaa11a5 100644 --- a/coordinator/tributary/src/tests/mod.rs +++ b/coordinator/tributary/src/tests/mod.rs @@ -1,3 +1,6 @@ +#[cfg(test)] +mod tendermint; + mod transaction; pub use transaction::*; diff --git a/coordinator/tributary/src/tests/tendermint.rs b/coordinator/tributary/src/tests/tendermint.rs new file mode 100644 index 00000000..77dfc9e5 --- /dev/null +++ b/coordinator/tributary/src/tests/tendermint.rs @@ -0,0 +1,28 @@ +use tendermint::ext::Network; +use crate::{ + P2p, TendermintTx, + tendermint::{TARGET_BLOCK_TIME, TendermintNetwork}, +}; + +#[test] +fn assert_target_block_time() { + use serai_db::MemDb; + + #[derive(Clone, Debug)] + pub struct DummyP2p; + + #[async_trait::async_trait] + impl P2p for DummyP2p { + async fn broadcast(&self, _: [u8; 32], _: Vec) { + unimplemented!() + } + } + + // Type paremeters don't matter here since we only need to call the block_time() + // and it only relies on the constants of the trait implementation. block_time() is in seconds, + // TARGET_BLOCK_TIME is in milliseconds. 
+ assert_eq!( + as Network>::block_time(), + TARGET_BLOCK_TIME / 1000 + ) +} From 35b58a45bdcb8cc8a13db3504ad13e6ba8931ae0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 23:40:15 -0400 Subject: [PATCH 044/126] Split peer finding into a dedicated task --- coordinator/src/p2p.rs | 138 +++++++++++++++++++++++------------------ 1 file changed, 78 insertions(+), 60 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index ce6be688..8fe609a0 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -290,6 +290,75 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } + // Find and connect to peers + let (pending_p2p_connections_send, mut pending_p2p_connections_recv) = + tokio::sync::mpsc::unbounded_channel(); + let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); + tokio::spawn({ + let pending_p2p_connections_send = pending_p2p_connections_send.clone(); + async move { + loop { + // TODO: Add better peer management logic? 
+ { + let connect = |addr: Multiaddr| { + log::info!("found peer from substrate: {addr}"); + + let protocols = addr.iter().filter_map(|piece| match piece { + // Drop PeerIds from the Substrate P2p network + Protocol::P2p(_) => None, + // Use our own TCP port + Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), + other => Some(other), + }); + + let mut new_addr = Multiaddr::empty(); + for protocol in protocols { + new_addr.push(protocol); + } + let addr = new_addr; + log::debug!("transformed found peer: {addr}"); + + // TODO: Check this isn't a duplicate + to_dial_send.send(addr).unwrap(); + }; + + while let Some(network) = pending_p2p_connections_recv.recv().await { + if let Ok(mut nodes) = serai.p2p_validators(network).await { + // If there's an insufficient amount of nodes known, connect to all yet add it + // back and break + if nodes.len() < 3 { + log::warn!( + "insufficient amount of P2P nodes known for {:?}: {}", + network, + nodes.len() + ); + pending_p2p_connections_send.send(network).unwrap(); + for node in nodes { + connect(node); + } + break; + } + + // Randomly select up to 5 + for _ in 0 .. 5 { + if !nodes.is_empty() { + let to_connect = nodes.swap_remove( + usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) + .unwrap(), + ); + connect(to_connect); + } + } + } + } + } + // Sleep 60 seconds before moving to the next iteration + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }); + + // Manage the actual swarm tokio::spawn({ let mut time_of_last_p2p_message = Instant::now(); @@ -321,66 +390,7 @@ impl LibP2p { async move { let mut set_for_genesis = HashMap::new(); - let mut pending_p2p_connections = vec![]; - // Run this task ad-infinitum loop { - // Handle pending P2P connections - // TODO: Break this out onto its own task with better peer management logic? 
- { - let mut connect = |addr: Multiaddr| { - log::info!("found peer from substrate: {addr}"); - - let protocols = addr.iter().filter_map(|piece| match piece { - // Drop PeerIds from the Substrate P2p network - Protocol::P2p(_) => None, - // Use our own TCP port - Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)), - other => Some(other), - }); - - let mut new_addr = Multiaddr::empty(); - for protocol in protocols { - new_addr.push(protocol); - } - let addr = new_addr; - log::debug!("transformed found peer: {addr}"); - - if let Err(e) = swarm.dial(addr) { - log::warn!("dialing peer failed: {e:?}"); - } - }; - - while let Some(network) = pending_p2p_connections.pop() { - if let Ok(mut nodes) = serai.p2p_validators(network).await { - // If there's an insufficient amount of nodes known, connect to all yet add it back - // and break - if nodes.len() < 3 { - log::warn!( - "insufficient amount of P2P nodes known for {:?}: {}", - network, - nodes.len() - ); - pending_p2p_connections.push(network); - for node in nodes { - connect(node); - } - break; - } - - // Randomly select up to 5 - for _ in 0 .. 5 { - if !nodes.is_empty() { - let to_connect = nodes.swap_remove( - usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) - .unwrap(), - ); - connect(to_connect); - } - } - } - } - } - let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! 
{ biased; @@ -392,7 +402,7 @@ impl LibP2p { let topic = topic_for_set(set); if subscribe { log::info!("subscribing to p2p messages for {set:?}"); - pending_p2p_connections.push(set.network); + pending_p2p_connections_send.send(set.network).unwrap(); set_for_genesis.insert(genesis, set); swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { @@ -440,6 +450,14 @@ impl LibP2p { } } + // Handle peers to dial + addr = to_dial_recv.recv() => { + let addr = addr.expect("received address was None (sender dropped?)"); + if let Err(e) = swarm.dial(addr) { + log::warn!("dialing peer failed: {e:?}"); + } + } + // If it's been >80s since we've published a message, publish a KeepAlive since we're // still an active service // This is useful when we have no active tributaries and accordingly aren't sending From f11a08c43668be3ecec512fa2f0a2240ed4a487c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 23:47:43 -0400 Subject: [PATCH 045/126] Peer finding which won't get stuck on one specific network --- coordinator/src/p2p.rs | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 8fe609a0..f64834ee 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -322,6 +322,7 @@ impl LibP2p { to_dial_send.send(addr).unwrap(); }; + let mut to_retry = vec![]; while let Some(network) = pending_p2p_connections_recv.recv().await { if let Ok(mut nodes) = serai.p2p_validators(network).await { // If there's an insufficient amount of nodes known, connect to all yet add it @@ -332,11 +333,11 @@ impl LibP2p { network, nodes.len() ); - pending_p2p_connections_send.send(network).unwrap(); + to_retry.push(network); for node in nodes { connect(node); } - break; + continue; } // Randomly select up to 5 @@ -351,6 +352,9 @@ impl LibP2p { } } } + for to_retry in to_retry { + pending_p2p_connections_send.send(to_retry).unwrap(); + } } // Sleep 60 seconds before moving to the next 
iteration tokio::time::sleep(core::time::Duration::from_secs(60)).await; @@ -432,12 +436,16 @@ impl LibP2p { log::debug!("dialing to peer in connection ID {}", &connection_id); } Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { - log::debug!( - "connection established to peer {} in connection ID {}", - &peer_id, - &connection_id, - ); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) + if &peer_id == swarm.local_peer_id() { + swarm.close_connection(connection_id); + } else { + log::debug!( + "connection established to peer {} in connection ID {}", + &peer_id, + &connection_id, + ); + swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) + } } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, From 4914420a379c57a1f2fd5f8da3a1e832a40e0d80 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 22 Mar 2024 23:51:51 -0400 Subject: [PATCH 046/126] Don't add as an explicit peer if already connected --- coordinator/src/p2p.rs | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index f64834ee..d840fea8 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -435,18 +435,18 @@ impl LibP2p { Some(SwarmEvent::Dialing { connection_id, .. }) => { log::debug!("dialing to peer in connection ID {}", &connection_id); } - Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { - if &peer_id == swarm.local_peer_id() { - swarm.close_connection(connection_id); - } else { - log::debug!( - "connection established to peer {} in connection ID {}", - &peer_id, - &connection_id, - ); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) - } + Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. 
}) => { + if &peer_id == swarm.local_peer_id() { + swarm.close_connection(connection_id); + } else if swarm.is_connected(&peer_id) {} else { + log::debug!( + "connection established to peer {} in connection ID {}", + &peer_id, + &connection_id, + ); + swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) } + } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { From bca3728a10fcaf88d8a899063608986fdd084712 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 00:09:23 -0400 Subject: [PATCH 047/126] Randomly select an addr from the authority discovery --- Cargo.lock | 1 + substrate/node/Cargo.toml | 1 + substrate/node/src/rpc.rs | 15 +++++++++++---- 3 files changed, 13 insertions(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9c5d097e..1ae1d463 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7625,6 +7625,7 @@ dependencies = [ "hex", "jsonrpsee", "pallet-transaction-payment-rpc", + "rand_core", "sc-authority-discovery", "sc-basic-authorship", "sc-cli", diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index f66a9705..12ba4d17 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -23,6 +23,7 @@ name = "serai-node" zeroize = "1" hex = "0.4" +rand_core = "0.6" schnorrkel = "0.11" sp-core = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs index f5ed2582..d07778cc 100644 --- a/substrate/node/src/rpc.rs +++ b/substrate/node/src/rpc.rs @@ -1,5 +1,7 @@ use std::{sync::Arc, collections::HashSet}; +use rand_core::{RngCore, OsRng}; + use sp_blockchain::{Error as BlockchainError, HeaderBackend, HeaderMetadata}; use sp_block_builder::BlockBuilder; use sp_api::ProvideRuntimeApi; @@ -72,14 +74,19 @@ where .get_addresses_by_authority_id(validator.into()) .await .unwrap_or_else(HashSet::new) - .into_iter(); - // Only take a single address + .into_iter() + .collect::>(); + // Randomly 
select an address // There should be one, there may be two if their IP address changed, and more should only // occur if they have multiple proxies/an IP address changing frequently/some issue // preventing consistent self-identification // It isn't beneficial to use multiple addresses for a single peer here - if let Some(address) = returned_addresses.next() { - all_p2p_addresses.push(address); + if !returned_addresses.is_empty() { + all_p2p_addresses.push( + returned_addresses.remove( + usize::try_from(OsRng.next_u64() >> 32).unwrap() % returned_addresses.len(), + ), + ); } } Ok(all_p2p_addresses) From 2a31d8552e26f9a257e3c6eba877ac092b04dee9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 16:48:12 -0400 Subject: [PATCH 048/126] Add empty string for the KEY to serai-client to use the default keystore --- substrate/client/tests/common/mod.rs | 7 ++++++- substrate/client/tests/dht.rs | 4 +++- substrate/client/tests/validator_sets.rs | 5 ++++- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/substrate/client/tests/common/mod.rs b/substrate/client/tests/common/mod.rs index d887b0b1..a6d5300b 100644 --- a/substrate/client/tests/common/mod.rs +++ b/substrate/client/tests/common/mod.rs @@ -29,7 +29,12 @@ macro_rules! 
serai_test { "--rpc-cors".to_string(), "all".to_string(), ]) - .replace_env(HashMap::from([("RUST_LOG".to_string(), "runtime=debug".to_string())])) + .replace_env( + HashMap::from([ + ("RUST_LOG".to_string(), "runtime=debug".to_string()), + ("KEY".to_string(), String::new()), + ]) + ) .set_publish_all_ports(true) .set_handle(handle) .set_start_policy(StartPolicy::Strict) diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs index 2fd40b12..4ab177ad 100644 --- a/substrate/client/tests/dht.rs +++ b/substrate/client/tests/dht.rs @@ -14,7 +14,9 @@ async fn dht() { TestBodySpecification::with_image( Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) - .replace_env([("SERAI_NAME".to_string(), name.to_string())].into()) + .replace_env( + [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), String::new())].into(), + ) .set_publish_all_ports(true) .set_handle(handle(name)) .set_start_policy(StartPolicy::Strict) diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index fc284f64..884a42db 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -100,7 +100,10 @@ async fn validator_set_rotation() { "local".to_string(), format!("--{name}"), ]) - .replace_env(HashMap::from([("RUST_LOG=runtime".to_string(), "debug".to_string())])) + .replace_env(HashMap::from([ + ("RUST_LOG".to_string(), "runtime=debug".to_string()), + ("KEY".to_string(), String::new()), + ])) .set_publish_all_ports(true) .set_handle(handle(name)) .set_start_policy(StartPolicy::Strict) From 5ea3b1bf9782cd6c44b3ba74cade4024e606f6cd Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 17:38:50 -0400 Subject: [PATCH 049/126] Use " " instead of "" for the empty key so sh doesn't interpret it as falsy --- substrate/client/tests/common/mod.rs | 2 +- substrate/client/tests/dht.rs | 2 +- substrate/client/tests/validator_sets.rs | 2 +- 
substrate/node/src/keystore.rs | 2 +- tests/coordinator/src/lib.rs | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/substrate/client/tests/common/mod.rs b/substrate/client/tests/common/mod.rs index a6d5300b..d7e8436b 100644 --- a/substrate/client/tests/common/mod.rs +++ b/substrate/client/tests/common/mod.rs @@ -32,7 +32,7 @@ macro_rules! serai_test { .replace_env( HashMap::from([ ("RUST_LOG".to_string(), "runtime=debug".to_string()), - ("KEY".to_string(), String::new()), + ("KEY".to_string(), " ".to_string()), ]) ) .set_publish_all_ports(true) diff --git a/substrate/client/tests/dht.rs b/substrate/client/tests/dht.rs index 4ab177ad..82450e46 100644 --- a/substrate/client/tests/dht.rs +++ b/substrate/client/tests/dht.rs @@ -15,7 +15,7 @@ async fn dht() { Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) .replace_env( - [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), String::new())].into(), + [("SERAI_NAME".to_string(), name.to_string()), ("KEY".to_string(), " ".to_string())].into(), ) .set_publish_all_ports(true) .set_handle(handle(name)) diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 884a42db..8ae150ec 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -102,7 +102,7 @@ async fn validator_set_rotation() { ]) .replace_env(HashMap::from([ ("RUST_LOG".to_string(), "runtime=debug".to_string()), - ("KEY".to_string(), String::new()), + ("KEY".to_string(), " ".to_string()), ])) .set_publish_all_ports(true) .set_handle(handle(name)) diff --git a/substrate/node/src/keystore.rs b/substrate/node/src/keystore.rs index f7b9110f..c313773a 100644 --- a/substrate/node/src/keystore.rs +++ b/substrate/node/src/keystore.rs @@ -8,7 +8,7 @@ pub struct Keystore(sr25519::Pair); impl Keystore { pub fn from_env() -> Option { let mut key_hex = serai_env::var("KEY")?; - if key_hex.is_empty() { + if 
key_hex.trim().is_empty() { None?; } let mut key = hex::decode(&key_hex).expect("KEY from environment wasn't hex"); diff --git a/tests/coordinator/src/lib.rs b/tests/coordinator/src/lib.rs index d09f4487..e6b0324d 100644 --- a/tests/coordinator/src/lib.rs +++ b/tests/coordinator/src/lib.rs @@ -67,7 +67,7 @@ pub fn serai_composition(name: &str) -> TestBodySpecification { Image::with_repository("serai-dev-serai").pull_policy(PullPolicy::Never), ) .replace_env( - [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), String::new())].into(), + [("SERAI_NAME".to_string(), name.to_lowercase()), ("KEY".to_string(), " ".to_string())].into(), ) .set_publish_all_ports(true) } From b7d49af1d51c02ae8508cd3d40042398d4e4bb72 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 18:02:48 -0400 Subject: [PATCH 050/126] Track total peer count in the coordinator --- coordinator/src/p2p.rs | 25 ++++++++++++++++++------- 1 file changed, 18 insertions(+), 7 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index d840fea8..65b205d5 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -394,6 +394,7 @@ impl LibP2p { async move { let mut set_for_genesis = HashMap::new(); + let mut connected_peers = 0; loop { let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { @@ -437,15 +438,25 @@ impl LibP2p { } Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. 
}) => { if &peer_id == swarm.local_peer_id() { + log::warn!("established a libp2p connection to ourselves"); swarm.close_connection(connection_id); - } else if swarm.is_connected(&peer_id) {} else { - log::debug!( - "connection established to peer {} in connection ID {}", - &peer_id, - &connection_id, - ); - swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id) + continue; } + + connected_peers += 1; + log::debug!( + "connection established to peer {} in connection ID {}, connected peers: {}", + &peer_id, + &connection_id, + connected_peers, + ); + } + Some(SwarmEvent::ConnectionClosed { peer_id, .. }) => { + connected_peers -= 1; + log::debug!( + "connection with peer {peer_id} closed, connected peers: {}", + connected_peers, + ); } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, From 333a9571b8fff3b446a684182d010437209b6a44 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 22:17:43 -0400 Subject: [PATCH 051/126] Use volumes for message-queue/processors/coordinator/serai --- orchestration/src/coins/bitcoin.rs | 2 +- orchestration/src/coins/monero.rs | 3 +-- orchestration/src/coordinator.rs | 2 +- orchestration/src/main.rs | 40 ++++++++++++++++++++++-------- orchestration/src/message_queue.rs | 2 +- orchestration/src/processor.rs | 2 +- orchestration/testnet/serai/run.sh | 2 +- 7 files changed, 35 insertions(+), 18 deletions(-) diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index 13c86bad..f8b6b7fc 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -44,7 +44,7 @@ CMD ["/run.sh"] ); let run = - os(Os::Debian, "RUN mkdir /volume && chown bitcoin:bitcoin /volume", "bitcoin") + &run_bitcoin; + os(Os::Debian, "", "bitcoin") + &run_bitcoin; let res = setup + &run; let mut bitcoin_path = orchestration_path.to_path_buf(); diff --git a/orchestration/src/coins/monero.rs b/orchestration/src/coins/monero.rs index 
f64f0a04..7df874d2 100644 --- a/orchestration/src/coins/monero.rs +++ b/orchestration/src/coins/monero.rs @@ -57,8 +57,7 @@ CMD ["/run.sh"] let run = crate::os( os, - &("RUN mkdir /volume && chown monero /volume\r\n".to_string() + - if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }), + if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero", ) + &run_monero; let res = setup + &run; diff --git a/orchestration/src/coordinator.rs b/orchestration/src/coordinator.rs index 9995dbbf..67a24527 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -33,7 +33,7 @@ RUN apt install -y ca-certificates let env_vars = [ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coordinator_key.to_repr())), - ("DB_PATH", "./coordinator-db".to_string()), + ("DB_PATH", "/volume/coordinator-db".to_string()), ("SERAI_KEY", hex::encode(serai_key.to_repr())), ("SERAI_HOSTNAME", format!("serai-{}-serai", network.label())), ("RUST_LOG", DEFAULT_RUST_LOG.to_string()), diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 548aca8b..c942efe2 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -92,6 +92,9 @@ RUN apk update && apk upgrade # System user (not a human), shell of nologin, no password assigned RUN adduser -S -s /sbin/nologin -D {user} +# Make the /volume directory and transfer it to the user +RUN mkdir /volume && chown {user}:{user} /volume + {additional_root} # Switch to a non-root user @@ -112,6 +115,9 @@ RUN apt update && apt upgrade -y && apt autoremove -y && apt clean RUN useradd --system --create-home --shell /sbin/nologin {user} +# Make the /volume directory and transfer it to the user +RUN mkdir /volume && chown {user}:{user} /volume + {additional_root} # Switch to a non-root user @@ -416,6 +422,10 @@ fn start(network: Network, services: HashSet) { .arg("container") .arg("inspect") .arg(&docker_name) + // Use 
null for all IO to silence 'container does not exist' + .stdin(Stdio::null()) + .stdout(Stdio::null()) + .stderr(Stdio::null()) .status() .unwrap() .success() @@ -429,38 +439,46 @@ fn start(network: Network, services: HashSet) { let command = command.arg("--restart").arg("always"); let command = command.arg("--log-opt").arg("max-size=100m"); let command = command.arg("--log-opt").arg("max-file=3"); + let command = if network == Network::Dev { + command + } else { + // Assign a persistent volume if this isn't for Dev + command.arg("--volume").arg(volume); + } let command = match name { "bitcoin" => { + // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("8332:8332") - } else { - command.arg("--volume").arg(volume) } } "monero" => { + // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("18081:18081") - } else { - command.arg("--volume").arg(volume) } } "monero-wallet-rpc" => { assert_eq!(network, Network::Dev, "monero-wallet-rpc is only for dev"); + // Expose the RPC for tests command.arg("-p").arg("18082:18082") } "coordinator" => { - if network != Network::Dev { - command.arg("-p").arg("30563:30563") - } else { + if network == Network::Dev { command + else { + // Publish the port + command.arg("-p").arg("30563:30563") } } "serai" => { - let mut command = command; - if network != Network::Dev { - command = command.arg("-p").arg("30333:30333"); + let mut command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime"); + if network == Network::Dev { + command + } else { + // Publish the port + command.arg("-p").arg("30333:30333") } - command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")) } _ => command, }; diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index 3e47571c..f16c6cbe 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -20,7 +20,7 @@ pub fn message_queue( ("BITCOIN_KEY", 
hex::encode(bitcoin_key.to_bytes())), ("ETHEREUM_KEY", hex::encode(ethereum_key.to_bytes())), ("MONERO_KEY", hex::encode(monero_key.to_bytes())), - ("DB_PATH", "./message-queue-db".to_string()), + ("DB_PATH", "/volume/message-queue-db".to_string()), ("RUST_LOG", "info,serai_message_queue=trace".to_string()), ]; let mut env_vars_str = String::new(); diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 3d76a6c9..7ee69d11 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -47,7 +47,7 @@ RUN apt install -y ca-certificates ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), ("NETWORK_RPC_PORT", format!("{port}")), - ("DB_PATH", "./processor-db".to_string()), + ("DB_PATH", "/volume/processor-db".to_string()), ("RUST_LOG", "info,serai_processor=debug".to_string()), ]; let mut env_vars_str = String::new(); diff --git a/orchestration/testnet/serai/run.sh b/orchestration/testnet/serai/run.sh index 7400ff50..ab3b59df 100755 --- a/orchestration/testnet/serai/run.sh +++ b/orchestration/testnet/serai/run.sh @@ -1,3 +1,3 @@ #!/bin/sh -serai-node --unsafe-rpc-external --rpc-cors all --chain testnet --validator +serai-node --base-path /volume --unsafe-rpc-external --rpc-cors all --chain testnet --validator From 1f92e1cbda9725020e61b8a400f5dfdd88fe26f5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 22:22:24 -0400 Subject: [PATCH 052/126] Fixes for prior commit --- orchestration/src/coins/bitcoin.rs | 3 +-- orchestration/src/coins/monero.rs | 8 +++----- orchestration/src/main.rs | 21 ++++++++++++++++----- 3 files changed, 20 insertions(+), 12 deletions(-) diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index f8b6b7fc..a5c8b21c 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -43,8 +43,7 @@ CMD ["/run.sh"] network.label() ); - let run = - os(Os::Debian, "", "bitcoin") + &run_bitcoin; + 
let run = os(Os::Debian, "", "bitcoin") + &run_bitcoin; let res = setup + &run; let mut bitcoin_path = orchestration_path.to_path_buf(); diff --git a/orchestration/src/coins/monero.rs b/orchestration/src/coins/monero.rs index 7df874d2..873c6458 100644 --- a/orchestration/src/coins/monero.rs +++ b/orchestration/src/coins/monero.rs @@ -55,11 +55,9 @@ CMD ["/run.sh"] network.label(), ); - let run = crate::os( - os, - if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, - "monero", - ) + &run_monero; + let run = + crate::os(os, if os == Os::Alpine { "RUN apk --no-cache add gcompat" } else { "" }, "monero") + + &run_monero; let res = setup + &run; let mut monero_path = orchestration_path.to_path_buf(); diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index c942efe2..988358bc 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -2,7 +2,14 @@ // TODO: Generate keys for a validator and the infra use core::ops::Deref; -use std::{collections::HashSet, env, path::PathBuf, io::Write, fs, process::Command}; +use std::{ + collections::HashSet, + env, + path::PathBuf, + io::Write, + fs, + process::{Stdio, Command}, +}; use zeroize::Zeroizing; @@ -443,19 +450,23 @@ fn start(network: Network, services: HashSet) { command } else { // Assign a persistent volume if this isn't for Dev - command.arg("--volume").arg(volume); - } + command.arg("--volume").arg(volume) + }; let command = match name { "bitcoin" => { // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("8332:8332") + } else { + command } } "monero" => { // Expose the RPC for tests if network == Network::Dev { command.arg("-p").arg("18081:18081") + } else { + command } } "monero-wallet-rpc" => { @@ -466,13 +477,13 @@ fn start(network: Network, services: HashSet) { "coordinator" => { if network == Network::Dev { command - else { + } else { // Publish the port command.arg("-p").arg("30563:30563") } } "serai" => { - let mut command = 
command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime"); + let command = command.arg("--volume").arg(format!("{serai_runtime_volume}:/runtime")); if network == Network::Dev { command } else { From 7408e267816f99aeea8bb96e3b9ae809fca31c92 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 23:13:33 -0400 Subject: [PATCH 053/126] Don't regenerate infrastructure keys Enables running setup without invalidating the message queue --- orchestration/src/main.rs | 87 ++++++++++++++++++++++++++------------- 1 file changed, 59 insertions(+), 28 deletions(-) diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 988358bc..823e1b61 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -3,7 +3,7 @@ use core::ops::Deref; use std::{ - collections::HashSet, + collections::{HashSet, HashMap}, env, path::PathBuf, io::Write, @@ -212,6 +212,55 @@ fn orchestration_path(network: Network) -> PathBuf { orchestration_path } +type InfrastructureKeys = + HashMap<&'static str, (Zeroizing<::F>, ::G)>; +fn infrastructure_keys(network: Network) -> InfrastructureKeys { + // Generate entropy for the infrastructure keys + + let entropy = if network == Network::Dev { + // Don't use actual entropy if this is a dev environment + Zeroizing::new([0; 32]) + } else { + let path = home::home_dir() + .unwrap() + .join(".serai") + .join(network.label()) + .join("infrastructure_keys_entropy"); + // Check if there's existing entropy + if let Ok(entropy) = fs::read(&path).map(Zeroizing::new) { + assert_eq!(entropy.len(), 32, "entropy saved to disk wasn't 32 bytes"); + let mut res = Zeroizing::new([0; 32]); + res.copy_from_slice(entropy.as_ref()); + res + } else { + // If there isn't, generate fresh entropy + let mut res = Zeroizing::new([0; 32]); + OsRng.fill_bytes(res.as_mut()); + fs::write(&path, &res).unwrap(); + res + } + }; + + let mut transcript = + RecommendedTranscript::new(b"Serai Orchestrator Infrastructure Keys Transcript"); + 
transcript.append_message(b"network", network.label().as_bytes()); + transcript.append_message(b"entropy", entropy); + let mut rng = ChaCha20Rng::from_seed(transcript.rng_seed(b"infrastructure_keys")); + + let mut key_pair = || { + let key = Zeroizing::new(::F::random(&mut rng)); + let public = Ristretto::generator() * key.deref(); + (key, public) + }; + + HashMap::from([ + ("coordinator", key_pair()), + ("bitcoin", key_pair()), + ("ethereum", key_pair()), + ("monero", key_pair()), + ]) +} + fn dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); @@ -222,28 +271,11 @@ fn dockerfiles(network: Network) { monero_wallet_rpc(&orchestration_path); } - // TODO: Generate infra keys in key_gen, yet service entropy here? - - // Generate entropy for the infrastructure keys - let mut entropy = Zeroizing::new([0; 32]); - // Only use actual entropy if this isn't a development environment - if network != Network::Dev { - OsRng.fill_bytes(entropy.as_mut()); - } - let mut transcript = RecommendedTranscript::new(b"Serai Orchestrator Transcript"); - transcript.append_message(b"entropy", entropy); - let mut new_rng = |label| ChaCha20Rng::from_seed(transcript.rng_seed(label)); - - let mut message_queue_keys_rng = new_rng(b"message_queue_keys"); - let mut key_pair = || { - let key = Zeroizing::new(::F::random(&mut message_queue_keys_rng)); - let public = Ristretto::generator() * key.deref(); - (key, public) - }; - let coordinator_key = key_pair(); - let bitcoin_key = key_pair(); - let ethereum_key = key_pair(); - let monero_key = key_pair(); + let mut infrastructure_keys = infrastructure_keys(network); + let coordinator_key = infrastructure_keys.remove("coordinator").unwrap(); + let bitcoin_key = infrastructure_keys.remove("bitcoin").unwrap(); + let ethereum_key = infrastructure_keys.remove("ethereum").unwrap(); + let monero_key = infrastructure_keys.remove("monero").unwrap(); message_queue( &orchestration_path, @@ -254,10 +286,9 @@ fn 
dockerfiles(network: Network) { monero_key.1, ); - let mut processor_entropy_rng = new_rng(b"processor_entropy"); - let mut new_entropy = || { + let new_entropy = || { let mut res = Zeroizing::new([0; 32]); - processor_entropy_rng.fill_bytes(res.as_mut()); + OsRng.fill_bytes(res.as_mut()); res }; processor( @@ -514,10 +545,10 @@ Serai Orchestrator v0.0.1 Commands: key_gen *network* - Generates a key for the validator. + Generate a key for the validator. setup *network* - Generate infrastructure keys and the Dockerfiles for every Serai service. + Generate the Dockerfiles for every Serai service. start *network* [service1, service2...] Start the specified services for the specified network ("dev" or "testnet"). From 4cacce5e55087a21174f05732d30662e83cdfd9f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 23:30:51 -0400 Subject: [PATCH 054/126] Perform key share amortization on-chain to avoid discrepancies --- coordinator/src/substrate/mod.rs | 12 ++------- substrate/validator-sets/pallet/src/lib.rs | 27 +++++++++++-------- .../validator-sets/primitives/src/lib.rs | 10 +++---- 3 files changed, 23 insertions(+), 26 deletions(-) diff --git a/coordinator/src/substrate/mod.rs b/coordinator/src/substrate/mod.rs index 7a76353c..fb1e3aed 100644 --- a/coordinator/src/substrate/mod.rs +++ b/coordinator/src/substrate/mod.rs @@ -11,10 +11,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; use serai_client::{ SeraiError, Block, Serai, TemporalSerai, primitives::{BlockHash, NetworkId}, - validator_sets::{ - primitives::{ValidatorSet, amortize_excess_key_shares}, - ValidatorSetsEvent, - }, + validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent}, in_instructions::InInstructionsEvent, coins::CoinsEvent, }; @@ -69,12 +66,7 @@ async fn handle_new_set( let set_participants = serai.participants(set.network).await?.expect("NewSet for set which doesn't exist"); - let mut set_data = set_participants - .into_iter() - .map(|(k, w)| (k, 
u16::try_from(w).unwrap())) - .collect::>(); - amortize_excess_key_shares(&mut set_data); - set_data + set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::>() }; let time = if let Ok(time) = block.time() { diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index 76c07e1c..d1385c2d 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -363,21 +363,26 @@ pub mod pallet { let allocation_per_key_share = Self::allocation_per_key_share(network).unwrap().0; - let mut iter = SortedAllocationsIter::::new(network); let mut participants = vec![]; - let mut key_shares = 0; let mut total_stake = 0; - while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { - let Some((key, amount)) = iter.next() else { break }; + { + let mut iter = SortedAllocationsIter::::new(network); + let mut key_shares = 0; + while key_shares < u64::from(MAX_KEY_SHARES_PER_SET) { + let Some((key, amount)) = iter.next() else { break }; - let these_key_shares = amount.0 / allocation_per_key_share; - InSet::::set(network, key, Some(these_key_shares)); - participants.push((key, these_key_shares)); + let these_key_shares = + (amount.0 / allocation_per_key_share).min(u64::from(MAX_KEY_SHARES_PER_SET)); + participants.push((key, these_key_shares)); - // This can technically set key_shares to a value exceeding MAX_KEY_SHARES_PER_SET - // Off-chain, the key shares per validator will be accordingly adjusted - key_shares += these_key_shares; - total_stake += amount.0; + key_shares += these_key_shares; + total_stake += amount.0; + } + amortize_excess_key_shares(&mut participants); + } + + for (key, shares) in &participants { + InSet::::set(network, key, Some(*shares)); } TotalAllocatedStake::::set(network, Some(Amount(total_stake))); diff --git a/substrate/validator-sets/primitives/src/lib.rs b/substrate/validator-sets/primitives/src/lib.rs index 644b19e1..c900b0a9 100644 --- 
a/substrate/validator-sets/primitives/src/lib.rs +++ b/substrate/validator-sets/primitives/src/lib.rs @@ -115,11 +115,11 @@ pub fn report_slashes_message(set: &ValidatorSet, slashes: &[(Public, u32)]) -> /// maximum. /// /// Reduction occurs by reducing each validator in a reverse round-robin. -pub fn amortize_excess_key_shares(validators: &mut [(Public, u16)]) { - let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::(); - for i in 0 .. usize::from( - total_key_shares.saturating_sub(u16::try_from(MAX_KEY_SHARES_PER_SET).unwrap()), - ) { +pub fn amortize_excess_key_shares(validators: &mut [(Public, u64)]) { + let total_key_shares = validators.iter().map(|(_, shares)| shares).sum::(); + for i in 0 .. usize::try_from(total_key_shares.saturating_sub(u64::from(MAX_KEY_SHARES_PER_SET))) + .unwrap() + { validators[validators.len() - ((i % validators.len()) + 1)].1 -= 1; } } From bc44fbdbac573efb7342bcf59e4144110356fb43 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 23 Mar 2024 23:31:20 -0400 Subject: [PATCH 055/126] Add TODO to coordinator P2P --- coordinator/src/p2p.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 65b205d5..19bf299d 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -322,6 +322,8 @@ impl LibP2p { to_dial_send.send(addr).unwrap(); }; + // TODO: We should also connect to random peers from random nets as needed for + // cosigning let mut to_retry = vec![]; while let Some(network) = pending_p2p_connections_recv.recv().await { if let Ok(mut nodes) = serai.p2p_validators(network).await { From 07df9aa035f85cff83604de41acad3178fc40e63 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 24 Mar 2024 00:03:32 -0400 Subject: [PATCH 056/126] Ensure user is in a group --- orchestration/src/main.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 823e1b61..13b98554 100644 --- 
a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -96,8 +96,8 @@ ENV LD_PRELOAD=libmimalloc.so RUN apk update && apk upgrade -# System user (not a human), shell of nologin, no password assigned -RUN adduser -S -s /sbin/nologin -D {user} +RUN adduser --system --shell /sbin/nologin --disabled-password {user} +RUN addgroup {user} {user} # Make the /volume directory and transfer it to the user RUN mkdir /volume && chown {user}:{user} /volume @@ -120,7 +120,7 @@ RUN echo "/usr/lib/libmimalloc.so" >> /etc/ld.so.preload RUN apt update && apt upgrade -y && apt autoremove -y && apt clean -RUN useradd --system --create-home --shell /sbin/nologin {user} +RUN useradd --system --user-group --create-home --shell /sbin/nologin {user} # Make the /volume directory and transfer it to the user RUN mkdir /volume && chown {user}:{user} /volume From 3d855c75be7b863ee7171b44adc497a703d9924a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 24 Mar 2024 00:18:40 -0400 Subject: [PATCH 057/126] Create group before adding to it --- orchestration/src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 13b98554..4be84cd4 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -97,6 +97,7 @@ ENV LD_PRELOAD=libmimalloc.so RUN apk update && apk upgrade RUN adduser --system --shell /sbin/nologin --disabled-password {user} +RUN addgroup {user} RUN addgroup {user} {user} # Make the /volume directory and transfer it to the user From 63521f6a965107590d5104ae16264169fd59d3e2 Mon Sep 17 00:00:00 2001 From: noot <36753753+noot@users.noreply.github.com> Date: Sun, 24 Mar 2024 09:00:54 -0400 Subject: [PATCH 058/126] implement Router.sol and associated functions (#92) * start Router contract * use calldata for function args * var name changes * start testing router contract * test with and without abi.encode * cleanup * why tf isn't tests/utils working * cleanup tests * remove unused files * wip * fix router 
contract and tests, add set/update public keys funcs * impl some Froms * make execute non-reentrant * cleanup * update Router to use ReentrancyGuard * update contract to use errors, use bitfield in Executed event, minor other fixes * wip * fix build issues from merge, tests ok * Router.sol cleanup * cleanup, uncomment stuff * bump ethers.rs version to latest * make contract functions take generic middleware * update build script to assert no compiler errors * hardcode pubkey parity into contract, update tests * Polish coins/ethereum in various ways --------- Co-authored-by: Luke Parker --- .github/actions/build-dependencies/action.yml | 4 +- coins/ethereum/.gitignore | 6 +- coins/ethereum/Cargo.toml | 3 + coins/ethereum/build.rs | 35 ++++- coins/ethereum/contracts/Router.sol | 90 ++++++++++++ coins/ethereum/contracts/Schnorr.sol | 29 ++-- coins/ethereum/src/abi/mod.rs | 6 + coins/ethereum/src/contract.rs | 36 ----- coins/ethereum/src/crypto.rs | 124 +++++++--------- coins/ethereum/src/lib.rs | 16 ++- coins/ethereum/src/router.rs | 30 ++++ coins/ethereum/src/schnorr.rs | 34 +++++ coins/ethereum/src/tests/crypto.rs | 132 ++++++++++++++++++ coins/ethereum/src/tests/mod.rs | 92 ++++++++++++ coins/ethereum/src/tests/router.rs | 109 +++++++++++++++ coins/ethereum/src/tests/schnorr.rs | 67 +++++++++ coins/ethereum/tests/contract.rs | 128 ----------------- coins/ethereum/tests/crypto.rs | 87 ------------ coins/ethereum/tests/mod.rs | 2 - spec/Getting Started.md | 8 +- 20 files changed, 690 insertions(+), 348 deletions(-) create mode 100644 coins/ethereum/contracts/Router.sol create mode 100644 coins/ethereum/src/abi/mod.rs delete mode 100644 coins/ethereum/src/contract.rs create mode 100644 coins/ethereum/src/router.rs create mode 100644 coins/ethereum/src/schnorr.rs create mode 100644 coins/ethereum/src/tests/crypto.rs create mode 100644 coins/ethereum/src/tests/mod.rs create mode 100644 coins/ethereum/src/tests/router.rs create mode 100644 
coins/ethereum/src/tests/schnorr.rs delete mode 100644 coins/ethereum/tests/contract.rs delete mode 100644 coins/ethereum/tests/crypto.rs delete mode 100644 coins/ethereum/tests/mod.rs diff --git a/.github/actions/build-dependencies/action.yml b/.github/actions/build-dependencies/action.yml index 2a8e8ed8..5994b723 100644 --- a/.github/actions/build-dependencies/action.yml +++ b/.github/actions/build-dependencies/action.yml @@ -42,8 +42,8 @@ runs: shell: bash run: | cargo install svm-rs - svm install 0.8.16 - svm use 0.8.16 + svm install 0.8.25 + svm use 0.8.25 # - name: Cache Rust # uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43 diff --git a/coins/ethereum/.gitignore b/coins/ethereum/.gitignore index 6ff35861..46365e03 100644 --- a/coins/ethereum/.gitignore +++ b/coins/ethereum/.gitignore @@ -1,3 +1,7 @@ -# solidity build outputs +# Solidity build outputs cache artifacts + +# Auto-generated ABI files +src/abi/schnorr.rs +src/abi/router.rs diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index 1d1c6dbb..bc60d3a4 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -30,6 +30,9 @@ ethers-core = { version = "2", default-features = false } ethers-providers = { version = "2", default-features = false } ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } +[build-dependencies] +ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } + [dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["std"] } diff --git a/coins/ethereum/build.rs b/coins/ethereum/build.rs index 2166f6ad..3590b12f 100644 --- a/coins/ethereum/build.rs +++ b/coins/ethereum/build.rs @@ -1,6 +1,20 @@ +use std::process::Command; + +use ethers_contract::Abigen; + fn main() { - println!("cargo:rerun-if-changed=contracts"); - println!("cargo:rerun-if-changed=artifacts"); + println!("cargo:rerun-if-changed=contracts/*"); + 
println!("cargo:rerun-if-changed=artifacts/*"); + + for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout) + .unwrap() + .lines() + { + if let Some(version) = line.strip_prefix("Version: ") { + let version = version.split('+').next().unwrap(); + assert_eq!(version, "0.8.25"); + } + } #[rustfmt::skip] let args = [ @@ -8,8 +22,21 @@ fn main() { "-o", "./artifacts", "--overwrite", "--bin", "--abi", "--optimize", - "./contracts/Schnorr.sol" + "./contracts/Schnorr.sol", "./contracts/Router.sol", ]; + assert!(Command::new("solc").args(args).status().unwrap().success()); - assert!(std::process::Command::new("solc").args(args).status().unwrap().success()); + Abigen::new("Schnorr", "./artifacts/Schnorr.abi") + .unwrap() + .generate() + .unwrap() + .write_to_file("./src/abi/schnorr.rs") + .unwrap(); + + Abigen::new("Router", "./artifacts/Router.abi") + .unwrap() + .generate() + .unwrap() + .write_to_file("./src/abi/router.rs") + .unwrap(); } diff --git a/coins/ethereum/contracts/Router.sol b/coins/ethereum/contracts/Router.sol new file mode 100644 index 00000000..25775ec5 --- /dev/null +++ b/coins/ethereum/contracts/Router.sol @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +import "./Schnorr.sol"; + +contract Router is Schnorr { + // Contract initializer + // TODO: Replace with a MuSig of the genesis validators + address public initializer; + + // Nonce is incremented for each batch of transactions executed + uint256 public nonce; + + // fixed parity for the public keys used in this contract + uint8 constant public KEY_PARITY = 27; + + // current public key's x-coordinate + // note: this key must always use the fixed parity defined above + bytes32 public seraiKey; + + struct OutInstruction { + address to; + uint256 value; + bytes data; + } + + struct Signature { + bytes32 c; + bytes32 s; + } + + // success is a uint256 representing a bitfield of transaction successes + event Executed(uint256 nonce, 
bytes32 batch, uint256 success); + + // error types + error NotInitializer(); + error AlreadyInitialized(); + error InvalidKey(); + error TooManyTransactions(); + + constructor() { + initializer = msg.sender; + } + + // initSeraiKey can be called by the contract initializer to set the first + // public key, only if the public key has yet to be set. + function initSeraiKey(bytes32 _seraiKey) external { + if (msg.sender != initializer) revert NotInitializer(); + if (seraiKey != 0) revert AlreadyInitialized(); + if (_seraiKey == bytes32(0)) revert InvalidKey(); + seraiKey = _seraiKey; + } + + // updateSeraiKey validates the given Schnorr signature against the current public key, + // and if successful, updates the contract's public key to the given one. + function updateSeraiKey( + bytes32 _seraiKey, + Signature memory sig + ) public { + if (_seraiKey == bytes32(0)) revert InvalidKey(); + bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey)); + if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); + seraiKey = _seraiKey; + } + + // execute accepts a list of transactions to execute as well as a Schnorr signature. + // if signature verification passes, the given transactions are executed. + // if signature verification fails, this function will revert. 
+ function execute( + OutInstruction[] calldata transactions, + Signature memory sig + ) public { + if (transactions.length > 256) revert TooManyTransactions(); + + bytes32 message = keccak256(abi.encode("execute", nonce, transactions)); + // This prevents re-entrancy from causing double spends yet does allow + // out-of-order execution via re-entrancy + nonce++; + if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); + + uint256 successes; + for(uint256 i = 0; i < transactions.length; i++) { + (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data); + assembly { + successes := or(successes, shl(i, success)) + } + } + emit Executed(nonce, message, successes); + } +} diff --git a/coins/ethereum/contracts/Schnorr.sol b/coins/ethereum/contracts/Schnorr.sol index 3f0196b2..47263e66 100644 --- a/coins/ethereum/contracts/Schnorr.sol +++ b/coins/ethereum/contracts/Schnorr.sol @@ -1,4 +1,4 @@ -//SPDX-License-Identifier: AGPLv3 +// SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; // see https://github.com/noot/schnorr-verify for implementation details @@ -7,29 +7,32 @@ contract Schnorr { uint256 constant public Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; + error InvalidSOrA(); + error InvalidSignature(); + // parity := public key y-coord parity (27 or 28) // px := public key x-coord - // message := 32-byte message + // message := 32-byte hash of the message + // c := schnorr signature challenge // s := schnorr signature - // e := schnorr signature challenge function verify( uint8 parity, bytes32 px, bytes32 message, - bytes32 s, - bytes32 e + bytes32 c, + bytes32 s ) public view returns (bool) { // ecrecover = (m, v, r, s); - bytes32 sp = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); - bytes32 ep = bytes32(Q - mulmod(uint256(e), uint256(px), Q)); + bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); + bytes32 ca = bytes32(Q - mulmod(uint256(c), 
uint256(px), Q)); - require(sp != 0); + if (sa == 0) revert InvalidSOrA(); // the ecrecover precompile implementation checks that the `r` and `s` - // inputs are non-zero (in this case, `px` and `ep`), thus we don't need to - // check if they're zero.will make me - address R = ecrecover(sp, parity, px, ep); - require(R != address(0), "ecrecover failed"); - return e == keccak256( + // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to + // check if they're zero. + address R = ecrecover(sa, parity, px, ca); + if (R == address(0)) revert InvalidSignature(); + return c == keccak256( abi.encodePacked(R, uint8(parity), px, block.chainid, message) ); } diff --git a/coins/ethereum/src/abi/mod.rs b/coins/ethereum/src/abi/mod.rs new file mode 100644 index 00000000..2d7dd47c --- /dev/null +++ b/coins/ethereum/src/abi/mod.rs @@ -0,0 +1,6 @@ +#[rustfmt::skip] +#[allow(clippy::all)] +pub(crate) mod schnorr; +#[rustfmt::skip] +#[allow(clippy::all)] +pub(crate) mod router; diff --git a/coins/ethereum/src/contract.rs b/coins/ethereum/src/contract.rs deleted file mode 100644 index 80093b08..00000000 --- a/coins/ethereum/src/contract.rs +++ /dev/null @@ -1,36 +0,0 @@ -use thiserror::Error; -use eyre::{eyre, Result}; - -use ethers_providers::{Provider, Http}; -use ethers_contract::abigen; - -use crate::crypto::ProcessedSignature; - -#[derive(Error, Debug)] -pub enum EthereumError { - #[error("failed to verify Schnorr signature")] - VerificationError, -} - -abigen!(Schnorr, "./artifacts/Schnorr.abi"); - -pub async fn call_verify( - contract: &Schnorr>, - params: &ProcessedSignature, -) -> Result<()> { - if contract - .verify( - params.parity + 27, - params.px.to_bytes().into(), - params.message, - params.s.to_bytes().into(), - params.e.to_bytes().into(), - ) - .call() - .await? 
- { - Ok(()) - } else { - Err(eyre!(EthereumError::VerificationError)) - } -} diff --git a/coins/ethereum/src/crypto.rs b/coins/ethereum/src/crypto.rs index 3e9d50fa..5f681cfa 100644 --- a/coins/ethereum/src/crypto.rs +++ b/coins/ethereum/src/crypto.rs @@ -1,50 +1,54 @@ use sha3::{Digest, Keccak256}; -use group::Group; +use group::ff::PrimeField; use k256::{ elliptic_curve::{ - bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint, sec1::ToEncodedPoint, + bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint, }, - AffinePoint, ProjectivePoint, Scalar, U256, + ProjectivePoint, Scalar, U256, }; -use frost::{algorithm::Hram, curve::Secp256k1}; +use frost::{ + algorithm::{Hram, SchnorrSignature}, + curve::Secp256k1, +}; -pub fn keccak256(data: &[u8]) -> [u8; 32] { +pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { Keccak256::digest(data).into() } -pub fn hash_to_scalar(data: &[u8]) -> Scalar { - Scalar::reduce(U256::from_be_slice(&keccak256(data))) -} - -pub fn address(point: &ProjectivePoint) -> [u8; 20] { +pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); - keccak256(&encoded_point.as_ref()[1 .. 65])[12 .. 32].try_into().unwrap() + // Last 20 bytes of the hash of the concatenated x and y coordinates + // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point + keccak256(&encoded_point.as_ref()[1 .. 
65])[12 ..].try_into().unwrap() } -pub fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - if r.is_zero().into() || s.is_zero().into() { - return None; - } +#[allow(non_snake_case)] +pub struct PublicKey { + pub A: ProjectivePoint, + pub px: Scalar, + pub parity: u8, +} +impl PublicKey { #[allow(non_snake_case)] - let R = AffinePoint::decompress(&r.to_bytes(), v.into()); - #[allow(non_snake_case)] - if let Some(R) = Option::::from(R) { - #[allow(non_snake_case)] - let R = ProjectivePoint::from(R); - - let r = r.invert().unwrap(); - let u1 = ProjectivePoint::GENERATOR * (-message * r); - let u2 = R * (s * r); - let key: ProjectivePoint = u1 + u2; - if !bool::from(key.is_identity()) { - return Some(address(&key)); + pub fn new(A: ProjectivePoint) -> Option { + let affine = A.to_affine(); + let parity = u8::from(bool::from(affine.y_is_odd())) + 27; + if parity != 27 { + None?; } - } - None + let x_coord = affine.x(); + let x_coord_scalar = >::reduce_bytes(&x_coord); + // Return None if a reduction would occur + if x_coord_scalar.to_repr() != x_coord { + None?; + } + + Some(PublicKey { A, px: x_coord_scalar, parity }) + } } #[derive(Clone, Default)] @@ -55,53 +59,33 @@ impl Hram for EthereumHram { let a_encoded_point = A.to_encoded_point(true); let mut a_encoded = a_encoded_point.as_ref().to_owned(); a_encoded[0] += 25; // Ethereum uses 27/28 for point parity + assert!((a_encoded[0] == 27) || (a_encoded[0] == 28)); let mut data = address(R).to_vec(); data.append(&mut a_encoded); - data.append(&mut m.to_vec()); + data.extend(m); Scalar::reduce(U256::from_be_slice(&keccak256(&data))) } } -pub struct ProcessedSignature { - pub s: Scalar, - pub px: Scalar, - pub parity: u8, - pub message: [u8; 32], - pub e: Scalar, +pub struct Signature { + pub(crate) c: Scalar, + pub(crate) s: Scalar, } - -#[allow(non_snake_case)] -pub fn preprocess_signature_for_ecrecover( - m: [u8; 32], - R: &ProjectivePoint, - s: Scalar, - A: &ProjectivePoint, - chain_id: 
U256, -) -> (Scalar, Scalar) { - let processed_sig = process_signature_for_contract(m, R, s, A, chain_id); - let sr = processed_sig.s.mul(&processed_sig.px).negate(); - let er = processed_sig.e.mul(&processed_sig.px).negate(); - (sr, er) -} - -#[allow(non_snake_case)] -pub fn process_signature_for_contract( - m: [u8; 32], - R: &ProjectivePoint, - s: Scalar, - A: &ProjectivePoint, - chain_id: U256, -) -> ProcessedSignature { - let encoded_pk = A.to_encoded_point(true); - let px = &encoded_pk.as_ref()[1 .. 33]; - let px_scalar = Scalar::reduce(U256::from_be_slice(px)); - let e = EthereumHram::hram(R, A, &[chain_id.to_be_byte_array().as_slice(), &m].concat()); - ProcessedSignature { - s, - px: px_scalar, - parity: &encoded_pk.as_ref()[0] - 2, - #[allow(non_snake_case)] - message: m, - e, +impl Signature { + pub fn new( + public_key: &PublicKey, + chain_id: U256, + m: &[u8], + signature: SchnorrSignature, + ) -> Option { + let c = EthereumHram::hram( + &signature.R, + &public_key.A, + &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), + ); + if !signature.verify(public_key.A, c) { + None?; + } + Some(Signature { c, s: signature.s }) } } diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index 75a58525..505de38e 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,2 +1,16 @@ -pub mod contract; +use thiserror::Error; + pub mod crypto; + +pub(crate) mod abi; +pub mod schnorr; +pub mod router; + +#[cfg(test)] +mod tests; + +#[derive(Error, Debug)] +pub enum Error { + #[error("failed to verify Schnorr signature")] + InvalidSignature, +} diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs new file mode 100644 index 00000000..3696fd9b --- /dev/null +++ b/coins/ethereum/src/router.rs @@ -0,0 +1,30 @@ +pub use crate::abi::router::*; + +/* +use crate::crypto::{ProcessedSignature, PublicKey}; +use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode}; +use 
eyre::Result; +use std::{convert::From, fs::File, sync::Arc}; + +pub async fn router_update_public_key( + contract: &Router, + public_key: &PublicKey, + signature: &ProcessedSignature, +) -> std::result::Result, eyre::ErrReport> { + let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into()); + let pending_tx = tx.send().await?; + let receipt = pending_tx.await?; + Ok(receipt) +} + +pub async fn router_execute( + contract: &Router, + txs: Vec, + signature: &ProcessedSignature, +) -> std::result::Result, eyre::ErrReport> { + let tx = contract.execute(txs, signature.into()).send(); + let pending_tx = tx.send().await?; + let receipt = pending_tx.await?; + Ok(receipt) +} +*/ diff --git a/coins/ethereum/src/schnorr.rs b/coins/ethereum/src/schnorr.rs new file mode 100644 index 00000000..0e4495ec --- /dev/null +++ b/coins/ethereum/src/schnorr.rs @@ -0,0 +1,34 @@ +use eyre::{eyre, Result}; + +use group::ff::PrimeField; + +use ethers_providers::{Provider, Http}; + +use crate::{ + Error, + crypto::{keccak256, PublicKey, Signature}, +}; +pub use crate::abi::schnorr::*; + +pub async fn call_verify( + contract: &Schnorr>, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> Result<()> { + if contract + .verify( + public_key.parity, + public_key.px.to_repr().into(), + keccak256(message), + signature.c.to_repr().into(), + signature.s.to_repr().into(), + ) + .call() + .await? 
+ { + Ok(()) + } else { + Err(eyre!(Error::InvalidSignature)) + } +} diff --git a/coins/ethereum/src/tests/crypto.rs b/coins/ethereum/src/tests/crypto.rs new file mode 100644 index 00000000..6dced933 --- /dev/null +++ b/coins/ethereum/src/tests/crypto.rs @@ -0,0 +1,132 @@ +use rand_core::OsRng; + +use sha2::Sha256; +use sha3::{Digest, Keccak256}; + +use group::Group; +use k256::{ + ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey}, + elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint}, + U256, Scalar, AffinePoint, ProjectivePoint, +}; + +use frost::{ + curve::Secp256k1, + algorithm::{Hram, IetfSchnorr}, + tests::{algorithm_machines, sign}, +}; + +use crate::{crypto::*, tests::key_gen}; + +pub fn hash_to_scalar(data: &[u8]) -> Scalar { + Scalar::reduce(U256::from_be_slice(&keccak256(data))) +} + +pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) { + return None; + } + + #[allow(non_snake_case)] + let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into()); + #[allow(non_snake_case)] + if let Some(R) = Option::::from(R) { + #[allow(non_snake_case)] + let R = ProjectivePoint::from(R); + + let r = r.invert().unwrap(); + let u1 = ProjectivePoint::GENERATOR * (-message * r); + let u2 = R * (s * r); + let key: ProjectivePoint = u1 + u2; + if !bool::from(key.is_identity()) { + return Some(address(&key)); + } + } + + None +} + +#[test] +fn test_ecrecover() { + let private = SigningKey::random(&mut OsRng); + let public = VerifyingKey::from(&private); + + // Sign the signature + const MESSAGE: &[u8] = b"Hello, World!"; + let (sig, recovery_id) = private + .as_nonzero_scalar() + .try_sign_prehashed_rfc6979::(&Keccak256::digest(MESSAGE), b"") + .unwrap(); + + // Sanity check the signature verifies + #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result + { + 
assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ()); + } + + // Perform the ecrecover + assert_eq!( + ecrecover( + hash_to_scalar(MESSAGE), + u8::from(recovery_id.unwrap().is_y_odd()) + 27, + *sig.r(), + *sig.s() + ) + .unwrap(), + address(&ProjectivePoint::from(public.as_affine())) + ); +} + +// Run the sign test with the EthereumHram +#[test] +fn test_signing() { + let (keys, _) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + + let algo = IetfSchnorr::::ietf(); + let _sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); +} + +#[allow(non_snake_case)] +pub fn preprocess_signature_for_ecrecover( + R: ProjectivePoint, + public_key: &PublicKey, + chain_id: U256, + m: &[u8], + s: Scalar, +) -> (u8, Scalar, Scalar) { + let c = EthereumHram::hram( + &R, + &public_key.A, + &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), + ); + let sa = -(s * public_key.px); + let ca = -(c * public_key.px); + (public_key.parity, sa, ca) +} + +#[test] +fn test_ecrecover_hack() { + let (keys, public_key) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + let hashed_message = keccak256(MESSAGE); + let chain_id = U256::ONE; + let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); + + let algo = IetfSchnorr::::ietf(); + let sig = sign( + &mut OsRng, + &algo, + keys.clone(), + algorithm_machines(&mut OsRng, &algo, &keys), + full_message, + ); + + let (parity, sa, ca) = + preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s); + let q = ecrecover(sa, parity, public_key.px, ca).unwrap(); + assert_eq!(q, address(&sig.R)); +} diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs new file mode 100644 index 00000000..c468cfb6 --- /dev/null +++ b/coins/ethereum/src/tests/mod.rs @@ -0,0 +1,92 @@ +use std::{sync::Arc, time::Duration, fs::File, collections::HashMap}; + +use rand_core::OsRng; + +use 
group::ff::PrimeField; +use k256::{Scalar, ProjectivePoint}; +use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; + +use ethers_core::{ + types::{H160, Signature as EthersSignature}, + abi::Abi, +}; +use ethers_contract::ContractFactory; +use ethers_providers::{Middleware, Provider, Http}; + +use crate::crypto::PublicKey; + +mod crypto; +mod schnorr; +mod router; + +pub fn key_gen() -> (HashMap>, PublicKey) { + let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng); + let mut group_key = keys[&Participant::new(1).unwrap()].group_key(); + + let mut offset = Scalar::ZERO; + while PublicKey::new(group_key).is_none() { + offset += Scalar::ONE; + group_key += ProjectivePoint::GENERATOR; + } + for keys in keys.values_mut() { + *keys = keys.offset(offset); + } + let public_key = PublicKey::new(group_key).unwrap(); + + (keys, public_key) +} + +// TODO: Replace with a contract deployment from an unknown account, so the environment solely has +// to fund the deployer, not create/pass a wallet +// TODO: Deterministic deployments across chains +pub async fn deploy_contract( + chain_id: u32, + client: Arc>, + wallet: &k256::ecdsa::SigningKey, + name: &str, +) -> eyre::Result { + let abi: Abi = + serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap(); + + let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); + let hex_bin = + if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; + let bin = hex::decode(hex_bin).unwrap(); + let factory = ContractFactory::new(abi, bin.into(), client.clone()); + + let mut deployment_tx = factory.deploy(())?.tx; + deployment_tx.set_chain_id(chain_id); + deployment_tx.set_gas(1_000_000); + let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?; + deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas); + 
deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas); + + let sig_hash = deployment_tx.sighash(); + let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap(); + + // EIP-155 v + let mut v = u64::from(rid.to_byte()); + assert!((v == 0) || (v == 1)); + v += u64::from((chain_id * 2) + 35); + + let r = sig.r().to_repr(); + let r_ref: &[u8] = r.as_ref(); + let s = sig.s().to_repr(); + let s_ref: &[u8] = s.as_ref(); + let deployment_tx = + deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v }); + + let pending_tx = client.send_raw_transaction(deployment_tx).await?; + + let mut receipt; + while { + receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?; + receipt.is_none() + } { + tokio::time::sleep(Duration::from_secs(6)).await; + } + let receipt = receipt.unwrap(); + assert!(receipt.status == Some(1.into())); + + Ok(receipt.contract_address.unwrap()) +} diff --git a/coins/ethereum/src/tests/router.rs b/coins/ethereum/src/tests/router.rs new file mode 100644 index 00000000..c9be93be --- /dev/null +++ b/coins/ethereum/src/tests/router.rs @@ -0,0 +1,109 @@ +use std::{convert::TryFrom, sync::Arc, collections::HashMap}; + +use rand_core::OsRng; + +use group::ff::PrimeField; +use frost::{ + curve::Secp256k1, + Participant, ThresholdKeys, + algorithm::IetfSchnorr, + tests::{algorithm_machines, sign}, +}; + +use ethers_core::{ + types::{H160, U256, Bytes}, + abi::AbiEncode, + utils::{Anvil, AnvilInstance}, +}; +use ethers_providers::{Middleware, Provider, Http}; + +use crate::{ + crypto::{keccak256, PublicKey, EthereumHram, Signature}, + router::{self, *}, + tests::{key_gen, deploy_contract}, +}; + +async fn setup_test() -> ( + u32, + AnvilInstance, + Router>, + HashMap>, + PublicKey, +) { + let anvil = Anvil::new().spawn(); + + let provider = Provider::::try_from(anvil.endpoint()).unwrap(); + let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let wallet = 
anvil.keys()[0].clone().into(); + let client = Arc::new(provider); + + let contract_address = + deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap(); + let contract = Router::new(contract_address, client.clone()); + + let (keys, public_key) = key_gen(); + + // Set the key to the threshold keys + let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000); + let pending_tx = tx.send().await.unwrap(); + let receipt = pending_tx.await.unwrap().unwrap(); + assert!(receipt.status == Some(1.into())); + + (chain_id, anvil, contract, keys, public_key) +} + +#[tokio::test] +async fn test_deploy_contract() { + setup_test().await; +} + +pub fn hash_and_sign( + keys: &HashMap>, + public_key: &PublicKey, + chain_id: U256, + message: &[u8], +) -> Signature { + let hashed_message = keccak256(message); + + let mut chain_id_bytes = [0; 32]; + chain_id.to_big_endian(&mut chain_id_bytes); + let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat(); + + let algo = IetfSchnorr::::ietf(); + let sig = sign( + &mut OsRng, + &algo, + keys.clone(), + algorithm_machines(&mut OsRng, &algo, keys), + full_message, + ); + + Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap() +} + +#[tokio::test] +async fn test_router_execute() { + let (chain_id, _anvil, contract, keys, public_key) = setup_test().await; + + let to = H160([0u8; 20]); + let value = U256([0u64; 4]); + let data = Bytes::from([0]); + let tx = OutInstruction { to, value, data: data.clone() }; + + let nonce_call = contract.nonce(); + let nonce = nonce_call.call().await.unwrap(); + + let encoded = + ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode(); + let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded); + + let tx = contract + .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() }) + .gas(300_000); + let pending_tx = tx.send().await.unwrap(); + let 
receipt = dbg!(pending_tx.await.unwrap().unwrap()); + assert!(receipt.status == Some(1.into())); + + println!("gas used: {:?}", receipt.cumulative_gas_used); + println!("logs: {:?}", receipt.logs); +} diff --git a/coins/ethereum/src/tests/schnorr.rs b/coins/ethereum/src/tests/schnorr.rs new file mode 100644 index 00000000..9525e4d6 --- /dev/null +++ b/coins/ethereum/src/tests/schnorr.rs @@ -0,0 +1,67 @@ +use std::{convert::TryFrom, sync::Arc}; + +use rand_core::OsRng; + +use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar}; + +use ethers_core::utils::{keccak256, Anvil, AnvilInstance}; +use ethers_providers::{Middleware, Provider, Http}; + +use frost::{ + curve::Secp256k1, + algorithm::IetfSchnorr, + tests::{algorithm_machines, sign}, +}; + +use crate::{ + crypto::*, + schnorr::*, + tests::{key_gen, deploy_contract}, +}; + +async fn setup_test() -> (u32, AnvilInstance, Schnorr>) { + let anvil = Anvil::new().spawn(); + + let provider = Provider::::try_from(anvil.endpoint()).unwrap(); + let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let wallet = anvil.keys()[0].clone().into(); + let client = Arc::new(provider); + + let contract_address = + deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap(); + let contract = Schnorr::new(contract_address, client.clone()); + (chain_id, anvil, contract) +} + +#[tokio::test] +async fn test_deploy_contract() { + setup_test().await; +} + +#[tokio::test] +async fn test_ecrecover_hack() { + let (chain_id, _anvil, contract) = setup_test().await; + let chain_id = U256::from(chain_id); + + let (keys, public_key) = key_gen(); + + const MESSAGE: &[u8] = b"Hello, World!"; + let hashed_message = keccak256(MESSAGE); + let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); + + let algo = IetfSchnorr::::ietf(); + let sig = sign( + &mut OsRng, + &algo, + keys.clone(), + algorithm_machines(&mut OsRng, &algo, &keys), + full_message, + ); + let sig = 
Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap(); + + call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap(); + // Test an invalid signature fails + let mut sig = sig; + sig.s += Scalar::ONE; + assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err()); +} diff --git a/coins/ethereum/tests/contract.rs b/coins/ethereum/tests/contract.rs deleted file mode 100644 index 37875819..00000000 --- a/coins/ethereum/tests/contract.rs +++ /dev/null @@ -1,128 +0,0 @@ -use std::{convert::TryFrom, sync::Arc, time::Duration, fs::File}; - -use rand_core::OsRng; - -use ::k256::{ - elliptic_curve::{bigint::ArrayEncoding, PrimeField}, - U256, -}; - -use ethers_core::{ - types::Signature, - abi::Abi, - utils::{keccak256, Anvil, AnvilInstance}, -}; -use ethers_contract::ContractFactory; -use ethers_providers::{Middleware, Provider, Http}; - -use frost::{ - curve::Secp256k1, - Participant, - algorithm::IetfSchnorr, - tests::{key_gen, algorithm_machines, sign}, -}; - -use ethereum_serai::{ - crypto, - contract::{Schnorr, call_verify}, -}; - -// TODO: Replace with a contract deployment from an unknown account, so the environment solely has -// to fund the deployer, not create/pass a wallet -pub async fn deploy_schnorr_verifier_contract( - chain_id: u32, - client: Arc>, - wallet: &k256::ecdsa::SigningKey, -) -> eyre::Result>> { - let abi: Abi = serde_json::from_reader(File::open("./artifacts/Schnorr.abi").unwrap()).unwrap(); - - let hex_bin_buf = std::fs::read_to_string("./artifacts/Schnorr.bin").unwrap(); - let hex_bin = - if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; - let bin = hex::decode(hex_bin).unwrap(); - let factory = ContractFactory::new(abi, bin.into(), client.clone()); - - let mut deployment_tx = factory.deploy(())?.tx; - deployment_tx.set_chain_id(chain_id); - deployment_tx.set_gas(500_000); - let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?; - 
deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas); - deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas); - - let sig_hash = deployment_tx.sighash(); - let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap(); - - // EIP-155 v - let mut v = u64::from(rid.to_byte()); - assert!((v == 0) || (v == 1)); - v += u64::from((chain_id * 2) + 35); - - let r = sig.r().to_repr(); - let r_ref: &[u8] = r.as_ref(); - let s = sig.s().to_repr(); - let s_ref: &[u8] = s.as_ref(); - let deployment_tx = deployment_tx.rlp_signed(&Signature { r: r_ref.into(), s: s_ref.into(), v }); - - let pending_tx = client.send_raw_transaction(deployment_tx).await?; - - let mut receipt; - while { - receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?; - receipt.is_none() - } { - tokio::time::sleep(Duration::from_secs(6)).await; - } - let receipt = receipt.unwrap(); - assert!(receipt.status == Some(1.into())); - - let contract = Schnorr::new(receipt.contract_address.unwrap(), client.clone()); - Ok(contract) -} - -async fn deploy_test_contract() -> (u32, AnvilInstance, Schnorr>) { - let anvil = Anvil::new().spawn(); - - let provider = - Provider::::try_from(anvil.endpoint()).unwrap().interval(Duration::from_millis(10u64)); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); - let wallet = anvil.keys()[0].clone().into(); - let client = Arc::new(provider); - - (chain_id, anvil, deploy_schnorr_verifier_contract(chain_id, client, &wallet).await.unwrap()) -} - -#[tokio::test] -async fn test_deploy_contract() { - deploy_test_contract().await; -} - -#[tokio::test] -async fn test_ecrecover_hack() { - let (chain_id, _anvil, contract) = deploy_test_contract().await; - let chain_id = U256::from(chain_id); - - let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let group_key = keys[&Participant::new(1).unwrap()].group_key(); - - const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = 
keccak256(MESSAGE); - - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); - - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - let mut processed_sig = - crypto::process_signature_for_contract(hashed_message, &sig.R, sig.s, &group_key, chain_id); - - call_verify(&contract, &processed_sig).await.unwrap(); - - // test invalid signature fails - processed_sig.message[0] = 0; - assert!(call_verify(&contract, &processed_sig).await.is_err()); -} diff --git a/coins/ethereum/tests/crypto.rs b/coins/ethereum/tests/crypto.rs deleted file mode 100644 index f1ab08b0..00000000 --- a/coins/ethereum/tests/crypto.rs +++ /dev/null @@ -1,87 +0,0 @@ -use k256::{ - elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, sec1::ToEncodedPoint}, - ProjectivePoint, Scalar, U256, -}; -use frost::{curve::Secp256k1, Participant}; - -use ethereum_serai::crypto::*; - -#[test] -fn test_ecrecover() { - use rand_core::OsRng; - use sha2::Sha256; - use sha3::{Digest, Keccak256}; - use k256::ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey}; - - let private = SigningKey::random(&mut OsRng); - let public = VerifyingKey::from(&private); - - const MESSAGE: &[u8] = b"Hello, World!"; - let (sig, recovery_id) = private - .as_nonzero_scalar() - .try_sign_prehashed_rfc6979::(&Keccak256::digest(MESSAGE), b"") - .unwrap(); - #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result - { - assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ()); - } - - assert_eq!( - ecrecover(hash_to_scalar(MESSAGE), recovery_id.unwrap().is_y_odd().into(), *sig.r(), *sig.s()) - .unwrap(), - address(&ProjectivePoint::from(public.as_affine())) - ); -} - -#[test] -fn test_signing() { - use frost::{ - algorithm::IetfSchnorr, - tests::{algorithm_machines, key_gen, sign}, - }; - use rand_core::OsRng; - 
- let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let _group_key = keys[&Participant::new(1).unwrap()].group_key(); - - const MESSAGE: &[u8] = b"Hello, World!"; - - let algo = IetfSchnorr::::ietf(); - let _sig = - sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); -} - -#[test] -fn test_ecrecover_hack() { - use frost::{ - algorithm::IetfSchnorr, - tests::{algorithm_machines, key_gen, sign}, - }; - use rand_core::OsRng; - - let keys = key_gen::<_, Secp256k1>(&mut OsRng); - let group_key = keys[&Participant::new(1).unwrap()].group_key(); - let group_key_encoded = group_key.to_encoded_point(true); - let group_key_compressed = group_key_encoded.as_ref(); - let group_key_x = Scalar::reduce(U256::from_be_slice(&group_key_compressed[1 .. 33])); - - const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let chain_id = U256::ONE; - - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); - - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - - let (sr, er) = - preprocess_signature_for_ecrecover(hashed_message, &sig.R, sig.s, &group_key, chain_id); - let q = ecrecover(sr, group_key_compressed[0] - 2, group_key_x, er).unwrap(); - assert_eq!(q, address(&sig.R)); -} diff --git a/coins/ethereum/tests/mod.rs b/coins/ethereum/tests/mod.rs deleted file mode 100644 index 257fb61f..00000000 --- a/coins/ethereum/tests/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod contract; -mod crypto; diff --git a/spec/Getting Started.md b/spec/Getting Started.md index 0034d69d..c2530b2a 100644 --- a/spec/Getting Started.md +++ b/spec/Getting Started.md @@ -36,16 +36,16 @@ rustup target add wasm32-unknown-unknown --toolchain nightly ``` cargo install svm-rs -svm install 0.8.16 -svm use 0.8.16 +svm install 0.8.25 +svm use 0.8.25 ``` ### Install Solidity Compiler Version Manager ``` cargo 
install svm-rs -svm install 0.8.16 -svm use 0.8.16 +svm install 0.8.25 +svm use 0.8.25 ``` ### Install foundry (for tests) From 93be7a30674ecedfb325b6d09dc22d550d7c13f8 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 27 Mar 2024 00:17:04 -0400 Subject: [PATCH 059/126] Latest hyper-rustls, remove async-recursion I didn't remove async-recursion when I updated the repo to 1.77 as I forgot we used it in the tests. I still had to add some Box::pins, which may have been a valid option, on the prior Rust version, yet at least resolves everything now. Also updates everything which doesn't introduce further depends. --- Cargo.lock | 371 ++++++++++++++--------------- common/request/Cargo.toml | 2 +- tests/coordinator/Cargo.toml | 1 - tests/coordinator/src/tests/mod.rs | 6 +- tests/full-stack/Cargo.toml | 1 - tests/full-stack/src/tests/mod.rs | 93 ++++---- 6 files changed, 233 insertions(+), 241 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1ae1d463..ee0c2692 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -86,9 +86,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "approx" @@ -262,9 +262,9 @@ dependencies = [ [[package]] name = "async-io" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f97ab0c5b00a7cdbe5a371b9a782ee7be1316095885c8a4ea1daf490eb0ef65" +checksum = 
"dcccb0f599cfa2f8ace422d3555572f47424da5648a4382a9dd0310ff8210884" dependencies = [ "async-lock", "cfg-if", @@ -290,26 +290,15 @@ dependencies = [ "pin-project-lite 0.2.13", ] -[[package]] -name = "async-recursion" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fd55a5ba1179988837d24ab4c7cc8ed6efdeff578ede0416b4225a5fca35bd0" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.52", -] - [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -342,7 +331,7 @@ version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8d9a9bf8b79a749ee0b911b91b671cc2b6c670bdbc7e3dfd537576ddc94bb2a2" dependencies = [ - "http 0.2.11", + "http 0.2.12", "log", "url", ] @@ -355,14 +344,14 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "backtrace" @@ -449,7 +438,7 @@ version = "0.69.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a00dc851838a2120612785d195287475a3ac45514741da670b735818822129a0" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cexpr", "clang-sys", "itertools", @@ -460,7 +449,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -528,9 +517,9 @@ checksum = 
"bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" [[package]] name = "bitvec" @@ -577,9 +566,9 @@ dependencies = [ [[package]] name = "blake3" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0231f06152bf547e9c2b5194f247cd97aacf6dcd8b15d8e5ec0663f64580da87" +checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52" dependencies = [ "arrayref", "arrayvec", @@ -640,7 +629,7 @@ dependencies = [ "futures-core", "futures-util", "hex", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "hyperlocal", "log", @@ -670,9 +659,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f58b559fd6448c6e2fd0adb5720cd98a2506594cafa4737ff98c396f3e82f667" +checksum = "0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" dependencies = [ "borsh-derive", "cfg_aliases", @@ -680,15 +669,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7aadb5b6ccbd078890f6d7003694e33816e6b784358f18e15e7e6d9f065a57cd" +checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", "syn_derive", ] @@ -706,9 +695,9 @@ dependencies = [ [[package]] name = "bs58" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5353f36341f7451062466f0b755b96ac3a9547e4d7f6b70d603fc721a7d7896" +checksum = 
"bf88ba1141d185c399bee5288d850d63b8369520c1eafc32a0430b5b6c287bf4" dependencies = [ "tinyvec", ] @@ -746,9 +735,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.14.3" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2ef034f05691a48569bd920a96c81b9d91bbad1ab5ac7c4616c1f6ef36cb79f" +checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" [[package]] name = "byteorder" @@ -758,9 +747,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -787,9 +776,9 @@ dependencies = [ [[package]] name = "cargo-platform" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "694c8807f2ae16faecc43dc17d74b3eb042482789fd0eb64b39a2e04e087053f" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -975,7 +964,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -1312,14 +1301,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "cxx" -version = "1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "635179be18797d7e10edb9cd06c859580237750c7351f39ed9b298bfc17544ad" +checksum = "ff4dc7287237dd438b926a81a1a5605dad33d286870e5eee2db17bf2bcd9e92a" dependencies = [ "cc", "cxxbridge-flags", @@ -1329,9 +1318,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = 
"1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9324397d262f63ef77eb795d900c0d682a34a43ac0932bec049ed73055d52f63" +checksum = "f47c6c8ad7c1a10d3ef0fe3ff6733f4db0d78f08ef0b13121543163ef327058b" dependencies = [ "cc", "codespan-reporting", @@ -1339,24 +1328,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "cxxbridge-flags" -version = "1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a87ff7342ffaa54b7c61618e0ce2bbcf827eba6d55b923b83d82551acbbecfe5" +checksum = "701a1ac7a697e249cdd8dc026d7a7dafbfd0dbcd8bd24ec55889f2bc13dd6287" [[package]] name = "cxxbridge-macro" -version = "1.0.119" +version = "1.0.120" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70b5b86cf65fa0626d85720619d80b288013477a91a0389fa8bc716bf4903ad1" +checksum = "b404f596046b0bb2d903a9c786b875a126261b52b7c3a64bbb66382c41c771df" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -1542,7 +1531,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -1775,7 +1764,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -1918,7 +1907,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.52", + "syn 2.0.55", "toml 0.7.8", "walkdir", ] @@ -1936,7 +1925,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -1962,7 +1951,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.52", + "syn 2.0.55", "tempfile", "thiserror", "tiny-keccak", @@ -1986,7 +1975,7 @@ dependencies = [ "futures-timer", "futures-util", "hashers", - "http 0.2.11", + "http 0.2.12", "instant", "jsonwebtoken", "once_cell", @@ -2051,7 +2040,7 @@ dependencies 
= [ "fs-err", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -2072,9 +2061,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "fdlimit" @@ -2112,9 +2101,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "file-per-thread-logger" @@ -2325,7 +2314,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -2337,7 +2326,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -2347,7 +2336,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -2490,9 +2479,9 @@ checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" [[package]] name = "futures-lite" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445ba825b27408685aaecefd65178908c36c6e96aaf6d8599419d46e624192ba" +checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", "pin-project-lite 0.2.13", @@ -2506,7 +2495,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -2585,7 +2574,7 
@@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "27d12c0aed7f1e24276a241aadc4cb8ea9f83000f34bc062b7cc2d51e3b0fabd" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "debugid", "fxhash", "serde", @@ -2679,7 +2668,7 @@ dependencies = [ "bstr", "log", "regex-automata 0.4.6", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -2707,17 +2696,17 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.2.5", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util", @@ -2858,9 +2847,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -2885,7 +2874,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite 0.2.13", ] @@ -2901,12 +2890,12 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41cb79eb393015dadd30fc252023adb0b2400a0caee0fa2a077e6e21a551e840" +checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" dependencies = [ "bytes", - "futures-util", + "futures-core", "http 1.1.0", "http-body 1.0.0", "pin-project-lite 0.2.13", @@ -2947,13 
+2936,13 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite 0.2.13", - "socket2 0.5.6", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -2981,15 +2970,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.26.0" +version = "0.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +checksum = "736f15a50e749d033164c56c09783b6102c4ff8da79ad77dbddbbaea0f8567f7" dependencies = [ "futures-util", "http 1.1.0", "hyper 1.2.0", "hyper-util", - "rustls 0.22.2", + "rustls 0.23.4", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3113,7 +3102,7 @@ dependencies = [ "attohttpc", "bytes", "futures", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "log", "rand", @@ -3179,9 +3168,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.5" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b0b929d511467233429c45a44ac1dcaa21ba0f5ba11e4879e6ed28ddb4f9df4" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -3255,9 +3244,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "js-sys" @@ -3328,7 +3317,7 @@ checksum = "cf4d945a6008c9b03db3354fb3c83ee02d2faa9f2e755ec1dfb69c3551b8f4ba" dependencies = [ "futures-channel", "futures-util", - "http 0.2.11", + "http 0.2.12", "hyper 0.14.28", "jsonrpsee-core", "jsonrpsee-types", @@ -3833,7 +3822,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] 
[[package]] @@ -3953,9 +3942,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.15" +version = "1.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "037731f5d3aaa87a5675e895b63ddff1a87624bc29f77004ea829809654e48f6" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" dependencies = [ "cc", "pkg-config", @@ -4086,7 +4075,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -4100,7 +4089,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -4111,7 +4100,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -4122,7 +4111,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -4199,9 +4188,9 @@ dependencies = [ [[package]] name = "memoffset" -version = "0.9.0" +version = "0.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" +checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" dependencies = [ "autocfg", ] @@ -4761,7 +4750,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -5200,7 +5189,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.5", + "indexmap 2.2.6", ] [[package]] @@ -5230,7 +5219,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -5269,18 +5258,19 @@ checksum 
= "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "polling" -version = "3.5.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24f040dee2588b4963afb4e420540439d126f73fdacf4a9c486a96d840bac3c9" +checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" dependencies = [ "cfg-if", "concurrent-queue", + "hermit-abi", "pin-project-lite 0.2.13", "rustix", "tracing", @@ -5369,7 +5359,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -5453,14 +5443,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -5499,7 +5489,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -5724,9 +5714,9 @@ checksum = "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" [[package]] name = "rayon" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e4963ed1bc86e4f3ee217022bd855b297cef07fb9eac5dfa1f788b220b49b3bd" +checksum = "b418a60154510ca1a002a752ca9714984e21e4241e804d32555251faf8b78ffa" dependencies = [ "either", "rayon-core", @@ -5800,7 +5790,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -5818,14 +5808,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", "regex-automata 0.4.6", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -5845,7 +5835,7 @@ checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -5856,9 +5846,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "reqwest" @@ -5872,7 +5862,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "ipnet", @@ -6067,11 +6057,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + 
"bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -6092,10 +6082,11 @@ dependencies = [ [[package]] name = "rustls" -version = "0.22.2" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e87c9956bd9807afa1f77e0f7594af32566e830e088a5576d27c5b6f30f49d41" +checksum = "8c4d6d8ad9f2492485e13453acbb291dd08f64441b6609c491f1c2cd2c6b4fe1" dependencies = [ + "once_cell", "ring 0.17.8", "rustls-pki-types", "rustls-webpki 0.102.2", @@ -6128,9 +6119,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ede67b28608b4c60685c7d54122d4400d90f62b40caee7700e700380a390fa8" +checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" [[package]] name = "rustls-webpki" @@ -6298,7 +6289,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -6875,7 +6866,7 @@ name = "sc-rpc-server" version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ - "http 0.2.11", + "http 0.2.12", "jsonrpsee", "log", "serde_json", @@ -7059,7 +7050,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -7121,9 +7112,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7d66a1128282b7ef025a8ead62a4a9fcf017382ec53b8ffbf4d7bf77bd3c60" +checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" dependencies = [ "bitvec", "cfg-if", @@ -7135,9 +7126,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.10.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf2c68b89cafb3b8d918dd07b42be0da66ff202cf1155c5739a4e0c1ea0dc19" 
+checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -7443,7 +7434,6 @@ dependencies = [ name = "serai-coordinator-tests" version = "0.1.0" dependencies = [ - "async-recursion", "async-trait", "blake2", "borsh", @@ -7505,7 +7495,6 @@ version = "0.1.0" name = "serai-full-stack-tests" version = "0.1.0" dependencies = [ - "async-recursion", "async-trait", "bitcoin-serai", "curve25519-dalek", @@ -7911,14 +7900,14 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "serde_json" -version = "1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ "itoa", "ryu", @@ -7933,7 +7922,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -7959,15 +7948,15 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.6.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15d167997bd841ec232f5b2b8e0e26606df2e7caa4c31b95ea9ca52b200bd270" +checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" dependencies = [ "base64 0.21.7", "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_derive", "serde_json", @@ -8115,9 +8104,9 @@ checksum = "826167069c09b99d56f31e9ae5c99049e932a98c9dc2dac47645b08dbbf76ba7" [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = 
"3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "snap" @@ -8171,7 +8160,7 @@ dependencies = [ "base64 0.13.1", "bytes", "futures", - "http 0.2.11", + "http 0.2.12", "httparse", "log", "rand", @@ -8210,7 +8199,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -8406,7 +8395,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -8425,7 +8414,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -8597,7 +8586,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -8750,7 +8739,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -8811,9 +8800,9 @@ checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" [[package]] name = "ss58-registry" -version = "1.46.0" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1114ee5900b8569bbc8b1a014a942f937b752af4b44f4607430b5f86cedaac0" +checksum = "4743ce898933fbff7bbf414f497c459a782d496269644b3d650a398ae6a487ba" dependencies = [ "Inflector", "num-format", @@ -8929,7 +8918,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -9017,9 +9006,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.52" +version = "2.0.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b699d15b36d1f02c3e7c69f8ffef53de37aefae075d8488d4ba1a7788d574a07" +checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" dependencies = [ "proc-macro2", "quote", @@ -9035,7 +9024,7 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -9127,22 +9116,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -9263,25 +9252,25 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.22.2", + "rustls 0.23.4", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite 0.2.13", @@ -9340,7 +9329,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", @@ -9353,7 +9342,7 @@ version = "0.21.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ - "indexmap 2.2.5", + "indexmap 2.2.6", "toml_datetime", "winnow", ] @@ -9380,11 +9369,11 @@ version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "61c5bb1d698276a2443e5ecfabc1008bf15a36c12e6a7176e7bf089ea9131140" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "bytes", "futures-core", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body 0.4.6", "http-range-header", "pin-project-lite 0.2.13", @@ -9424,7 +9413,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -9755,9 +9744,9 @@ checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" [[package]] name = "valuable" @@ -9829,7 +9818,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", "wasm-bindgen-shared", ] @@ -9863,7 +9852,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -9953,7 +9942,7 @@ version = "0.110.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ - "indexmap 
2.2.5", + "indexmap 2.2.6", "semver 1.0.22", ] @@ -9968,7 +9957,7 @@ dependencies = [ "bumpalo", "cfg-if", "fxprof-processed-profile", - "indexmap 2.2.5", + "indexmap 2.2.6", "libc", "log", "object 0.31.1", @@ -10067,7 +10056,7 @@ dependencies = [ "anyhow", "cranelift-entity", "gimli 0.27.3", - "indexmap 2.2.5", + "indexmap 2.2.6", "log", "object 0.31.1", "serde", @@ -10134,7 +10123,7 @@ dependencies = [ "anyhow", "cc", "cfg-if", - "indexmap 2.2.5", + "indexmap 2.2.6", "libc", "log", "mach", @@ -10172,7 +10161,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -10549,7 +10538,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] @@ -10569,7 +10558,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.52", + "syn 2.0.55", ] [[package]] diff --git a/common/request/Cargo.toml b/common/request/Cargo.toml index 0fe9ae5c..e5018056 100644 --- a/common/request/Cargo.toml +++ b/common/request/Cargo.toml @@ -23,7 +23,7 @@ hyper-util = { version = "0.1", default-features = false, features = ["http1", " http-body-util = { version = "0.1", default-features = false } tokio = { version = "1", default-features = false } -hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } +hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true } zeroize = { version = "1", optional = true } base64ct = { version = "1", features = ["alloc"], optional = true } diff --git a/tests/coordinator/Cargo.toml b/tests/coordinator/Cargo.toml index a331d484..f5bc6426 100644 --- a/tests/coordinator/Cargo.toml +++ 
b/tests/coordinator/Cargo.toml @@ -20,7 +20,6 @@ workspace = true hex = "0.4" async-trait = "0.1" -async-recursion = "1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } diff --git a/tests/coordinator/src/tests/mod.rs b/tests/coordinator/src/tests/mod.rs index 5f0acab6..b564a26b 100644 --- a/tests/coordinator/src/tests/mod.rs +++ b/tests/coordinator/src/tests/mod.rs @@ -135,7 +135,6 @@ pub(crate) async fn new_test(test_body: impl TestBody) { *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // Spawns a coordinator, if one has yet to be spawned, or else runs the test. - #[async_recursion::async_recursion] async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { // If the outer operations have yet to be set, these *are* the outer operations let outer_ops = OUTER_OPS.get().unwrap(); @@ -178,7 +177,10 @@ pub(crate) async fn new_test(test_body: impl TestBody) { test.provide_container(composition); drop(context_lock); - test.run_async(spawn_coordinator_or_run_test).await; + fn recurse(ops: DockerOperations) -> core::pin::Pin>> { + Box::pin(spawn_coordinator_or_run_test(ops)) + } + test.run_async(recurse).await; } else { let outer_ops = outer_ops.lock().await.take().unwrap(); diff --git a/tests/full-stack/Cargo.toml b/tests/full-stack/Cargo.toml index b45d7b53..58e6de28 100644 --- a/tests/full-stack/Cargo.toml +++ b/tests/full-stack/Cargo.toml @@ -20,7 +20,6 @@ workspace = true hex = "0.4" async-trait = "0.1" -async-recursion = "1" zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false } diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs index 31c98952..1fae8c48 100644 --- a/tests/full-stack/src/tests/mod.rs +++ b/tests/full-stack/src/tests/mod.rs @@ -161,54 +161,57 @@ pub(crate) async fn new_test(test_body: impl TestBody) { *OUTER_OPS.get_or_init(|| Mutex::new(None)).lock().await = None; // 
Spawns a coordinator, if one has yet to be spawned, or else runs the test. - #[async_recursion::async_recursion] - async fn spawn_coordinator_or_run_test(inner_ops: DockerOperations) { - // If the outer operations have yet to be set, these *are* the outer operations - let outer_ops = OUTER_OPS.get().unwrap(); - if outer_ops.lock().await.is_none() { - *outer_ops.lock().await = Some(inner_ops); - } + pub(crate) fn spawn_coordinator_or_run_test( + inner_ops: DockerOperations, + ) -> core::pin::Pin>> { + Box::pin(async { + // If the outer operations have yet to be set, these *are* the outer operations + let outer_ops = OUTER_OPS.get().unwrap(); + if outer_ops.lock().await.is_none() { + *outer_ops.lock().await = Some(inner_ops); + } - let context_lock = CONTEXT.get().unwrap().lock().await; - let Context { pending_coordinator_compositions, handles, test_body } = - context_lock.as_ref().unwrap(); + let context_lock = CONTEXT.get().unwrap().lock().await; + let Context { pending_coordinator_compositions, handles, test_body } = + context_lock.as_ref().unwrap(); - // Check if there is a coordinator left - let maybe_coordinator = { - let mut remaining = pending_coordinator_compositions.lock().await; - let maybe_coordinator = if !remaining.is_empty() { - let handles = handles[handles.len() - remaining.len()].clone(); - let composition = remaining.remove(0); - Some((composition, handles)) + // Check if there is a coordinator left + let maybe_coordinator = { + let mut remaining = pending_coordinator_compositions.lock().await; + let maybe_coordinator = if !remaining.is_empty() { + let handles = handles[handles.len() - remaining.len()].clone(); + let composition = remaining.remove(0); + Some((composition, handles)) + } else { + None + }; + drop(remaining); + maybe_coordinator + }; + + if let Some((mut composition, handles)) = maybe_coordinator { + let network = { + let outer_ops = outer_ops.lock().await; + let outer_ops = outer_ops.as_ref().unwrap(); + // Spawn it by building 
another DockerTest which recursively calls this function + // TODO: Spawn this outside of DockerTest so we can remove the recursion + let serai_container = outer_ops.handle(&handles.serai); + composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); + let message_queue_container = outer_ops.handle(&handles.message_queue); + composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); + + format!("container:{}", serai_container.name()) + }; + let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); + test.provide_container(composition); + + drop(context_lock); + test.run_async(spawn_coordinator_or_run_test).await; } else { - None - }; - drop(remaining); - maybe_coordinator - }; - - if let Some((mut composition, handles)) = maybe_coordinator { - let network = { - let outer_ops = outer_ops.lock().await; - let outer_ops = outer_ops.as_ref().unwrap(); - // Spawn it by building another DockerTest which recursively calls this function - // TODO: Spawn this outside of DockerTest so we can remove the recursion - let serai_container = outer_ops.handle(&handles.serai); - composition.modify_env("SERAI_HOSTNAME", serai_container.ip()); - let message_queue_container = outer_ops.handle(&handles.message_queue); - composition.modify_env("MESSAGE_QUEUE_RPC", message_queue_container.ip()); - - format!("container:{}", serai_container.name()) - }; - let mut test = DockerTest::new().with_network(dockertest::Network::External(network)); - test.provide_container(composition); - - drop(context_lock); - test.run_async(spawn_coordinator_or_run_test).await; - } else { - let outer_ops = outer_ops.lock().await.take().unwrap(); - test_body.body(outer_ops, handles.clone()).await; - } + let outer_ops = outer_ops.lock().await.take().unwrap(); + test_body.body(outer_ops, handles.clone()).await; + } + }) } test.run_async(spawn_coordinator_or_run_test).await; From ab4d79628d6345383dc46bfea3b93d92ba2ecffc Mon Sep 17 00:00:00 2001 From: Boog900 
<54e72d8a-345f-4599-bd90-c6b9bc7d0ec5@aleeas.com> Date: Tue, 9 Apr 2024 20:14:52 +0100 Subject: [PATCH 060/126] fix CLSAG verification. We were not setting c1 to the last calculated c during verification, instead keeping it set to the one provided in the signature. --- coins/monero/src/ringct/clsag/mod.rs | 16 ++++++++-------- coins/monero/src/tests/clsag.rs | 7 ++++++- 2 files changed, 14 insertions(+), 9 deletions(-) diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 1290e3e3..fd8253e8 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -9,7 +9,7 @@ use std_shims::{ use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; -use subtle::{ConstantTimeEq, Choice, CtOption}; +use subtle::{ConstantTimeEq, ConditionallySelectable}; use curve25519_dalek::{ constants::ED25519_BASEPOINT_TABLE, @@ -169,13 +169,8 @@ fn core( } // Perform the core loop - let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0)); + let mut c1 = c; for i in (start .. end).map(|i| i % n) { - // This will only execute once and shouldn't need to be constant time. Making it constant time - // removes the risk of branch prediction creating timing differences depending on ring index - // however - c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0))); - let c_p = mu_P * c; let c_c = mu_C * c; @@ -188,10 +183,15 @@ fn core( to_hash.extend(L.compress().to_bytes()); to_hash.extend(R.compress().to_bytes()); c = hash_to_scalar(&to_hash); + + // This will only execute once and shouldn't need to be constant time. 
Making it constant time + // removes the risk of branch prediction creating timing differences depending on ring index + // however + c1.conditional_assign(&c, i.ct_eq(&(n - 1))); } // This first tuple is needed to continue signing, the latter is the c to be tested/worked with - ((D, c * mu_P, c * mu_C), c1.unwrap_or(c)) + ((D, c * mu_P, c * mu_C), c1) } /// CLSAG signature, as used in Monero. diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 59e41ebf..a17d7ba2 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -57,7 +57,7 @@ fn clsag() { } let image = generate_key_image(&secrets.0); - let (clsag, pseudo_out) = Clsag::sign( + let (mut clsag, pseudo_out) = Clsag::sign( &mut OsRng, vec![( secrets.0, @@ -76,7 +76,12 @@ fn clsag() { msg, ) .swap_remove(0); + clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap(); + + // make sure verification fails if we throw a random `c1` at it. + clsag.c1 = random_scalar(&mut OsRng); + assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err()); } } From fcad402186f6a9d17646e731158b5dfa23464611 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 10 Apr 2024 06:34:01 -0400 Subject: [PATCH 061/126] cargo update Resolves deny error caused by h2. 
--- Cargo.lock | 266 ++++++++++++++++++++++++++--------------------------- 1 file changed, 133 insertions(+), 133 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ee0c2692..6840c5ea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -173,9 +173,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "approx" @@ -287,7 +287,7 @@ checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ "event-listener 4.0.3", "event-listener-strategy", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -298,7 +298,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -322,7 +322,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -344,7 +344,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -449,14 +449,14 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "bitcoin" -version = "0.31.1" +version = "0.31.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd00f3c09b5f21fb357abe32d29946eb8bb7a0862bae62c0b5e4a692acbbe73c" +checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" dependencies = [ "bech32", "bitcoin-internals", @@ -633,7 +633,7 @@ dependencies = [ "hyper 0.14.28", "hyperlocal", "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "serde", "serde_derive", "serde_json", @@ -677,7 +677,7 @@ dependencies = [ 
"proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "syn_derive", ] @@ -723,9 +723,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -964,7 +964,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1301,14 +1301,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "cxx" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dc7287237dd438b926a81a1a5605dad33d286870e5eee2db17bf2bcd9e92a" +checksum = "21db378d04296a84d8b7d047c36bb3954f0b46529db725d7e62fb02f9ba53ccc" dependencies = [ "cc", "cxxbridge-flags", @@ -1318,9 +1318,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47c6c8ad7c1a10d3ef0fe3ff6733f4db0d78f08ef0b13121543163ef327058b" +checksum = "3e5262a7fa3f0bae2a55b767c223ba98032d7c328f5c13fa5cdc980b77fc0658" dependencies = [ "cc", "codespan-reporting", @@ -1328,24 +1328,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "cxxbridge-flags" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "701a1ac7a697e249cdd8dc026d7a7dafbfd0dbcd8bd24ec55889f2bc13dd6287" +checksum = "be8dcadd2e2fb4a501e1d9e93d6e88e6ea494306d8272069c92d5a9edf8855c0" [[package]] name = "cxxbridge-macro" -version = "1.0.120" +version = "1.0.121" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b404f596046b0bb2d903a9c786b875a126261b52b7c3a64bbb66382c41c771df" +checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1401,9 +1401,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -1531,7 +1531,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1718,9 +1718,9 @@ dependencies = [ [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -1764,7 +1764,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1907,7 +1907,7 @@ dependencies = [ "regex", "serde", "serde_json", - "syn 2.0.55", + "syn 2.0.58", "toml 0.7.8", "walkdir", ] @@ -1925,7 +1925,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1951,7 +1951,7 @@ dependencies = [ "serde", "serde_json", "strum 0.25.0", - "syn 2.0.55", + "syn 2.0.58", "tempfile", "thiserror", "tiny-keccak", @@ -2008,7 +2008,7 @@ checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2018,7 +2018,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ "event-listener 4.0.3", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2040,7 +2040,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2314,7 +2314,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2326,7 +2326,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2336,7 +2336,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2484,7 +2484,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2495,7 +2495,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2554,7 +2554,7 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "pin-utils", "slab", ] @@ -2616,9 +2616,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" dependencies = [ "cfg-if", "libc", @@ -2696,9 +2696,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -2875,7 +2875,7 @@ checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.12", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2898,7 +2898,7 @@ dependencies = [ "futures-core", "http 1.1.0", "http-body 1.0.0", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2941,7 +2941,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "socket2 0.4.10", "tokio", "tower-service", @@ -2962,7 +2962,7 @@ dependencies = [ "http-body 1.0.0", "httparse", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "smallvec", "tokio", "want", @@ -2998,7 +2998,7 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "hyper 1.2.0", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "socket2 0.5.6", "tokio", "tower", @@ -3822,7 +3822,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4075,7 +4075,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4089,7 +4089,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4100,7 +4100,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4111,7 +4111,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4164,9 +4164,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memfd" @@ -4518,9 +4518,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.4" +version = "0.32.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4541eb06dce09c0241ebbaab7102f0a01a0c8994afed2e5d0d66775016e25ac2" +checksum = "3ea4908d4f23254adda3daa60ffef0f1ac7b8c3e9a864cf3cc154b251908a2ef" dependencies = [ "approx", "matrixmultiply", @@ -4607,9 +4607,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -4750,7 +4750,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5219,7 +5219,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5230,9 +5230,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -5271,7 +5271,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "rustix", "tracing", "windows-sys 0.52.0", @@ -5359,7 +5359,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" dependencies = [ "proc-macro2", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5443,7 +5443,7 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5489,7 +5489,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5602,7 +5602,7 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "quinn-proto", "quinn-udp", "rustc-hash", @@ -5644,9 +5644,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -5790,7 +5790,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5871,7 +5871,7 @@ dependencies = [ "mime", "once_cell", "percent-encoding", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "serde", "serde_json", "serde_urlencoded", @@ -6119,9 +6119,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" [[package]] name = "rustls-webpki" @@ -6146,9 +6146,9 @@ dependencies = [ [[package]] name = 
"rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" [[package]] name = "rw-stream-sink" @@ -6289,7 +6289,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -7050,7 +7050,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -7112,9 +7112,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.1" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" +checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" dependencies = [ "bitvec", "cfg-if", @@ -7126,9 +7126,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.1" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" +checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -7263,9 +7263,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7276,9 +7276,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7900,7 +7900,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -7916,13 +7916,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8199,7 +8199,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8395,7 +8395,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8414,7 +8414,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8586,7 +8586,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8739,7 +8739,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -8876,9 +8876,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -8918,7 +8918,7 @@ dependencies = [ "proc-macro2", 
"quote", "rustversion", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -9006,9 +9006,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.55" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -9024,7 +9024,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -9131,7 +9131,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -9227,9 +9227,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -9237,7 +9237,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "signal-hook-registry", "socket2 0.5.6", "tokio-macros", @@ -9252,7 +9252,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -9273,7 +9273,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tokio-util", ] @@ -9288,7 +9288,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", 
"tracing", ] @@ -9356,7 +9356,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tower-layer", "tower-service", @@ -9376,7 +9376,7 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "http-range-header", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tower-layer", "tower-service", ] @@ -9400,7 +9400,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tracing-attributes", "tracing-core", ] @@ -9413,7 +9413,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -9818,7 +9818,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -9852,7 +9852,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10161,7 +10161,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -10204,9 +10204,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = "7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10477,9 +10477,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "xmltree" @@ -10538,7 +10538,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -10558,14 +10558,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" dependencies = [ - "zstd 0.13.0", + "zstd 0.13.1", ] [[package]] @@ -10579,11 +10579,11 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ - "zstd-safe 7.0.0", + "zstd-safe 7.1.0", ] [[package]] @@ -10598,18 +10598,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", From 33dd412e67cd60bc6338ea3f90288f18b1e1c14d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 12 Apr 2024 00:38:40 -0400 Subject: [PATCH 062/126] Add bootnode code prior used in testnet-internal (#554) * Add 
bootnode code prior used in testnet-internal Also performs the devnet/testnet differentation done since the testnet branch. * Fixes * fmt --- Cargo.lock | 1 + substrate/node/Cargo.toml | 2 + substrate/node/src/chain_spec.rs | 102 ++++++++++++++++++++++++++++--- substrate/node/src/command.rs | 3 +- substrate/node/src/rpc.rs | 12 ++-- substrate/node/src/service.rs | 62 ++++++++++++++++++- 6 files changed, 169 insertions(+), 13 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6840c5ea..ee2ecdcf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7613,6 +7613,7 @@ dependencies = [ "futures-util", "hex", "jsonrpsee", + "libp2p", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 12ba4d17..60f7dc0f 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -26,6 +26,8 @@ hex = "0.4" rand_core = "0.6" schnorrkel = "0.11" +libp2p = "0.52" + sp-core = { git = "https://github.com/serai-dex/substrate" } sp-keystore = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index b630c00b..6fa8d6c3 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,6 +1,7 @@ use core::marker::PhantomData; +use std::collections::HashSet; -use sp_core::Pair as PairTrait; +use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; use sc_service::ChainType; @@ -23,7 +24,7 @@ fn wasm_binary() -> Vec { WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() } -fn testnet_genesis( +fn devnet_genesis( wasm_binary: &[u8], validators: &[&'static str], endowed_accounts: Vec, @@ -72,6 +73,57 @@ fn testnet_genesis( } } +fn testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { + let validators = validators + .into_iter() + .map(|validator| Public::decode(&mut 
hex::decode(validator).unwrap().as_slice()).unwrap()) + .collect::>(); + + assert_eq!(validators.iter().collect::>().len(), validators.len()); + + RuntimeGenesisConfig { + system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, + + transaction_payment: Default::default(), + + coins: CoinsConfig { + accounts: validators + .iter() + .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) })) + .collect(), + _ignore: Default::default(), + }, + + dex: DexConfig { + pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero], + _ignore: Default::default(), + }, + + validator_sets: ValidatorSetsConfig { + networks: serai_runtime::primitives::NETWORKS + .iter() + .map(|network| match network { + NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))), + }) + .collect(), + participants: validators.clone(), + }, + signals: SignalsConfig::default(), + babe: BabeConfig { + authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + }, + grandpa: GrandpaConfig { + authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), + _config: PhantomData, + }, + } +} + pub fn development_config() -> ChainSpec { let wasm_binary = wasm_binary(); @@ -82,7 +134,7 @@ pub fn development_config() -> ChainSpec { "devnet", ChainType::Development, move || { - testnet_genesis( + devnet_genesis( &wasm_binary, &["Alice"], vec![ @@ -100,7 +152,7 @@ pub fn development_config() -> ChainSpec { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-devnet"), // Fork ID None, // Properties @@ -110,7 +162,7 @@ pub fn development_config() -> ChainSpec { ) } -pub fn 
testnet_config() -> ChainSpec { +pub fn local_config() -> ChainSpec { let wasm_binary = wasm_binary(); ChainSpec::from_genesis( @@ -120,7 +172,7 @@ pub fn testnet_config() -> ChainSpec { "local", ChainType::Local, move || { - testnet_genesis( + devnet_genesis( &wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ @@ -138,7 +190,7 @@ pub fn testnet_config() -> ChainSpec { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-local"), // Fork ID None, // Properties @@ -147,3 +199,39 @@ pub fn testnet_config() -> ChainSpec { None, ) } + +pub fn testnet_config() -> ChainSpec { + let wasm_binary = wasm_binary(); + + ChainSpec::from_genesis( + // Name + "Test Network 2", + // ID + "testnet-2", + ChainType::Live, + move || { + let _ = testnet_genesis(&wasm_binary, vec![]); + todo!() + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("serai-testnet-2"), + // Fork ID + None, + // Properties + None, + // Extensions + None, + ) +} + +pub fn bootnode_multiaddrs(id: &str) -> Vec { + match id { + "devnet" | "local" => vec![], + "testnet-2" => todo!(), + _ => panic!("unrecognized network ID"), + } +} diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index 2f7ea0f7..71eee047 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -40,7 +40,8 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())), - "local" => Ok(Box::new(chain_spec::testnet_config())), + "local" => Ok(Box::new(chain_spec::local_config())), + "testnet" => Ok(Box::new(chain_spec::testnet_config())), _ => panic!("Unknown network ID"), } } diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs index d07778cc..b818c798 100644 --- a/substrate/node/src/rpc.rs +++ b/substrate/node/src/rpc.rs @@ -19,6 +19,7 @@ pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; pub struct FullDeps { + pub id: 
String, pub client: Arc, pub pool: Arc

, pub deny_unsafe: DenyUnsafe, @@ -46,18 +47,19 @@ where use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; let mut module = RpcModule::new(()); - let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps; + let FullDeps { id, client, pool, deny_unsafe, authority_discovery } = deps; module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; module.merge(TransactionPayment::new(client.clone()).into_rpc())?; if let Some(authority_discovery) = authority_discovery { - let mut authority_discovery_module = RpcModule::new((client, RwLock::new(authority_discovery))); + let mut authority_discovery_module = + RpcModule::new((id, client, RwLock::new(authority_discovery))); authority_discovery_module.register_async_method( "p2p_validators", |params, context| async move { let network: NetworkId = params.parse()?; - let (client, authority_discovery) = &*context; + let (id, client, authority_discovery) = &*context; let latest_block = client.info().best_hash; let validators = client.runtime_api().validators(latest_block, network).map_err(|_| { @@ -66,7 +68,9 @@ where "please report this at https://github.com/serai-dex/serai", ))) })?; - let mut all_p2p_addresses = vec![]; + // Always return the protocol's bootnodes + let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id); + // Additionally returns validators found over the DHT for validator in validators { let mut returned_addresses = authority_discovery .write() diff --git a/substrate/node/src/service.rs b/substrate/node/src/service.rs index 686e4c39..5f76decf 100644 --- a/substrate/node/src/service.rs +++ b/substrate/node/src/service.rs @@ -161,7 +161,7 @@ pub fn new_partial( )) } -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let ( sc_service::PartialComponents { client, @@ -176,6 +176,11 @@ pub fn new_full(config: Configuration) -> Result { keystore_container, ) = new_partial(&config)?; + 
config.network.node_name = "serai".to_string(); + config.network.client_version = "0.1.0".to_string(); + config.network.listen_addresses = + vec!["/ip4/0.0.0.0/tcp/30333".parse().unwrap(), "/ip6/::/tcp/30333".parse().unwrap()]; + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let grandpa_protocol_name = grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec); @@ -203,6 +208,59 @@ pub fn new_full(config: Configuration) -> Result { warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), })?; + task_manager.spawn_handle().spawn("bootnodes", "bootnodes", { + let network = network.clone(); + let id = config.chain_spec.id().to_string(); + + async move { + // Transforms the above Multiaddrs into MultiaddrWithPeerIds + // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in + // time and this fine for a testnet + let bootnodes = || async { + use libp2p::{Transport as TransportTrait, tcp::tokio::Transport, noise::Config}; + + let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id); + + let mut tasks = vec![]; + for multiaddr in bootnode_multiaddrs { + tasks.push(tokio::time::timeout( + core::time::Duration::from_secs(10), + tokio::task::spawn(async move { + let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; + let mut transport = Transport::default() + .upgrade(libp2p::core::upgrade::Version::V1) + .authenticate(noise) + .multiplex(libp2p::yamux::Config::default()); + let Ok(transport) = transport.dial(multiaddr.clone()) else { None? }; + let Ok((peer_id, _)) = transport.await else { None? 
}; + Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id }) + }), + )); + } + + let mut res = vec![]; + for task in tasks { + if let Ok(Ok(Some(bootnode))) = task.await { + res.push(bootnode); + } + } + res + }; + + use sc_network::{NetworkStatusProvider, NetworkPeers}; + loop { + if let Ok(status) = network.status().await { + if status.num_connected_peers < 3 { + for bootnode in bootnodes().await { + let _ = network.add_reserved_peer(bootnode); + } + } + } + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }); + if config.offchain_worker.enabled { task_manager.spawn_handle().spawn( "offchain-workers-runner", @@ -258,11 +316,13 @@ pub fn new_full(config: Configuration) -> Result { }; let rpc_builder = { + let id = config.chain_spec.id().to_string(); let client = client.clone(); let pool = transaction_pool.clone(); Box::new(move |deny_unsafe, _| { crate::rpc::create_full(crate::rpc::FullDeps { + id: id.clone(), client: client.clone(), pool: pool.clone(), deny_unsafe, From db2e8376df33c06f97c63b438d979f86f7c3d74e Mon Sep 17 00:00:00 2001 From: expiredhotdog <108702183+expiredhotdog@users.noreply.github.com> Date: Fri, 12 Apr 2024 23:52:56 +0000 Subject: [PATCH 063/126] use multiscalar_mul for CLSAG (#553) * use multiscalar_mul for CLSAG * use multiscalar_mul for CLSAG signing * use OnceLock for basepoint precomputation --- coins/monero/src/lib.rs | 14 +++++++++- coins/monero/src/ringct/clsag/mod.rs | 38 ++++++++++++++++++++-------- 2 files changed, 41 insertions(+), 11 deletions(-) diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs index 6d9c0a6b..4e6b26d1 100644 --- a/coins/monero/src/lib.rs +++ b/coins/monero/src/lib.rs @@ -14,7 +14,12 @@ use zeroize::{Zeroize, ZeroizeOnDrop}; use sha3::{Digest, Keccak256}; -use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint}; +use curve25519_dalek::{ + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, + scalar::Scalar, + 
edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, + traits::VartimePrecomputedMultiscalarMul, +}; pub use monero_generators::{H, decompress_point}; @@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar { *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert()) } +static BASEPOINT_PRECOMP_CELL: OnceLock = OnceLock::new(); +#[allow(non_snake_case)] +pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation { + BASEPOINT_PRECOMP_CELL + .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT])) +} + /// Monero protocol version. /// /// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index fd8253e8..3fe65254 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -12,14 +12,14 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use subtle::{ConstantTimeEq, ConditionallySelectable}; use curve25519_dalek::{ - constants::ED25519_BASEPOINT_TABLE, + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, scalar::Scalar, - traits::{IsIdentity, VartimePrecomputedMultiscalarMul}, + traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul}, edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, }; use crate::{ - INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, + INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, ringct::hash_to_point, serialize::*, }; @@ -100,8 +100,11 @@ fn core( ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) { let n = ring.len(); - let images_precomp = VartimeEdwardsPrecomputation::new([I, D]); - let D = D * INV_EIGHT(); + let images_precomp = match A_c1 { + Mode::Sign(..) => None, + Mode::Verify(..) 
=> Some(VartimeEdwardsPrecomputation::new([I, D])), + }; + let D_INV_EIGHT = D * INV_EIGHT(); // Generate the transcript // Instead of generating multiple, a single transcript is created and then edited as needed @@ -130,7 +133,7 @@ fn core( } to_hash.extend(I.compress().to_bytes()); - to_hash.extend(D.compress().to_bytes()); + to_hash.extend(D_INV_EIGHT.compress().to_bytes()); to_hash.extend(pseudo_out.compress().to_bytes()); // mu_P with agg_0 let mu_P = hash_to_scalar(&to_hash); @@ -174,10 +177,25 @@ fn core( let c_p = mu_P * c; let c_c = mu_C * c; - let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]); + // (s_i * G) + (c_p * P_i) + (c_c * C_i) + let L = match A_c1 { + Mode::Sign(..) => { + EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]]) + } + Mode::Verify(..) => { + BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]]) + } + }; + let PH = hash_to_point(&P[i]); - // Shouldn't be an issue as all of the variables in this vartime statement are public - let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]); + + // (c_p * I) + (c_c * D) + (s_i * PH) + let R = match A_c1 { + Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]), + Mode::Verify(..) => { + images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH]) + } + }; to_hash.truncate(((2 * n) + 3) * 32); to_hash.extend(L.compress().to_bytes()); @@ -191,7 +209,7 @@ fn core( } // This first tuple is needed to continue signing, the latter is the c to be tested/worked with - ((D, c * mu_P, c * mu_C), c1) + ((D_INV_EIGHT, c * mu_P, c * mu_C), c1) } /// CLSAG signature, as used in Monero. 
From dac46c8d7da0cf5230cde041a32cc59ec36994ac Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 12 Apr 2024 20:38:27 -0400 Subject: [PATCH 064/126] Correct comment in VS pallet --- substrate/validator-sets/pallet/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index d1385c2d..c852c4ce 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -477,7 +477,7 @@ pub mod pallet { let Some(top) = top else { return false }; - // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause an off-chain reduction of + // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round robin reduction of // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET // post_amortization_key_shares_for_top_validator yields what the top validator's key shares // would be after such a reduction, letting us evaluate this correctly From 6b4df4f2c0bf9d5e90262906fc813c1b7c505350 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 17 Apr 2024 21:54:10 -0400 Subject: [PATCH 065/126] Only have some nodes respond to latent heartbeats Also only respond if they're more than 2 blocks behind to minimize redundant sending of blocks. 
--- coordinator/src/main.rs | 1 + coordinator/src/p2p.rs | 42 ++++++++++--------- coordinator/src/tests/tributary/handle_p2p.rs | 11 ++++- coordinator/src/tests/tributary/sync.rs | 11 +++-- 4 files changed, 40 insertions(+), 25 deletions(-) diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4de23ae0..e6e49c3e 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1292,6 +1292,7 @@ pub async fn run( p2p.clone(), cosign_channel.clone(), tributary_event_listener_4, + ::generator() * key.deref(), )); // Handle all messages from processors diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 19bf299d..627af966 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -9,6 +9,8 @@ use std::{ use async_trait::async_trait; use rand_core::{RngCore, OsRng}; +use ciphersuite::{Ciphersuite, Ristretto}; + use scale::Encode; use borsh::{BorshSerialize, BorshDeserialize}; use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai}; @@ -612,6 +614,7 @@ pub async fn handle_p2p_task( p2p: P, cosign_channel: mpsc::UnboundedSender, mut tributary_event: broadcast::Receiver>, + our_key: ::G, ) { let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender>>::new())); tokio::spawn({ @@ -631,6 +634,8 @@ pub async fn handle_p2p_task( // Subscribe to the topic for this tributary p2p.subscribe(tributary.spec.set(), genesis).await; + let spec_set = tributary.spec.set(); + // Per-Tributary P2P message handler tokio::spawn({ let p2p = p2p.clone(); @@ -645,7 +650,7 @@ pub async fn handle_p2p_task( P2pMessageKind::Tributary(msg_genesis) => { assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", tributary.spec.set()); + log::trace!("handling message for tributary {:?}", spec_set); if tributary.tributary.handle_message(&msg.msg).await { P2p::broadcast(&p2p, msg.kind, msg.msg).await; } @@ -668,18 +673,13 @@ pub async fn handle_p2p_task( // Spawn a dedicated task as this 
may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { - /* // Have sqrt(n) nodes reply with the blocks - let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64; + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + let mut responders = f32::from(spec.n(&[])).sqrt().floor() as u64; // Try to have at least 3 responders if responders < 3 { - responders = tributary.spec.n().min(3).into(); + responders = spec.n(&[]).min(3).into(); } - */ - - /* - // Have up to three nodes respond - let responders = u64::from(spec.n().min(3)); // Decide which nodes will respond by using the latest block's hash as a // mutually agreed upon entropy source @@ -689,7 +689,7 @@ pub async fn handle_p2p_task( // (so the highest is 7, 8, 9) // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 let start = - usize::try_from(entropy % (u64::from(spec.n() + 1) - responders)) + usize::try_from(entropy % (u64::from(spec.n(&[]) + 1) - responders)) .unwrap(); let mut selected = false; for validator in &spec.validators() @@ -706,22 +706,24 @@ pub async fn handle_p2p_task( } log::debug!("received heartbeat and selected to respond"); - */ - // Have every node respond - // While we could only have a subset respond, LibP2P will sync all messages - // it isn't aware of - // It's cheaper to be aware from our disk than from over the network + // Have the selected nodes respond // TODO: Spawn a dedicated topic for this heartbeat response? let mut latest = msg.msg[.. 32].try_into().unwrap(); + let mut to_send = vec![]; while let Some(next) = reader.block_after(&latest) { - let mut res = reader.block(&next).unwrap().serialize(); - res.extend(reader.commit(&next).unwrap()); - // Also include the timestamp used within the Heartbeat - res.extend(&msg.msg[32 .. 
40]); - p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + to_send.push(next); latest = next; } + if to_send.len() > 1 { + for next in to_send { + let mut res = reader.block(&next).unwrap().serialize(); + res.extend(reader.commit(&next).unwrap()); + // Also include the timestamp used within the Heartbeat + res.extend(&msg.msg[32 .. 40]); + p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + } + } }); } diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs index 756f4561..daa0cf02 100644 --- a/coordinator/src/tests/tributary/handle_p2p.rs +++ b/coordinator/src/tests/tributary/handle_p2p.rs @@ -3,6 +3,8 @@ use std::sync::Arc; use rand_core::OsRng; +use ciphersuite::{Ciphersuite, Ristretto}; + use tokio::{ sync::{mpsc, broadcast}, time::sleep, @@ -35,12 +37,17 @@ async fn handle_p2p_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; - for (p2p, tributary) in tributaries.drain(..) 
{ + for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); + tokio::spawn(handle_p2p_task( + p2p, + cosign_send, + new_tributary_recv, + ::generator() * *keys[i], + )); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 0a468c63..1af08fa8 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -45,12 +45,17 @@ async fn sync_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; let mut p2p_threads = vec![]; - for (p2p, tributary) in tributaries.drain(..) 
{ + for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); + let thread = tokio::spawn(handle_p2p_task( + p2p, + cosign_send, + new_tributary_recv, + ::generator() * *keys[i], + )); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") @@ -86,7 +91,7 @@ async fn sync_test() { let syncer_tributary = Arc::new(syncer_tributary); let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv)); + tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv, syncer_key)); syncer_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), From 4960c3222e9c7ec94a33a4c951f50f8215ed462c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 18 Apr 2024 01:24:38 -0400 Subject: [PATCH 066/126] Ensure we don't reply to stale heartbeats --- coordinator/src/p2p.rs | 29 ++++++++++++++++++++--------- 1 file changed, 20 insertions(+), 9 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 627af966..cce96d86 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -558,6 +558,17 @@ impl TributaryP2p for LibP2p { } } +fn heartbeat_time_unit() -> u64 { + // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating + let timestamp = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs(); + // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to + // 
overlap + timestamp / u64::from(Tributary::::block_time()) +} + pub async fn heartbeat_tributaries_task( p2p: P, mut tributary_event: broadcast::Receiver>, @@ -592,14 +603,7 @@ pub async fn heartbeat_tributaries_task( if SystemTime::now() > (block_time + Duration::from_secs(60)) { log::warn!("last known tributary block was over a minute ago"); let mut msg = tip.to_vec(); - // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating - let timestamp = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("system clock is wrong") - .as_secs(); - // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to - // overlap - let time_unit = timestamp / u64::from(Tributary::::block_time()); + let time_unit = heartbeat_time_unit::(); msg.extend(time_unit.to_le_bytes()); P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; } @@ -666,6 +670,13 @@ pub async fn handle_p2p_task( log::error!("validator sent invalid heartbeat"); continue; } + // Only respond to recent heartbeats + let msg_time_unit = u64::from_le_bytes(msg.msg[32 .. 
40].try_into().expect( + "length-checked heartbeat message didn't have 8 bytes for the u64", + )); + if heartbeat_time_unit::().saturating_sub(msg_time_unit) > 1 { + continue; + } let p2p = p2p.clone(); let spec = tributary.spec.clone(); @@ -715,7 +726,7 @@ pub async fn handle_p2p_task( to_send.push(next); latest = next; } - if to_send.len() > 1 { + if to_send.len() > 3 { for next in to_send { let mut res = reader.block(&next).unwrap().serialize(); res.extend(reader.commit(&next).unwrap()); From fea16df567af64e2358d84e107231b641b138638 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 18 Apr 2024 01:39:34 -0400 Subject: [PATCH 067/126] Only reply to heartbeats after a certain distance --- coordinator/src/p2p.rs | 87 ++++++++++++++++++++++++------------------ 1 file changed, 49 insertions(+), 38 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index cce96d86..8fdb2e81 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -644,6 +644,7 @@ pub async fn handle_p2p_task( tokio::spawn({ let p2p = p2p.clone(); async move { + let mut last_replied_to_heartbeat = 0; loop { let Some(mut msg) = recv.recv().await else { // Channel closure happens when the tributary retires @@ -666,6 +667,12 @@ pub async fn handle_p2p_task( // them? P2pMessageKind::Heartbeat(msg_genesis) => { assert_eq!(msg_genesis, genesis); + + let current_time_unit = heartbeat_time_unit::(); + if current_time_unit.saturating_sub(last_replied_to_heartbeat) < 10 { + continue; + } + if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); continue; @@ -674,50 +681,54 @@ pub async fn handle_p2p_task( let msg_time_unit = u64::from_le_bytes(msg.msg[32 .. 
40].try_into().expect( "length-checked heartbeat message didn't have 8 bytes for the u64", )); - if heartbeat_time_unit::().saturating_sub(msg_time_unit) > 1 { + if current_time_unit.saturating_sub(msg_time_unit) > 1 { continue; } - let p2p = p2p.clone(); - let spec = tributary.spec.clone(); + // This is the network's last replied to, not ours specifically + last_replied_to_heartbeat = current_time_unit; + let reader = tributary.tributary.reader(); + + // Have sqrt(n) nodes reply with the blocks + #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] + let mut responders = f32::from(tributary.spec.n(&[])).sqrt().floor() as u64; + // Try to have at least 3 responders + if responders < 3 { + responders = tributary.spec.n(&[]).min(3).into(); + } + + // Decide which nodes will respond by using the latest block's hash as a + // mutually agreed upon entropy source + // This isn't a secure source of entropy, yet it's fine for this + let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap()); + // If n = 10, responders = 3, we want `start` to be 0 ..= 7 + // (so the highest is 7, 8, 9) + // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 + let start = usize::try_from( + entropy % (u64::from(tributary.spec.n(&[]) + 1) - responders), + ) + .unwrap(); + let mut selected = false; + for validator in &tributary.spec.validators() + [start .. 
(start + usize::try_from(responders).unwrap())] + { + if our_key == validator.0 { + selected = true; + break; + } + } + if !selected { + log::debug!("received heartbeat and not selected to respond"); + return; + } + + log::debug!("received heartbeat and selected to respond"); + + let p2p = p2p.clone(); // Spawn a dedicated task as this may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { - // Have sqrt(n) nodes reply with the blocks - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] - let mut responders = f32::from(spec.n(&[])).sqrt().floor() as u64; - // Try to have at least 3 responders - if responders < 3 { - responders = spec.n(&[]).min(3).into(); - } - - // Decide which nodes will respond by using the latest block's hash as a - // mutually agreed upon entropy source - // This isn't a secure source of entropy, yet it's fine for this - let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap()); - // If n = 10, responders = 3, we want `start` to be 0 ..= 7 - // (so the highest is 7, 8, 9) - // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 - let start = - usize::try_from(entropy % (u64::from(spec.n(&[]) + 1) - responders)) - .unwrap(); - let mut selected = false; - for validator in &spec.validators() - [start .. (start + usize::try_from(responders).unwrap())] - { - if our_key == validator.0 { - selected = true; - break; - } - } - if !selected { - log::debug!("received heartbeat and not selected to respond"); - return; - } - - log::debug!("received heartbeat and selected to respond"); - // Have the selected nodes respond // TODO: Spawn a dedicated topic for this heartbeat response? let mut latest = msg.msg[.. 32].try_into().unwrap(); @@ -732,7 +743,7 @@ pub async fn handle_p2p_task( res.extend(reader.commit(&next).unwrap()); // Also include the timestamp used within the Heartbeat res.extend(&msg.msg[32 .. 
40]); - p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + p2p.send(msg.sender, P2pMessageKind::Block(genesis), res).await; } } }); From bcc88c3e866cff2a2b0a75fa3403ef022c3b0d4d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 18 Apr 2024 01:42:40 -0400 Subject: [PATCH 068/126] Don't broadcast added blocks Online validators should inherently have them. Offline validators will receive from the sync protocol. This does somewhat eliminate the class of nodes who would follow the blockchain (without validating it), yet that's fine for the performance benefit. --- coordinator/tributary/src/lib.rs | 19 +------------------ coordinator/tributary/src/tendermint/mod.rs | 10 +++------- 2 files changed, 4 insertions(+), 25 deletions(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 99deb588..dcf38c68 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; -pub(crate) const BLOCK_MESSAGE: u8 = 1; -pub(crate) const TRANSACTION_MESSAGE: u8 = 2; +pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1 #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] @@ -336,9 +335,6 @@ impl Tributary { // Return true if the message should be rebroadcasted. 
pub async fn handle_message(&self, msg: &[u8]) -> bool { - // Acquire the lock now to prevent sync_block from being run at the same time - let mut sync_block = self.synced_block_result.write().await; - match msg.first() { Some(&TRANSACTION_MESSAGE) => { let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { @@ -370,19 +366,6 @@ impl Tributary { false } - Some(&BLOCK_MESSAGE) => { - let mut msg_ref = &msg[1 ..]; - let Ok(block) = Block::::read(&mut msg_ref) else { - log::error!("received invalid block message"); - return false; - }; - let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec(); - if self.sync_block_internal(block, commit, &mut sync_block).await { - log::debug!("synced block over p2p net instead of building the commit ourselves"); - } - false - } - _ => false, } } diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index df8f7219..dc249a35 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -41,7 +41,7 @@ use tendermint::{ use tokio::sync::RwLock; use crate::{ - TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, ReadWrite, + TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, }; @@ -414,12 +414,7 @@ impl Network for TendermintNetwork ); match block_res { Ok(()) => { - // If we successfully added this block, broadcast it - // TODO: Move this under the coordinator once we set up on new block notifications? 
- let mut msg = serialized_block.0; - msg.insert(0, BLOCK_MESSAGE); - msg.extend(encoded_commit); - self.p2p.broadcast(self.genesis, msg).await; + // If we successfully added this block, break break; } Err(BlockError::NonLocalProvided(hash)) => { @@ -428,6 +423,7 @@ impl Network for TendermintNetwork hex::encode(hash), hex::encode(self.genesis) ); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; } _ => return invalid_block(), } From 5830c2463d9115a23930d7c321600be70e4610a9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 18 Apr 2024 02:03:28 -0400 Subject: [PATCH 069/126] fmt --- coordinator/tributary/src/tendermint/mod.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index dc249a35..e38efa5d 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -41,9 +41,8 @@ use tendermint::{ use tokio::sync::RwLock; use crate::{ - TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, - transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, - Blockchain, P2p, + TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, + Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, }; pub mod tx; From 593aefd2295b4a5bdc426ec5bf24546483b52105 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Thu, 18 Apr 2024 02:51:38 -0400 Subject: [PATCH 070/126] Extend time in sync test --- coordinator/src/p2p.rs | 4 ++-- coordinator/src/tests/tributary/sync.rs | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 8fdb2e81..48a23aa5 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -715,12 +715,12 @@ pub async fn handle_p2p_task( { if our_key == validator.0 { selected = true; - break; + continue; } } if !selected { log::debug!("received heartbeat and 
not selected to respond"); - return; + continue; } log::debug!("received heartbeat and selected to respond"); diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 1af08fa8..9f01ca4d 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -121,8 +121,8 @@ async fn sync_test() { .map_err(|_| "failed to send ActiveTributary to heartbeat") .unwrap(); - // The heartbeat is once every 10 blocks - sleep(Duration::from_secs(10 * block_time)).await; + // The heartbeat is once every 10 blocks, with some limitations + sleep(Duration::from_secs(20 * block_time)).await; assert!(syncer_tributary.tip().await != spec.genesis()); // Verify it synced to the tip From 940e9553fd384d482646810ca6a4609604ce5491 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 19 Apr 2024 06:12:33 -0400 Subject: [PATCH 071/126] Add missing crates to GH workflows --- .github/workflows/common-tests.yml | 3 ++- .github/workflows/coordinator-tests.yml | 2 +- .github/workflows/full-stack-tests.yml | 2 +- .github/workflows/message-queue-tests.yml | 2 +- .github/workflows/processor-tests.yml | 2 +- .github/workflows/reproducible-runtime.yml | 2 +- .github/workflows/tests.yml | 3 +++ 7 files changed, 10 insertions(+), 6 deletions(-) diff --git a/.github/workflows/common-tests.yml b/.github/workflows/common-tests.yml index 0135fcaf..f0545f0b 100644 --- a/.github/workflows/common-tests.yml +++ b/.github/workflows/common-tests.yml @@ -28,4 +28,5 @@ jobs: -p std-shims \ -p zalloc \ -p serai-db \ - -p serai-env + -p serai-env \ + -p simple-request diff --git a/.github/workflows/coordinator-tests.yml b/.github/workflows/coordinator-tests.yml index 7cc4d7b3..138fd106 100644 --- a/.github/workflows/coordinator-tests.yml +++ b/.github/workflows/coordinator-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run coordinator Docker tests - run: cd tests/coordinator && GITHUB_CI=true 
RUST_BACKTRACE=1 cargo test + run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/full-stack-tests.yml b/.github/workflows/full-stack-tests.yml index 3d1c86a1..baacf774 100644 --- a/.github/workflows/full-stack-tests.yml +++ b/.github/workflows/full-stack-tests.yml @@ -19,4 +19,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Full Stack Docker tests - run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/message-queue-tests.yml b/.github/workflows/message-queue-tests.yml index 273af237..7894549c 100644 --- a/.github/workflows/message-queue-tests.yml +++ b/.github/workflows/message-queue-tests.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run message-queue Docker tests - run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/processor-tests.yml b/.github/workflows/processor-tests.yml index 88f4429c..0b5ecbbe 100644 --- a/.github/workflows/processor-tests.yml +++ b/.github/workflows/processor-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run processor Docker tests - run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/reproducible-runtime.yml b/.github/workflows/reproducible-runtime.yml index 16256ab6..d34e5ca5 100644 --- a/.github/workflows/reproducible-runtime.yml +++ b/.github/workflows/reproducible-runtime.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Reproducible Runtime tests - run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd 
tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 257c1dd5..e32d2119 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,6 +43,7 @@ jobs: -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ + -p serai-orchestrator \ -p serai-docker-tests test-substrate: @@ -64,7 +65,9 @@ jobs: -p serai-validator-sets-pallet \ -p serai-in-instructions-primitives \ -p serai-in-instructions-pallet \ + -p serai-signals-primitives \ -p serai-signals-pallet \ + -p serai-abi \ -p serai-runtime \ -p serai-node From a4428761f7de88d8c8c823a1f4985fe9f05f4b17 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 19 Apr 2024 06:03:15 -0400 Subject: [PATCH 072/126] Bitcoin 27.0 --- .github/actions/bitcoin/action.yml | 2 +- .github/actions/test-dependencies/action.yml | 2 +- orchestration/src/coins/bitcoin.rs | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 90310868..5008b690 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -5,7 +5,7 @@ inputs: version: description: "Version to download and run" required: false - default: 24.0.1 + default: 27.0 runs: using: "composite" diff --git a/.github/actions/test-dependencies/action.yml b/.github/actions/test-dependencies/action.yml index a19e1704..7487a33b 100644 --- a/.github/actions/test-dependencies/action.yml +++ b/.github/actions/test-dependencies/action.yml @@ -10,7 +10,7 @@ inputs: bitcoin-version: description: "Bitcoin version to download and run as a regtest node" required: false - default: 24.0.1 + default: 27.0 runs: using: "composite" diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index a5c8b21c..527b1062 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -7,7 +7,7 @@ 
pub fn bitcoin(orchestration_path: &Path, network: Network) { const DOWNLOAD_BITCOIN: &str = r#" FROM alpine:latest as bitcoin -ENV BITCOIN_VERSION=26.0 +ENV BITCOIN_VERSION=27.0 RUN apk --no-cache add git gnupg From 4c349ae605f2eb106f694ddb3384927b9ffe635d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 20 Apr 2024 18:10:49 -0400 Subject: [PATCH 073/126] Redo how tendermint-machine checks if messages were prior sent Instead of saving, for every sent message, if it was sent or not, we track the latest block/round participated in. These two keys are comprehensive to all prior block/rounds. We then use three keys for the latest round's proposal/prevote/precommit, enabling tracking current state as necessary to prevent equivocations with just 5 keys. The storage of the latest three messages also enables proper rebroadcasting of the current round (not implemented in this commit). --- coordinator/tributary/tendermint/src/block.rs | 66 ++++++++++++++----- 1 file changed, 51 insertions(+), 15 deletions(-) diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 71dfb3cc..6dfacfdb 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -139,10 +139,8 @@ impl BlockData { // 27, 33, 41, 46, 60, 64 self.round_mut().step = data.step(); - // Only return a message to if we're actually a current validator and haven't prior posted a - // message + // Only return a message to if we're actually a current validator let round_number = self.round().number; - let step = data.step(); let res = self.validator_id.map(|validator_id| Message { sender: validator_id, block: self.number, @@ -150,21 +148,59 @@ impl BlockData { data, }); - if res.is_some() { + if let Some(res) = res.as_ref() { + const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block"; + const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round"; + const PROPOSE_KEY: &[u8] = 
b"tendermint-machine-sent_propose"; + const PEVOTE_KEY: &[u8] = b"tendermint-machine-sent_prevote"; + const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit"; + + let genesis = self.genesis; + let key = |prefix: &[u8]| [prefix, &genesis].concat(); + let mut txn = self.db.txn(); - let key = [ - b"tendermint-machine_already_sent_message".as_ref(), - &self.genesis, - &self.number.0.to_le_bytes(), - &round_number.0.to_le_bytes(), - &step.encode(), - ] - .concat(); - // If we've already sent a message, return - if txn.get(&key).is_some() { + + // Ensure we haven't prior sent a message for a future block/round + let last_block_or_round = |txn: &mut ::Transaction<'_>, prefix, current| { + let key = key(prefix); + let latest = + u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap()); + if latest > current { + None?; + } + if current > latest { + txn.put(&key, current.to_le_bytes()); + return Some(true); + } + Some(false) + }; + let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?; + if new_block { + // Delete the latest round key + txn.del(&key(LATEST_ROUND_KEY)); + } + let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?; + if new_block || new_round { + // Delete the messages for the old round + txn.del(&key(PROPOSE_KEY)); + txn.del(&key(PEVOTE_KEY)); + txn.del(&key(PRECOMMIT_KEY)); + } + + // Check we haven't sent this message within this round + let msg_key = key(match res.data.step() { + Step::Propose => PROPOSE_KEY, + Step::Prevote => PEVOTE_KEY, + Step::Precommit => PRECOMMIT_KEY, + }); + if txn.get(&msg_key).is_some() { + assert!(!new_block); + assert!(!new_round); None?; } - txn.put(&key, []); + // Put this message to the DB + txn.put(&msg_key, res.encode()); + txn.commit(); } From bba7d2a356d22f22a9c1706a077ac5de7c03f814 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 20 Apr 2024 18:13:44 -0400 Subject: [PATCH 074/126] Better logs in tendermint-machine --- 
coordinator/tributary/tendermint/src/lib.rs | 26 ++++++++++++++++++--- 1 file changed, 23 insertions(+), 3 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 77805677..c7cf069a 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -710,7 +710,13 @@ impl TendermintMachine { if !self.block.log.log(signed.clone())? { return Err(TendermintError::AlreadyHandled); } - log::debug!(target: "tendermint", "received new tendermint message"); + log::debug!( + target: "tendermint", + "received new tendermint message (block: {}, round: {}, step: {:?})", + msg.block.0, + msg.round.0, + msg.data.step(), + ); // All functions, except for the finalizer and the jump, are locked to the current round @@ -757,6 +763,8 @@ impl TendermintMachine { // 55-56 // Jump, enabling processing by the below code if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { + log::debug!("jumping from round {} to round {}", self.block.round().number.0, msg.round.0); + // Jump to the new round. 
let proposer = self.round(msg.round, None); @@ -814,13 +822,24 @@ impl TendermintMachine { if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { let (participation, weight) = self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); + let threshold_weight = self.weights.threshold(); + if (participation < threshold_weight) && + ((threshold_weight - participation) > (threshold_weight / 10)) + { + log::trace!( + "close to setting prevote timeout, participation: {}, needed: {}", + participation, + threshold_weight + ); + } // 34-35 - if participation >= self.weights.threshold() { + if participation >= threshold_weight { + log::trace!("setting timeout for prevote due to sufficient participation"); self.block.round_mut().set_timeout(Step::Prevote); } // 44-46 - if weight >= self.weights.threshold() { + if weight >= threshold_weight { self.broadcast(Data::Precommit(None)); return Ok(None); } @@ -830,6 +849,7 @@ impl TendermintMachine { if matches!(msg.data, Data::Precommit(_)) && self.block.log.has_participation(self.block.round().number, Step::Precommit) { + log::trace!("setting timeout for precommit due to sufficient participation"); self.block.round_mut().set_timeout(Step::Precommit); } From ac9e356af468916a783f88681a5293e7470882ba Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 20 Apr 2024 19:15:15 -0400 Subject: [PATCH 075/126] Correct log targets in tendermint-machine --- coordinator/tributary/tendermint/src/lib.rs | 40 +++++++++++++++---- .../tributary/tendermint/src/message_log.rs | 3 +- 2 files changed, 34 insertions(+), 9 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index c7cf069a..a240d13a 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -313,11 +313,16 @@ impl TendermintMachine { let time_until_round_end = 
round_end.instant().saturating_duration_since(Instant::now()); if time_until_round_end == Duration::ZERO { log::trace!( + target: "tendermint", "resetting when prior round ended {}ms ago", Instant::now().saturating_duration_since(round_end.instant()).as_millis(), ); } - log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis()); + log::trace!( + target: "tendermint", + "sleeping until round ends in {}ms", + time_until_round_end.as_millis(), + ); sleep(time_until_round_end).await; // Clear our outbound message queue @@ -598,7 +603,11 @@ impl TendermintMachine { ); let id = block.id(); let proposal = self.network.add_block(block, commit).await; - log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref())); + log::trace!( + target: "tendermint", + "added block {} (produced by machine)", + hex::encode(id.as_ref()), + ); self.reset(msg.round, proposal).await; } Err(TendermintError::Malicious(sender, evidence)) => { @@ -692,7 +701,12 @@ impl TendermintMachine { (msg.round == self.block.round().number) && (msg.data.step() == Step::Propose) { - log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0); + log::trace!( + target: "tendermint", + "received Propose for block {}, round {}", + msg.block.0, + msg.round.0, + ); } // If this is a precommit, verify its signature @@ -763,7 +777,12 @@ impl TendermintMachine { // 55-56 // Jump, enabling processing by the below code if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { - log::debug!("jumping from round {} to round {}", self.block.round().number.0, msg.round.0); + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); // Jump to the new round. 
let proposer = self.round(msg.round, None); @@ -827,14 +846,18 @@ impl TendermintMachine { ((threshold_weight - participation) > (threshold_weight / 10)) { log::trace!( + target: "tendermint", "close to setting prevote timeout, participation: {}, needed: {}", participation, - threshold_weight + threshold_weight, ); } // 34-35 if participation >= threshold_weight { - log::trace!("setting timeout for prevote due to sufficient participation"); + log::trace!( + target: "tendermint", + "setting timeout for prevote due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Prevote); } @@ -849,7 +872,10 @@ impl TendermintMachine { if matches!(msg.data, Data::Precommit(_)) && self.block.log.has_participation(self.block.round().number, Step::Precommit) { - log::trace!("setting timeout for precommit due to sufficient participation"); + log::trace!( + target: "tendermint", + "setting timeout for precommit due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Precommit); } diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index e045189b..3959852d 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, collections::HashMap}; -use log::debug; use parity_scale_codec::Encode; use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; @@ -27,7 +26,7 @@ impl MessageLog { let step = msg.data.step(); if let Some(existing) = msgs.get(&step) { if existing.msg.data != msg.data { - debug!( + log::debug!( target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); From fd4f247917cb83de19f933f75e8cf90008b25976 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 20 Apr 2024 19:54:16 -0400 Subject: [PATCH 076/126] Correct log which didn't work as intended --- coordinator/tributary/tendermint/src/lib.rs | 6 
++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index a240d13a..da80a41c 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -842,12 +842,10 @@ impl TendermintMachine { let (participation, weight) = self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); let threshold_weight = self.weights.threshold(); - if (participation < threshold_weight) && - ((threshold_weight - participation) > (threshold_weight / 10)) - { + if participation < threshold_weight { log::trace!( target: "tendermint", - "close to setting prevote timeout, participation: {}, needed: {}", + "progess towards setting prevote timeout, participation: {}, needed: {}", participation, threshold_weight, ); From 523d2ac911890322b86186b845093ff9b8033b92 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 05:30:31 -0400 Subject: [PATCH 077/126] Rewrite tendermint's message handling loop to much more clearly match the paper (#560) * Rewrite tendermint's message handling loop to much more clearly match the paper No longer checks relevant branches upon messages, yet all branches upon any state change. This is slower, yet easier to review and likely without one or two rare edge cases. When reviewing, please see page 5 of https://arxiv.org/pdf/1807.04938.pdf. Lines from the specified algorithm can be found in the code by searching for "// L". * Sane rebroadcasting of consensus messages Instead of broadcasting the last n messages on the Tributary side of things, we now have the machine rebroadcast the message tape for the current block. 
* Only rebroadcast messages which didn't error in some way * Only rebroadcast our own messages for tendermint --- coordinator/tributary/src/lib.rs | 34 +- coordinator/tributary/src/tendermint/mod.rs | 32 +- coordinator/tributary/tendermint/src/block.rs | 17 +- coordinator/tributary/tendermint/src/ext.rs | 2 +- coordinator/tributary/tendermint/src/lib.rs | 981 ++++++++++-------- .../tributary/tendermint/src/message_log.rs | 15 +- coordinator/tributary/tendermint/tests/ext.rs | 2 +- 7 files changed, 555 insertions(+), 528 deletions(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index dcf38c68..121ac385 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -1,5 +1,5 @@ use core::{marker::PhantomData, fmt::Debug}; -use std::{sync::Arc, io, collections::VecDeque}; +use std::{sync::Arc, io}; use async_trait::async_trait; @@ -154,14 +154,6 @@ pub struct Tributary { synced_block: Arc>>>, synced_block_result: Arc>, messages: Arc>>>, - - p2p_meta_task_handle: Arc, -} - -impl Drop for Tributary { - fn drop(&mut self) { - self.p2p_meta_task_handle.abort(); - } } impl Tributary { @@ -193,28 +185,7 @@ impl Tributary { ); let blockchain = Arc::new(RwLock::new(blockchain)); - let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new())); - // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the - // P2P layer - let p2p_meta_task_handle = Arc::new( - tokio::spawn({ - let to_rebroadcast = to_rebroadcast.clone(); - let p2p = p2p.clone(); - async move { - loop { - let to_rebroadcast = to_rebroadcast.read().await.clone(); - for msg in to_rebroadcast { - p2p.broadcast(genesis, msg).await; - } - tokio::time::sleep(core::time::Duration::from_secs(60)).await; - } - } - }) - .abort_handle(), - ); - - let network = - TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; + let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; let 
TendermintHandle { synced_block, synced_block_result, messages, machine } = TendermintMachine::new( @@ -235,7 +206,6 @@ impl Tributary { synced_block: Arc::new(RwLock::new(synced_block)), synced_block_result: Arc::new(RwLock::new(synced_block_result)), messages: Arc::new(RwLock::new(messages)), - p2p_meta_task_handle, }) } diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index e38efa5d..0ce6232c 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -1,8 +1,5 @@ use core::ops::Deref; -use std::{ - sync::Arc, - collections::{VecDeque, HashMap}, -}; +use std::{sync::Arc, collections::HashMap}; use async_trait::async_trait; @@ -270,8 +267,6 @@ pub struct TendermintNetwork { pub(crate) validators: Arc, pub(crate) blockchain: Arc>>, - pub(crate) to_rebroadcast: Arc>>>, - pub(crate) p2p: P, } @@ -308,26 +303,6 @@ impl Network for TendermintNetwork async fn broadcast(&mut self, msg: SignedMessageFor) { let mut to_broadcast = vec![TENDERMINT_MESSAGE]; to_broadcast.extend(msg.encode()); - - // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second - // until the block it's trying to build is complete - // If the P2P layer drops a message before all nodes obtained access, or a node had an - // intermittent failure, this will ensure reconcilliation - // This is atrocious if there's no content-based deduplication protocol for messages actively - // being gossiped - // LibP2p, as used by Serai, is configured to content-based deduplicate - { - let mut to_rebroadcast_lock = self.to_rebroadcast.write().await; - to_rebroadcast_lock.push_back(to_broadcast.clone()); - // We should have, ideally, 3 * validators messages within a round - // Therefore, this should keep the most recent 2-rounds - // TODO: This isn't perfect. 
Each participant should just rebroadcast their latest round of - // messages - while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) { - to_rebroadcast_lock.pop_front(); - } - } - self.p2p.broadcast(self.genesis, to_broadcast).await } @@ -366,7 +341,7 @@ impl Network for TendermintNetwork } } - async fn validate(&mut self, block: &Self::Block) -> Result<(), TendermintBlockError> { + async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> { let block = Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; self @@ -428,9 +403,6 @@ impl Network for TendermintNetwork } } - // Since we've added a valid block, clear to_rebroadcast - *self.to_rebroadcast.write().await = VecDeque::new(); - Some(TendermintBlock( self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), )) diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 6dfacfdb..236b4816 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -3,7 +3,6 @@ use std::{ collections::{HashSet, HashMap}, }; -use parity_scale_codec::Encode; use serai_db::{Get, DbTxn, Db}; use crate::{ @@ -20,7 +19,7 @@ pub(crate) struct BlockData { pub(crate) number: BlockNumber, pub(crate) validator_id: Option, - pub(crate) proposal: Option, + pub(crate) our_proposal: Option, pub(crate) log: MessageLog, pub(crate) slashes: HashSet, @@ -43,7 +42,7 @@ impl BlockData { weights: Arc, number: BlockNumber, validator_id: Option, - proposal: Option, + our_proposal: Option, ) -> BlockData { BlockData { db, @@ -51,7 +50,7 @@ impl BlockData { number, validator_id, - proposal, + our_proposal, log: MessageLog::new(weights), slashes: HashSet::new(), @@ -108,17 +107,17 @@ impl BlockData { self.populate_end_time(round); } - // 11-13 + // L11-13 self.round = Some(RoundData::::new( round, time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 
- 1)]), )); self.end_time.insert(round, self.round().end_time()); - // 14-21 + // L14-21 if Some(proposer) == self.validator_id { let (round, block) = self.valid.clone().unzip(); - block.or_else(|| self.proposal.clone()).map(|block| Data::Proposal(round, block)) + block.or_else(|| self.our_proposal.clone()).map(|block| Data::Proposal(round, block)) } else { self.round_mut().set_timeout(Step::Propose); None @@ -198,8 +197,8 @@ impl BlockData { assert!(!new_round); None?; } - // Put this message to the DB - txn.put(&msg_key, res.encode()); + // Put that we're sending this message to the DB + txn.put(&msg_key, []); txn.commit(); } diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index b3d568a2..3869d9d9 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -288,7 +288,7 @@ pub trait Network: Sized + Send + Sync { async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); /// Validate a block. - async fn validate(&mut self, block: &Self::Block) -> Result<(), BlockError>; + async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>; /// Add a block, returning the proposal for the next one. 
/// diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index da80a41c..edd26183 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -6,7 +6,7 @@ use std::{ collections::VecDeque, }; -use parity_scale_codec::{Encode, Decode}; +use parity_scale_codec::{Encode, Decode, IoReader}; use futures_channel::mpsc; use futures_util::{ @@ -15,6 +15,8 @@ use futures_util::{ }; use tokio::time::sleep; +use serai_db::{Get, DbTxn, Db}; + pub mod time; use time::{sys_time, CanonicalInstant}; @@ -30,6 +32,11 @@ pub(crate) mod message_log; pub mod ext; use ext::*; +const MESSAGE_TAPE_KEY: &[u8] = b"tendermint-machine-message_tape"; +fn message_tape_key(genesis: [u8; 32]) -> Vec { + [MESSAGE_TAPE_KEY, &genesis].concat() +} + pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec { [&end_time.to_le_bytes(), id].concat() } @@ -103,9 +110,23 @@ impl SignedMessage { } } +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] +pub enum SlashReason { + FailToPropose, + InvalidBlock, + InvalidProposer, +} + +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +pub enum Evidence { + ConflictingMessages(Vec, Vec), + InvalidPrecommit(Vec), + InvalidValidRound(Vec), +} + #[derive(Clone, PartialEq, Eq, Debug)] -pub enum TendermintError { - Malicious(N::ValidatorId, Option), +pub enum TendermintError { + Malicious, Temporal, AlreadyHandled, InvalidEvidence, @@ -126,20 +147,6 @@ pub type SignedMessageFor = SignedMessage< <::SignatureScheme as SignatureScheme>::Signature, >; -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] -pub enum SlashReason { - FailToPropose, - InvalidBlock, - InvalidMessage, -} - -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -pub enum Evidence { - ConflictingMessages(Vec, Vec), - InvalidPrecommit(Vec), - InvalidValidRound(Vec), -} - pub fn decode_signed_message(mut data: &[u8]) -> Option> { SignedMessageFor::::decode(&mut data).ok() } @@ 
-147,7 +154,7 @@ pub fn decode_signed_message(mut data: &[u8]) -> Option( data: &[u8], schema: &N::SignatureScheme, -) -> Result, TendermintError> { +) -> Result, TendermintError> { let msg = decode_signed_message::(data).ok_or(TendermintError::InvalidEvidence)?; // verify that evidence messages are signed correctly @@ -162,7 +169,7 @@ pub fn verify_tendermint_evience( evidence: &Evidence, schema: &N::SignatureScheme, commit: impl Fn(u64) -> Option>, -) -> Result<(), TendermintError> { +) -> Result<(), TendermintError> { match evidence { Evidence::ConflictingMessages(first, second) => { let first = decode_and_verify_signed_message::(first, schema)?.msg; @@ -186,15 +193,16 @@ pub fn verify_tendermint_evience( }; // TODO: We need to be passed in the genesis time to handle this edge case if msg.block.0 == 0 { - todo!("invalid precommit signature on first block") + Err(TendermintError::InvalidEvidence)? + // todo!("invalid precommit signature on first block") } // get the last commit let prior_commit = match commit(msg.block.0 - 1) { Some(c) => c, - // If we have yet to sync the block in question, we will return InvalidContent based + // If we have yet to sync the block in question, we will return InvalidEvidence based // on our own temporal ambiguity - // This will also cause an InvalidContent for anything using a non-existent block, + // This will also cause an InvalidEvidence for anything using a non-existent block, // yet that's valid behavior // TODO: Double check the ramifications of this _ => Err(TendermintError::InvalidEvidence)?, @@ -229,6 +237,16 @@ pub enum SlashEvent { WithEvidence(Evidence), } +// Struct for if various upon handlers have been triggered to ensure they don't trigger multiple +// times. +#[derive(Clone, PartialEq, Eq, Debug)] +struct Upons { + upon_prevotes: bool, + upon_successful_current_round_prevotes: bool, + upon_negative_current_round_prevotes: bool, + upon_precommits: bool, +} + /// A machine executing the Tendermint protocol. 
pub struct TendermintMachine { db: N::Db, @@ -245,6 +263,7 @@ pub struct TendermintMachine { synced_block_result_send: mpsc::UnboundedSender, block: BlockData, + upons: Upons, } pub struct SyncedBlock { @@ -325,6 +344,13 @@ impl TendermintMachine { ); sleep(time_until_round_end).await; + // Clear the message tape + { + let mut txn = self.db.txn(); + txn.del(&message_tape_key(self.genesis)); + txn.commit(); + } + // Clear our outbound message queue self.queue = VecDeque::new(); @@ -338,6 +364,14 @@ impl TendermintMachine { proposal, ); + // Clear upons + self.upons = Upons { + upon_prevotes: false, + upon_successful_current_round_prevotes: false, + upon_negative_current_round_prevotes: false, + upon_precommits: false, + }; + // Start the first round self.round(RoundNumber(0), Some(round_end)); } @@ -375,6 +409,419 @@ impl TendermintMachine { } } + fn proposal_for_round(&self, round: RoundNumber) -> Option<(Option, &N::Block)> { + let proposer = self.weights.proposer(self.block.number, round); + if let Some(proposal_signed) = self.block.log.get(round, proposer, Step::Propose) { + if let Data::Proposal(vr, block) = &proposal_signed.msg.data { + Some((*vr, block)) + } else { + panic!("message for Step::Propose didn't have Data::Proposal"); + } + } else { + None? + } + } + + // L22-27 + fn upon_proposal_without_valid_round(&mut self) { + if self.block.round().step != Step::Propose { + return; + } + + // If we have the proposal message... 
+ let Some((None, block)) = self.proposal_for_round(self.block.round().number) else { + return; + }; + + // There either needs to not be a locked value or it must be equivalent + #[allow(clippy::map_unwrap_or)] + if self + .block + .locked + .as_ref() + .map(|(_round, locked_block)| block.id() == *locked_block) + .unwrap_or(true) + { + self.broadcast(Data::Prevote(Some(block.id()))); + } else { + self.broadcast(Data::Prevote(None)); + } + } + + // L28-33 + fn upon_proposal_with_valid_round(&mut self) { + if self.block.round().step != Step::Propose { + return; + } + + // If we have the proposal message... + let Some((Some(proposal_valid_round), block)) = + self.proposal_for_round(self.block.round().number) + else { + return; + }; + + // Check we have the necessary prevotes + if !self.block.log.has_consensus(proposal_valid_round, &Data::Prevote(Some(block.id()))) { + return; + } + + // We don't check valid round < current round as the `message` function does + + // If locked is None, lockedRoundp is -1 and less than valid round + #[allow(clippy::map_unwrap_or)] + let locked_clause_1 = self + .block + .locked + .as_ref() + .map(|(locked_round, _block)| locked_round.0 <= proposal_valid_round.0) + .unwrap_or(true); + // The second clause is if the locked values are equivalent. 
If no value is locked, they aren't + #[allow(clippy::map_unwrap_or)] + let locked_clause_2 = self + .block + .locked + .as_ref() + .map(|(_round, locked_block)| block.id() == *locked_block) + .unwrap_or(false); + + if locked_clause_1 || locked_clause_2 { + self.broadcast(Data::Prevote(Some(block.id()))); + } else { + self.broadcast(Data::Prevote(None)); + } + } + + // L34-35 + fn upon_prevotes(&mut self) { + if self.upons.upon_prevotes || (self.block.round().step != Step::Prevote) { + return; + } + + if self.block.log.has_participation(self.block.round().number, Step::Prevote) { + self.block.round_mut().set_timeout(Step::Prevote); + self.upons.upon_prevotes = true; + } + } + + // L36-43 + async fn upon_successful_current_round_prevotes(&mut self) { + // Returning if `self.step == Step::Propose` is equivalent to guarding `step >= prevote` + if self.upons.upon_successful_current_round_prevotes || + (self.block.round().step == Step::Propose) + { + return; + } + + // If we have the proposal message... 
+ let Some((_, block)) = self.proposal_for_round(self.block.round().number) else { + return; + }; + + // Check we have the necessary prevotes + if !self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { + return; + } + + let block = block.clone(); + self.upons.upon_successful_current_round_prevotes = true; + + if self.block.round().step == Step::Prevote { + self.block.locked = Some((self.block.round().number, block.id())); + let signature = self + .signer + .sign(&commit_msg( + self.block.end_time[&self.block.round().number].canonical(), + block.id().as_ref(), + )) + .await; + self.broadcast(Data::Precommit(Some((block.id(), signature)))); + } + self.block.valid = Some((self.block.round().number, block)); + } + + // L44-46 + fn upon_negative_current_round_prevotes(&mut self) { + if self.upons.upon_negative_current_round_prevotes || (self.block.round().step != Step::Prevote) + { + return; + } + + if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(None)) { + self.broadcast(Data::Precommit(None)); + } + + self.upons.upon_negative_current_round_prevotes = true; + } + + // L47-48 + fn upon_precommits(&mut self) { + if self.upons.upon_precommits { + return; + } + + if self.block.log.has_participation(self.block.round().number, Step::Precommit) { + self.block.round_mut().set_timeout(Step::Precommit); + self.upons.upon_precommits = true; + } + } + + // L22-48 + async fn all_current_round_upons(&mut self) { + self.upon_proposal_without_valid_round(); + self.upon_proposal_with_valid_round(); + self.upon_prevotes(); + self.upon_successful_current_round_prevotes().await; + self.upon_negative_current_round_prevotes(); + self.upon_precommits(); + } + + // L49-54 + async fn upon_successful_precommits(&mut self, round: RoundNumber) -> bool { + // If we have the proposal message... 
+ let Some((_, block)) = self.proposal_for_round(round) else { return false }; + + // Check we have the necessary precommits + // The precommit we check we have consensus upon uses a junk signature since message equality + // disregards the signature + if !self + .block + .log + .has_consensus(round, &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await)))) + { + return false; + } + + // Get all participants in this commit + let mut validators = vec![]; + let mut sigs = vec![]; + // Get all precommits for this round + for (validator, msgs) in &self.block.log.log[&round] { + if let Some(signed) = msgs.get(&Step::Precommit) { + if let Data::Precommit(Some((id, sig))) = &signed.msg.data { + // If this precommit was for this block, include it + if *id == block.id() { + validators.push(*validator); + sigs.push(sig.clone()); + } + } + } + } + + // Form the commit itself + let commit_msg = commit_msg(self.block.end_time[&round].canonical(), block.id().as_ref()); + let commit = Commit { + end_time: self.block.end_time[&round].canonical(), + validators: validators.clone(), + signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs), + }; + debug_assert!(self.network.verify_commit(block.id(), &commit)); + + // Add the block and reset the machine + log::info!( + target: "tendermint", + "TendermintMachine produced block {}", + hex::encode(block.id().as_ref()), + ); + let id = block.id(); + let proposal = self.network.add_block(block.clone(), commit).await; + log::trace!( + target: "tendermint", + "added block {} (produced by machine)", + hex::encode(id.as_ref()), + ); + self.reset(round, proposal).await; + + true + } + + // L49-54 + async fn all_any_round_upons(&mut self, round: RoundNumber) -> bool { + self.upon_successful_precommits(round).await + } + + // Returns Ok(true) if this was a Precommit which had either no signature or its signature + // validated + // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated 
yet + // Returns Err if the signature was invalid + async fn verify_precommit_signature( + &mut self, + signed: &SignedMessageFor, + ) -> Result { + let msg = &signed.msg; + if let Data::Precommit(precommit) = &msg.data { + let Some((id, sig)) = precommit else { return Ok(true) }; + // Also verify the end_time of the commit + // Only perform this verification if we already have the end_time + // Else, there's a DoS where we receive a precommit for some round infinitely in the future + // which forces us to calculate every end time + if let Some(end_time) = self.block.end_time.get(&msg.round) { + if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) + { + log::warn!(target: "tendermint", "validator produced an invalid commit signature"); + self + .slash( + msg.sender, + SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())), + ) + .await; + Err(TendermintError::Malicious)?; + } + return Ok(true); + } + } + Ok(false) + } + + async fn message(&mut self, signed: &SignedMessageFor) -> Result<(), TendermintError> { + let msg = &signed.msg; + if msg.block != self.block.number { + Err(TendermintError::Temporal)?; + } + + if (msg.block == self.block.number) && + (msg.round == self.block.round().number) && + (msg.data.step() == Step::Propose) + { + log::trace!( + target: "tendermint", + "received Propose for block {}, round {}", + msg.block.0, + msg.round.0, + ); + } + + // If this is a precommit, verify its signature + self.verify_precommit_signature(signed).await?; + + // Only let the proposer propose + if matches!(msg.data, Data::Proposal(..)) && + (msg.sender != self.weights.proposer(msg.block, msg.round)) + { + log::warn!(target: "tendermint", "Validator who wasn't the proposer proposed"); + // TODO: This should have evidence + self + .slash(msg.sender, SlashEvent::Id(SlashReason::InvalidProposer, msg.block.0, msg.round.0)) + .await; + Err(TendermintError::Malicious)?; + }; + + // If this is a proposal, verify the 
block + // If the block is invalid, drop the message, letting the timeout cover it + // This prevents needing to check if valid inside every `upon` block + if let Data::Proposal(_, block) = &msg.data { + match self.network.validate(block).await { + Ok(()) => {} + Err(BlockError::Temporal) => return Err(TendermintError::Temporal), + Err(BlockError::Fatal) => { + log::warn!(target: "tendermint", "validator proposed a fatally invalid block"); + self + .slash( + msg.sender, + SlashEvent::Id(SlashReason::InvalidBlock, self.block.number.0, msg.round.0), + ) + .await; + Err(TendermintError::Malicious)?; + } + }; + } + + // If this is a proposal, verify the valid round isn't fundamentally invalid + if let Data::Proposal(Some(valid_round), _) = msg.data { + if valid_round.0 >= msg.round.0 { + log::warn!( + target: "tendermint", + "proposed proposed with a syntactically invalid valid round", + ); + self + .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode()))) + .await; + Err(TendermintError::Malicious)?; + } + } + + // Add it to the log, returning if it was already handled + match self.block.log.log(signed.clone()) { + Ok(true) => {} + Ok(false) => Err(TendermintError::AlreadyHandled)?, + Err(evidence) => { + self.slash(msg.sender, SlashEvent::WithEvidence(evidence)).await; + Err(TendermintError::Malicious)?; + } + } + log::debug!( + target: "tendermint", + "received new tendermint message (block: {}, round: {}, step: {:?})", + msg.block.0, + msg.round.0, + msg.data.step(), + ); + + // Run all `upons` run for any round + // If it returned true, we added a new block, so return + if self.all_any_round_upons(msg.round).await { + return Ok(()); + } + + // Check if we need to jump ahead + #[allow(clippy::comparison_chain)] + if msg.round.0 < self.block.round().number.0 { + // Prior round, disregard if not finalizing + return Ok(()); + } else if msg.round.0 > self.block.round().number.0 { + // 55-56 + // Jump, enabling processing by the below code 
+ if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); + + // Jump to the new round. + self.round(msg.round, None); + + // If this round already has precommit messages, verify their signatures + let round_msgs = self.block.log.log[&msg.round].clone(); + for (validator, msgs) in &round_msgs { + if let Some(existing) = msgs.get(&Step::Precommit) { + if let Ok(res) = self.verify_precommit_signature(existing).await { + // Ensure this actually verified the signature instead of believing it shouldn't yet + assert!(res); + } else { + // Remove the message so it isn't counted towards forming a commit/included in one + // This won't remove the fact they precommitted for this block hash in the MessageLog + // TODO: Don't even log these in the first place until we jump, preventing needing + // to do this in the first place + self + .block + .log + .log + .get_mut(&msg.round) + .unwrap() + .get_mut(validator) + .unwrap() + .remove(&Step::Precommit) + .unwrap(); + } + } + } + } else { + // Future round which we aren't ready to jump to, so return for now + return Ok(()); + } + } + + // msg.round is now guaranteed to be equal to self.block.round().number + debug_assert_eq!(msg.round, self.block.round().number); + + // Run all `upons` run for the current round + self.all_current_round_upons().await; + + Ok(()) + } + /// Create a new Tendermint machine, from the specified point, with the specified block as the /// one to propose next. This will return a channel to send messages from the gossip layer and /// the machine itself. The machine should have `run` called from an asynchronous task. 
@@ -419,7 +866,7 @@ impl TendermintMachine { let validators = network.signature_scheme(); let weights = Arc::new(network.weights()); let validator_id = signer.validator_id().await; - // 01-10 + // L01-10 let mut machine = TendermintMachine { db: db.clone(), genesis, @@ -442,6 +889,13 @@ impl TendermintMachine { validator_id, Some(proposal), ), + + upons: Upons { + upon_prevotes: false, + upon_successful_current_round_prevotes: false, + upon_negative_current_round_prevotes: false, + upon_precommits: false, + }, }; // The end time of the last block is the start time for this one @@ -460,16 +914,16 @@ impl TendermintMachine { pub async fn run(mut self) { log::debug!(target: "tendermint", "running TendermintMachine"); + let mut rebroadcast_future = Box::pin(sleep(Duration::from_secs(60))).fuse(); loop { // Also create a future for if the queue has a message // Does not pop_front as if another message has higher priority, its future will be handled // instead in this loop, and the popped value would be dropped with the next iteration - // While no other message has a higher priority right now, this is a safer practice let mut queue_future = if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() }; if let Some((our_message, msg, mut sig)) = futures_util::select_biased! 
{ - // Handle a new block occurring externally (an external sync loop) + // Handle a new block occurring externally (from an external sync loop) // Has the highest priority as it makes all other futures here irrelevant msg = self.synced_block_recv.next() => { if let Some(SyncedBlock { number, block, commit }) = msg { @@ -503,16 +957,19 @@ impl TendermintMachine { Some((true, self.queue.pop_front().unwrap(), None)) }, + // L57-67 // Handle any timeouts step = self.block.round().timeout_future().fuse() => { // Remove the timeout so it doesn't persist, always being the selected future due to bias // While this does enable the timeout to be entered again, the timeout setting code will // never attempt to add a timeout after its timeout has expired + // (due to it setting an `upon` boolean) self.block.round_mut().timeouts.remove(&step); - // Only run if it's still the step in question - if self.block.round().step == step { - match step { - Step::Propose => { + + match step { + Step::Propose => { + // Only run if it's still the step in question + if self.block.round().step == step { // Slash the validator for not proposing when they should've log::debug!(target: "tendermint", "Validator didn't propose when they should have"); // this slash will be voted on. 
@@ -525,14 +982,42 @@ impl TendermintMachine { ), ).await; self.broadcast(Data::Prevote(None)); - }, - Step::Prevote => self.broadcast(Data::Precommit(None)), - Step::Precommit => { - self.round(RoundNumber(self.block.round().number.0 + 1), None); - continue; } + }, + Step::Prevote => { + // Only run if it's still the step in question + if self.block.round().step == step { + self.broadcast(Data::Precommit(None)) + } + }, + Step::Precommit => { + self.round(RoundNumber(self.block.round().number.0 + 1), None); } + }; + + // Execute the upons now that the state has changed + self.all_any_round_upons(self.block.round().number).await; + self.all_current_round_upons().await; + + None + }, + + // If it's been more than 60s, rebroadcast our own messages + () = rebroadcast_future => { + let key = message_tape_key(self.genesis); + let messages = self.db.get(key).unwrap_or(vec![]); + let mut messages = messages.as_slice(); + + while !messages.is_empty() { + self.network.broadcast( + SignedMessageFor::::decode(&mut IoReader(&mut messages)) + .expect("saved invalid message to DB") + ).await; } + + // Reset the rebroadcast future + rebroadcast_future = Box::pin(sleep(core::time::Duration::from_secs(60))).fuse(); + None }, @@ -554,429 +1039,31 @@ impl TendermintMachine { } let sig = sig.unwrap(); - // TODO: message may internally call broadcast. We should check within broadcast it's not - // broadcasting our own message at this time. let signed_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() }; let res = self.message(&signed_msg).await; + // If this is our message, and we hit an invariant, we could be slashed. + // We only broadcast our message after running it ourselves, to ensure it doesn't error, to + // ensure we don't get slashed on invariants. if res.is_err() && our_message { panic!("honest node (ourselves) had invalid behavior"); } - // Only now should we allow broadcasts since we're sure an invariant wasn't reached causing - // us to have invalid messages. 
+ // Save this message to a linear tape of all our messages for this block, if ours + // TODO: Since we do this after we mark this message as sent to prevent equivocations, a + // precisely time reboot could cause this message marked as sent yet not added to the tape + if our_message { + let message_tape_key = message_tape_key(self.genesis); + let mut txn = self.db.txn(); + let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]); + message_tape.extend(signed_msg.encode()); + txn.put(&message_tape_key, message_tape); + } + + // Re-broadcast this since it's an original consensus message worth handling if res.is_ok() { - // Re-broadcast this since it's an original consensus message self.network.broadcast(signed_msg).await; } - - match res { - Ok(None) => {} - Ok(Some(block)) => { - let mut validators = vec![]; - let mut sigs = vec![]; - // Get all precommits for this round - for (validator, msgs) in &self.block.log.log[&msg.round] { - if let Some(signed) = msgs.get(&Step::Precommit) { - if let Data::Precommit(Some((id, sig))) = &signed.msg.data { - // If this precommit was for this block, include it - if *id == block.id() { - validators.push(*validator); - sigs.push(sig.clone()); - } - } - } - } - - let commit_msg = - commit_msg(self.block.end_time[&msg.round].canonical(), block.id().as_ref()); - let commit = Commit { - end_time: self.block.end_time[&msg.round].canonical(), - validators: validators.clone(), - signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs), - }; - debug_assert!(self.network.verify_commit(block.id(), &commit)); - - log::info!( - target: "tendermint", - "TendermintMachine produced block {}", - hex::encode(block.id().as_ref()), - ); - let id = block.id(); - let proposal = self.network.add_block(block, commit).await; - log::trace!( - target: "tendermint", - "added block {} (produced by machine)", - hex::encode(id.as_ref()), - ); - self.reset(msg.round, proposal).await; - } - 
Err(TendermintError::Malicious(sender, evidence)) => { - let current_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() }; - - let slash = if let Some(ev) = evidence { - // if the malicious message contains a block, only vote to slash - // TODO: Should this decision be made at a higher level? - // A higher-level system may be able to verify if the contained block is fatally - // invalid - // A higher-level system may accept the bandwidth size of this, even if the issue is - // just the valid round field - if let Data::Proposal(_, _) = ¤t_msg.msg.data { - SlashEvent::Id( - SlashReason::InvalidBlock, - self.block.number.0, - self.block.round().number.0, - ) - } else { - // slash with evidence otherwise - SlashEvent::WithEvidence(ev) - } - } else { - // we don't have evidence. Slash with vote. - SlashEvent::Id( - SlashReason::InvalidMessage, - self.block.number.0, - self.block.round().number.0, - ) - }; - - // Each message that we're voting to slash over needs to be re-broadcasted so other - // validators also trigger their own votes - // TODO: should this be inside slash function? 
- if let SlashEvent::Id(_, _, _) = slash { - self.network.broadcast(current_msg).await; - } - - self.slash(sender, slash).await - } - Err( - TendermintError::Temporal | - TendermintError::AlreadyHandled | - TendermintError::InvalidEvidence, - ) => (), - } } } } - - // Returns Ok(true) if this was a Precommit which had either no signature or its signature - // validated - // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated yet - // Returns Err if the signature was invalid - fn verify_precommit_signature( - &self, - signed: &SignedMessageFor, - ) -> Result> { - let msg = &signed.msg; - if let Data::Precommit(precommit) = &msg.data { - let Some((id, sig)) = precommit else { return Ok(true) }; - // Also verify the end_time of the commit - // Only perform this verification if we already have the end_time - // Else, there's a DoS where we receive a precommit for some round infinitely in the future - // which forces us to calculate every end time - if let Some(end_time) = self.block.end_time.get(&msg.round) { - if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) - { - log::warn!(target: "tendermint", "Validator produced an invalid commit signature"); - Err(TendermintError::Malicious( - msg.sender, - Some(Evidence::InvalidPrecommit(signed.encode())), - ))?; - } - return Ok(true); - } - } - Ok(false) - } - - async fn message( - &mut self, - signed: &SignedMessageFor, - ) -> Result, TendermintError> { - let msg = &signed.msg; - if msg.block != self.block.number { - Err(TendermintError::Temporal)?; - } - - if (msg.block == self.block.number) && - (msg.round == self.block.round().number) && - (msg.data.step() == Step::Propose) - { - log::trace!( - target: "tendermint", - "received Propose for block {}, round {}", - msg.block.0, - msg.round.0, - ); - } - - // If this is a precommit, verify its signature - self.verify_precommit_signature(signed)?; - - // Only let the proposer propose - if matches!(msg.data, 
Data::Proposal(..)) && - (msg.sender != self.weights.proposer(msg.block, msg.round)) - { - log::warn!(target: "tendermint", "Validator who wasn't the proposer proposed"); - // TODO: This should have evidence - Err(TendermintError::Malicious(msg.sender, None))?; - }; - - if !self.block.log.log(signed.clone())? { - return Err(TendermintError::AlreadyHandled); - } - log::debug!( - target: "tendermint", - "received new tendermint message (block: {}, round: {}, step: {:?})", - msg.block.0, - msg.round.0, - msg.data.step(), - ); - - // All functions, except for the finalizer and the jump, are locked to the current round - - // Run the finalizer to see if it applies - // 49-52 - if matches!(msg.data, Data::Proposal(..)) || matches!(msg.data, Data::Precommit(_)) { - let proposer = self.weights.proposer(self.block.number, msg.round); - - // Get the proposal - if let Some(proposal_signed) = self.block.log.get(msg.round, proposer, Step::Propose) { - if let Data::Proposal(_, block) = &proposal_signed.msg.data { - // Check if it has gotten a sufficient amount of precommits - // Uses a junk signature since message equality disregards the signature - if self.block.log.has_consensus( - msg.round, - &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), - ) { - // If msg.round is in the future, these Precommits won't have their inner signatures - // verified - // It should be impossible for msg.round to be in the future however, as this requires - // 67% of validators to Precommit, and we jump on 34% participating in the new round - // The one exception would be if a validator had 34%, and could cause participation to - // go from 33% (not enough to jump) to 67%, without executing the below code - // This also would require the local machine to be outside of allowed time tolerances, - // or the validator with 34% to not be publishing Prevotes (as those would cause a - // a jump) - // Both are invariants - // TODO: Replace this panic with an inner signature check - 
assert!(msg.round.0 <= self.block.round().number.0); - - log::debug!(target: "tendermint", "block {} has consensus", msg.block.0); - return Ok(Some(block.clone())); - } - } - } - } - - // Else, check if we need to jump ahead - #[allow(clippy::comparison_chain)] - if msg.round.0 < self.block.round().number.0 { - // Prior round, disregard if not finalizing - return Ok(None); - } else if msg.round.0 > self.block.round().number.0 { - // 55-56 - // Jump, enabling processing by the below code - if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { - log::debug!( - target: "tendermint", - "jumping from round {} to round {}", - self.block.round().number.0, - msg.round.0, - ); - - // Jump to the new round. - let proposer = self.round(msg.round, None); - - // If this round already has precommit messages, verify their signatures - let round_msgs = self.block.log.log[&msg.round].clone(); - for (validator, msgs) in &round_msgs { - if let Some(existing) = msgs.get(&Step::Precommit) { - if let Ok(res) = self.verify_precommit_signature(existing) { - // Ensure this actually verified the signature instead of believing it shouldn't yet - assert!(res); - } else { - // Remove the message so it isn't counted towards forming a commit/included in one - // This won't remove the fact they precommitted for this block hash in the MessageLog - // TODO: Don't even log these in the first place until we jump, preventing needing - // to do this in the first place - let msg = self - .block - .log - .log - .get_mut(&msg.round) - .unwrap() - .get_mut(validator) - .unwrap() - .remove(&Step::Precommit) - .unwrap(); - - // Slash the validator for publishing an invalid commit signature - self - .slash( - *validator, - SlashEvent::WithEvidence(Evidence::InvalidPrecommit(msg.encode())), - ) - .await; - } - } - } - - // If we're the proposer, return now we don't waste time on the current round - // (as it doesn't have a proposal, since we didn't propose, and cannot complete) - 
if proposer { - return Ok(None); - } - } else { - // Future round which we aren't ready to jump to, so return for now - return Ok(None); - } - } - - // msg.round is now guaranteed to be equal to self.block.round().number - debug_assert_eq!(msg.round, self.block.round().number); - - // The paper executes these checks when the step is prevote. Making sure this message warrants - // rerunning these checks is a sane optimization since message instances is a full iteration - // of the round map - if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { - let (participation, weight) = - self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); - let threshold_weight = self.weights.threshold(); - if participation < threshold_weight { - log::trace!( - target: "tendermint", - "progess towards setting prevote timeout, participation: {}, needed: {}", - participation, - threshold_weight, - ); - } - // 34-35 - if participation >= threshold_weight { - log::trace!( - target: "tendermint", - "setting timeout for prevote due to sufficient participation", - ); - self.block.round_mut().set_timeout(Step::Prevote); - } - - // 44-46 - if weight >= threshold_weight { - self.broadcast(Data::Precommit(None)); - return Ok(None); - } - } - - // 47-48 - if matches!(msg.data, Data::Precommit(_)) && - self.block.log.has_participation(self.block.round().number, Step::Precommit) - { - log::trace!( - target: "tendermint", - "setting timeout for precommit due to sufficient participation", - ); - self.block.round_mut().set_timeout(Step::Precommit); - } - - // All further operations require actually having the proposal in question - let proposer = self.weights.proposer(self.block.number, self.block.round().number); - let (vr, block) = if let Some(proposal_signed) = - self.block.log.get(self.block.round().number, proposer, Step::Propose) - { - if let Data::Proposal(vr, block) = &proposal_signed.msg.data { - (vr, block) - } else { - 
panic!("message for Step::Propose didn't have Data::Proposal"); - } - } else { - return Ok(None); - }; - - // 22-33 - if self.block.round().step == Step::Propose { - // Delay error handling (triggering a slash) until after we vote. - let (valid, err) = match self.network.validate(block).await { - Ok(()) => (true, Ok(None)), - Err(BlockError::Temporal) => (false, Ok(None)), - Err(BlockError::Fatal) => (false, { - log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); - // TODO: Produce evidence of this for the higher level code to decide what to do with - Err(TendermintError::Malicious(proposer, None)) - }), - }; - // Create a raw vote which only requires block validity as a basis for the actual vote. - let raw_vote = Some(block.id()).filter(|_| valid); - - // If locked is none, it has a round of -1 according to the protocol. That satisfies - // 23 and 29. If it's some, both are satisfied if they're for the same ID. If it's some - // with different IDs, the function on 22 rejects yet the function on 28 has one other - // condition - let locked = self.block.locked.as_ref().map_or(true, |(_, id)| id == &block.id()); - let mut vote = raw_vote.filter(|_| locked); - - if let Some(vr) = vr { - // Malformed message - if vr.0 >= self.block.round().number.0 { - log::warn!(target: "tendermint", "Validator claimed a round from the future was valid"); - Err(TendermintError::Malicious( - msg.sender, - Some(Evidence::InvalidValidRound(signed.encode())), - ))?; - } - - if self.block.log.has_consensus(*vr, &Data::Prevote(Some(block.id()))) { - // Allow differing locked values if the proposal has a newer valid round - // This is the other condition described above - if let Some((locked_round, _)) = self.block.locked.as_ref() { - vote = vote.or_else(|| raw_vote.filter(|_| locked_round.0 <= vr.0)); - } - - self.broadcast(Data::Prevote(vote)); - return err; - } - } else { - self.broadcast(Data::Prevote(vote)); - return err; - } - - return Ok(None); - } - - if 
self.block.valid.as_ref().map_or(true, |(round, _)| round != &self.block.round().number) { - // 36-43 - - // The run once condition is implemented above. Since valid will always be set by this, it - // not being set, or only being set historically, means this has yet to be run - - if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { - match self.network.validate(block).await { - // BlockError::Temporal is due to a temporal error we have, yet a supermajority of the - // network does not, Because we do not believe this block to be fatally invalid, and - // because a supermajority deems it valid, accept it. - Ok(()) | Err(BlockError::Temporal) => (), - Err(BlockError::Fatal) => { - log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); - // TODO: Produce evidence of this for the higher level code to decide what to do with - Err(TendermintError::Malicious(proposer, None))? - } - }; - - self.block.valid = Some((self.block.round().number, block.clone())); - if self.block.round().step == Step::Prevote { - self.block.locked = Some((self.block.round().number, block.id())); - self.broadcast(Data::Precommit(Some(( - block.id(), - self - .signer - .sign(&commit_msg( - self.block.end_time[&self.block.round().number].canonical(), - block.id().as_ref(), - )) - .await, - )))); - } - } - } - - Ok(None) - } } diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index 3959852d..e65568ca 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, collections::HashMap}; use parity_scale_codec::Encode; -use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; +use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence}; type RoundLog = HashMap<::ValidatorId, HashMap>>; pub(crate) struct MessageLog { @@ 
-16,7 +16,7 @@ impl MessageLog { } // Returns true if it's a new message - pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result> { + pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result { let msg = &signed.msg; // Clarity, and safety around default != new edge cases let round = self.log.entry(msg.round).or_insert_with(HashMap::new); @@ -30,10 +30,7 @@ impl MessageLog { target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); - Err(TendermintError::Malicious( - msg.sender, - Some(Evidence::ConflictingMessages(existing.encode(), signed.encode())), - ))?; + Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?; } return Ok(false); } @@ -47,7 +44,8 @@ impl MessageLog { pub(crate) fn message_instances(&self, round: RoundNumber, data: &DataFor) -> (u64, u64) { let mut participating = 0; let mut weight = 0; - for (participant, msgs) in &self.log[&round] { + let Some(log) = self.log.get(&round) else { return (0, 0) }; + for (participant, msgs) in log { if let Some(msg) = msgs.get(&data.step()) { let validator_weight = self.weights.weight(*participant); participating += validator_weight; @@ -73,7 +71,8 @@ impl MessageLog { // Check if a supermajority of nodes have participated on a specific step pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool { let mut participating = 0; - for (participant, msgs) in &self.log[&round] { + let Some(log) = self.log.get(&round) else { return false }; + for (participant, msgs) in log { if msgs.get(&step).is_some() { participating += self.weights.weight(*participant); } diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index 3b3cf7c3..bec95ddc 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -145,7 +145,7 @@ impl Network for TestNetwork { println!("Slash for {id} due to {event:?}"); } - async fn validate(&mut self, 
block: &TestBlock) -> Result<(), BlockError> { + async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> { block.valid } From 43083dfd49639e584d830e9d86830e531fdc052a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 05:32:41 -0400 Subject: [PATCH 078/126] Remove redundant log from tendermint lib --- coordinator/tributary/tendermint/src/lib.rs | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index edd26183..9ee71a9d 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -679,18 +679,6 @@ impl TendermintMachine { Err(TendermintError::Temporal)?; } - if (msg.block == self.block.number) && - (msg.round == self.block.round().number) && - (msg.data.step() == Step::Propose) - { - log::trace!( - target: "tendermint", - "received Propose for block {}, round {}", - msg.block.0, - msg.round.0, - ); - } - // If this is a precommit, verify its signature self.verify_precommit_signature(signed).await?; @@ -698,7 +686,7 @@ impl TendermintMachine { if matches!(msg.data, Data::Proposal(..)) && (msg.sender != self.weights.proposer(msg.block, msg.round)) { - log::warn!(target: "tendermint", "Validator who wasn't the proposer proposed"); + log::warn!(target: "tendermint", "validator who wasn't the proposer proposed"); // TODO: This should have evidence self .slash(msg.sender, SlashEvent::Id(SlashReason::InvalidProposer, msg.block.0, msg.round.0)) @@ -971,7 +959,7 @@ impl TendermintMachine { // Only run if it's still the step in question if self.block.round().step == step { // Slash the validator for not proposing when they should've - log::debug!(target: "tendermint", "Validator didn't propose when they should have"); + log::debug!(target: "tendermint", "validator didn't propose when they should have"); // this slash will be voted on. 
self.slash( self.weights.proposer(self.block.number, self.block.round().number), From 0f0db14f05cdb90a941eabe8098f3d01d98ba8e9 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 06:02:12 -0400 Subject: [PATCH 079/126] Ethereum Integration (#557) * Clean up Ethereum * Consistent contract address for deployed contracts * Flesh out Router a bit * Add a Deployer for DoS-less deployment * Implement Router-finding * Use CREATE2 helper present in ethers * Move from CREATE2 to CREATE Bit more streamlined for our use case. * Document ethereum-serai * Tidy tests a bit * Test updateSeraiKey * Use encodePacked for updateSeraiKey * Take in the block hash to read state during * Add a Sandbox contract to the Ethereum integration * Add retrieval of transfers from Ethereum * Add inInstruction function to the Router * Augment our handling of InInstructions events with a check the transfer event also exists * Have the Deployer error upon failed deployments * Add --via-ir * Make get_transaction test-only We only used it to get transactions to confirm the resolution of Eventualities. Eventualities need to be modularized. By introducing the dedicated confirm_completion function, we remove the need for a non-test get_transaction AND begin this modularization (by no longer explicitly grabbing a transaction to check with). * Modularize Eventuality Almost fully-deprecates the Transaction trait for Completion. Replaces Transaction ID with Claim. * Modularize the Scheduler behind a trait * Add an extremely basic account Scheduler * Add nonce uses, key rotation to the account scheduler * Only report the account Scheduler empty after transferring keys Also ban payments to the branch/change/forward addresses. 
* Make fns reliant on state test-only * Start of an Ethereum integration for the processor * Add a session to the Router to prevent updateSeraiKey replaying This would only happen if an old key was rotated to again, which would require n-of-n collusion (already ridiculous and a valid fault attributable event). It just clarifies the formal arguments. * Add a RouterCommand + SignMachine for producing it to coins/ethereum * Ethereum which compiles * Have branch/change/forward return an option Also defines a UtxoNetwork extension trait for MAX_INPUTS. * Make external_address exclusively a test fn * Move the "account" scheduler to "smart contract" * Remove ABI artifact * Move refund/forward Plan creation into the Processor We create forward Plans in the scan path, and need to know their exact fees in the scan path. This requires adding a somewhat wonky shim_forward_plan method so we can obtain a Plan equivalent to the actual forward Plan for fee reasons, yet don't expect it to be the actual forward Plan (which may be distinct if the Plan pulls from the global state, such as with a nonce). Also properly types a Scheduler addendum such that the SC scheduler isn't cramming the nonce to use into the N::Output type. * Flesh out the Ethereum integration more * Two commits ago, into the **Scheduler, not Processor * Remove misc TODOs in SC Scheduler * Add constructor to RouterCommandMachine * RouterCommand read, pairing with the prior added write * Further add serialization methods * Have the Router's key included with the InInstruction This does not use the key at the time of the event. This uses the key at the end of the block for the event. Its much simpler than getting the full event streams for each, checking when they interlace. This does not read the state. Every block, this makes a request for every single key update and simply chooses the last one. This allows pruning state, only keeping the event tree. 
Ideally, we'd also introduce a cache to reduce the cost of the filter (small in events yielded, long in blocks searched). Since Serai doesn't have any forwarding TXs, nor Branches, nor change, all of our Plans should solely have payments out, and there's no expectation of a Plan being made under one key broken by it being received by another key. * Add read/write to InInstruction * Abstract the ABI for Call/OutInstruction in ethereum-serai * Fill out signable_transaction for Ethereum * Move ethereum-serai to alloy Resolves #331. * Use the opaque sol macro instead of generated files * Move the processor over to the now-alloy-based ethereum-serai * Use the ecrecover provided by alloy * Have the SC use nonce for rotation, not session (an independent nonce which wasn't synchronized) * Always use the latest keys for SC scheduled plans * get_eventuality_completions for Ethereum * Finish fleshing out the processor Ethereum integration as needed for serai-processor tests This doesn't not support any actual deployments, not even the ones simulated by serai-processor-docker-tests. 
* Add alloy-simple-request-transport to the GH workflows * cargo update * Clarify a few comments and make one check more robust * Use a string for 27.0 in .github * Remove optional from no-longer-optional dependencies in processor * Add alloy to git deny exception * Fix no longer optional specification in processor's binaries feature * Use a version of foundry from 2024 * Correct fetching Bitcoin TXs in the processor docker tests * Update rustls to resolve RUSTSEC warnings * Use the monthly nightly foundry, not the deleted daily nightly --- .github/actions/bitcoin/action.yml | 2 +- .github/actions/test-dependencies/action.yml | 6 +- .github/workflows/coins-tests.yml | 1 + Cargo.lock | 1461 ++++++++++------- Cargo.toml | 1 + coins/bitcoin/src/wallet/send.rs | 2 +- coins/ethereum/.gitignore | 4 - coins/ethereum/Cargo.toml | 39 +- coins/ethereum/README.md | 6 + .../alloy-simple-request-transport/Cargo.toml | 29 + .../alloy-simple-request-transport/LICENSE | 21 + .../alloy-simple-request-transport/README.md | 4 + .../alloy-simple-request-transport/src/lib.rs | 60 + coins/ethereum/build.rs | 37 +- coins/ethereum/contracts/Deployer.sol | 52 + coins/ethereum/contracts/IERC20.sol | 20 + coins/ethereum/contracts/Router.sol | 214 ++- coins/ethereum/contracts/Sandbox.sol | 48 + coins/ethereum/contracts/Schnorr.sol | 39 +- coins/ethereum/src/abi/mod.rs | 35 +- coins/ethereum/src/crypto.rs | 154 +- coins/ethereum/src/deployer.rs | 119 ++ coins/ethereum/src/erc20.rs | 118 ++ coins/ethereum/src/lib.rs | 18 +- coins/ethereum/src/machine.rs | 414 +++++ coins/ethereum/src/router.rs | 446 ++++- coins/ethereum/src/schnorr.rs | 34 - coins/ethereum/src/tests/abi/mod.rs | 13 + coins/ethereum/src/tests/contracts/ERC20.sol | 51 + .../ethereum/src/tests/contracts/Schnorr.sol | 15 + coins/ethereum/src/tests/crypto.rs | 89 +- coins/ethereum/src/tests/mod.rs | 135 +- coins/ethereum/src/tests/router.rs | 188 ++- coins/ethereum/src/tests/schnorr.rs | 82 +- deny.toml | 1 + processor/Cargo.toml | 
18 +- processor/src/lib.rs | 8 + processor/src/main.rs | 5 + processor/src/multisigs/db.rs | 66 +- processor/src/multisigs/mod.rs | 185 ++- processor/src/multisigs/scanner.rs | 28 +- processor/src/multisigs/scheduler/mod.rs | 95 ++ .../src/multisigs/scheduler/smart_contract.rs | 208 +++ .../{scheduler.rs => scheduler/utxo.rs} | 156 +- processor/src/networks/bitcoin.rs | 147 +- processor/src/networks/ethereum.rs | 827 ++++++++++ processor/src/networks/mod.rs | 153 +- processor/src/networks/monero.rs | 114 +- processor/src/plan.rs | 22 +- processor/src/signer.rs | 214 +-- processor/src/tests/addresses.rs | 33 +- processor/src/tests/literal/mod.rs | 2 +- processor/src/tests/scanner.rs | 16 +- processor/src/tests/signer.rs | 50 +- processor/src/tests/wallet.rs | 65 +- spec/integrations/Ethereum.md | 27 +- tests/processor/src/lib.rs | 15 +- tests/processor/src/tests/send.rs | 4 +- 58 files changed, 5031 insertions(+), 1385 deletions(-) create mode 100644 coins/ethereum/alloy-simple-request-transport/Cargo.toml create mode 100644 coins/ethereum/alloy-simple-request-transport/LICENSE create mode 100644 coins/ethereum/alloy-simple-request-transport/README.md create mode 100644 coins/ethereum/alloy-simple-request-transport/src/lib.rs create mode 100644 coins/ethereum/contracts/Deployer.sol create mode 100644 coins/ethereum/contracts/IERC20.sol create mode 100644 coins/ethereum/contracts/Sandbox.sol create mode 100644 coins/ethereum/src/deployer.rs create mode 100644 coins/ethereum/src/erc20.rs create mode 100644 coins/ethereum/src/machine.rs delete mode 100644 coins/ethereum/src/schnorr.rs create mode 100644 coins/ethereum/src/tests/abi/mod.rs create mode 100644 coins/ethereum/src/tests/contracts/ERC20.sol create mode 100644 coins/ethereum/src/tests/contracts/Schnorr.sol create mode 100644 processor/src/multisigs/scheduler/mod.rs create mode 100644 processor/src/multisigs/scheduler/smart_contract.rs rename processor/src/multisigs/{scheduler.rs => scheduler/utxo.rs} (80%) 
create mode 100644 processor/src/networks/ethereum.rs diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 5008b690..2a6dbce3 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -5,7 +5,7 @@ inputs: version: description: "Version to download and run" required: false - default: 27.0 + default: "27.0" runs: using: "composite" diff --git a/.github/actions/test-dependencies/action.yml b/.github/actions/test-dependencies/action.yml index 7487a33b..49c2fa64 100644 --- a/.github/actions/test-dependencies/action.yml +++ b/.github/actions/test-dependencies/action.yml @@ -10,7 +10,7 @@ inputs: bitcoin-version: description: "Bitcoin version to download and run as a regtest node" required: false - default: 27.0 + default: "27.0" runs: using: "composite" @@ -19,9 +19,9 @@ runs: uses: ./.github/actions/build-dependencies - name: Install Foundry - uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 with: - version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2 + version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9 cache: false - name: Run a Monero Regtest Node diff --git a/.github/workflows/coins-tests.yml b/.github/workflows/coins-tests.yml index a0437c61..f94e9fd5 100644 --- a/.github/workflows/coins-tests.yml +++ b/.github/workflows/coins-tests.yml @@ -30,6 +30,7 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p alloy-simple-request-transport \ -p ethereum-serai \ -p monero-generators \ -p monero-serai diff --git a/Cargo.lock b/Cargo.lock index ee2ecdcf..edc46693 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,9 +95,344 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", + "sha2", +] + +[[package]] +name = "alloy-core" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbd9ee412dfb4e81d23cd1ae816d828c494a77d1eb00358035043695d4c5808" +dependencies = [ + "alloy-primitives", +] + +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "once_cell", + "serde", +] + +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.1.0" +source = 
"git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-types", + "alloy-signer", + "async-trait", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-node-bindings" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "k256", + "serde_json", + "tempfile", + "thiserror", + "tracing", + "url", +] + +[[package]] +name = "alloy-primitives" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "k256", + "keccak-asm", + "proptest", + "rand", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-rpc-types-trace", + "alloy-transport", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-serde", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve", + "k256", + "thiserror", +] + +[[package]] +name = "alloy-simple-request-transport" +version = "0.1.0" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "serde_json", + 
"simple-request", + "tower", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.4.1", + "indexmap 2.2.6", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.60", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.60", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +dependencies = [ + "winnow 0.6.6", +] + +[[package]] +name = "alloy-sol-types" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.0", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport-http" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ 
+ "alloy-transport", + "url", +] [[package]] name = "android-tzdata" @@ -192,6 +527,130 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "array-bytes" version = "6.2.2" @@ -291,25 +750,36 @@ dependencies = [ ] [[package]] -name = "async-trait" -version = "0.1.79" +name = "async-stream" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.58", + "async-stream-impl", + "futures-core", + "pin-project-lite 0.2.14", ] [[package]] -name = "async_io_stream" -version = "0.3.3" +name = "async-stream-impl" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "futures", - "pharos", - "rustc_version", + "proc-macro2", + "quote", + "syn 2.0.60", +] + +[[package]] +name = "async-trait" +version = "0.1.80" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", ] [[package]] @@ -344,7 +814,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -402,6 +872,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64ct" version = "1.6.0" @@ -441,7 +917,7 @@ dependencies = [ "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools", + "itertools 0.12.1", "lazy_static", "lazycell", "proc-macro2", @@ -449,9 +925,24 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.58", + "syn 2.0.60", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + [[package]] name = "bitcoin" version = "0.31.2" @@ -617,6 +1108,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "blst" +version = "0.3.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "bollard" version = "0.15.0" @@ -677,7 +1180,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "syn_derive", ] @@ -765,6 +1268,20 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3130f3d8717cc02e668a896af24984d5d5d4e8bf12e278e982e0f1bd88a0f9af" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "camino" version = "1.1.6" @@ -817,9 +1334,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa50868b64a9a6fda9d593ce778849ea8715cd2a3d2cc17ffdb4a2f2f2f1961d" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ "smallvec", ] @@ -862,9 +1379,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.35" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -935,9 +1452,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.2" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -957,14 +1474,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.4" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1000,13 +1517,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.9.1" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37be52ef5e3b394db27a2341010685ad5103c72ac15ce2e9420a7e8f93f342c" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if", "cpufeatures", "hex", + "proptest", "serde", ] @@ -1042,6 +1560,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +[[package]] +name = "convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -1195,7 +1719,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", @@ -1288,7 +1812,7 @@ dependencies = [ "group", "platforms", "rand_core", - "rustc_version", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -1301,7 +1825,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1328,7 +1852,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1345,7 +1869,7 @@ checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1364,6 
+1888,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.3", + "lock_api", + "once_cell", + "parking_lot_core 0.9.9", +] + [[package]] name = "data-encoding" version = "2.5.0" @@ -1433,6 +1970,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive-syn-parse" version = "0.1.5" @@ -1450,8 +1998,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1531,7 +2081,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1692,9 +2242,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elliptic-curve" @@ -1716,40 +2266,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encoding_rs" -version = "0.8.34" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "enr" -version = "0.9.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" -dependencies = [ - "base64 0.21.7", - "bytes", - "hex", - "k256", - "log", - "rand", - "rlp", - "serde", - "sha3", - "zeroize", -] - [[package]] name = "enum-as-inner" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn 1.0.109", @@ -1761,10 +2284,10 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -1802,198 +2325,27 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - [[package]] name = "ethereum-serai" version = "0.1.0" dependencies = [ - "ethers-contract", - "ethers-core", - "ethers-providers", - "eyre", + "alloy-consensus", + "alloy-core", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-simple-request-transport", + "alloy-sol-types", + "flexible-transcript", "group", - "hex", "k256", "modular-frost", "rand_core", - "serde", - 
"serde_json", - "sha2", - "sha3", "thiserror", "tokio", ] -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint", -] - -[[package]] -name = "ethers-contract" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c" -dependencies = [ - "const-hex", - "ethers-contract-abigen", - "ethers-contract-derive", - "ethers-core", - "ethers-providers", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core", - "eyre", - "prettyplease 0.2.16", - "proc-macro2", - "quote", - "regex", - "serde", - "serde_json", - "syn 2.0.58", - "toml 0.7.8", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen", - "ethers-core", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.58", -] - -[[package]] -name = "ethers-core" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad" -dependencies = [ - "arrayvec", - "bytes", - "cargo_metadata", - "chrono", - "const-hex", - "elliptic-curve", - "ethabi", - "generic-array 0.14.7", - 
"k256", - "num_enum", - "once_cell", - "open-fastrlp", - "rand", - "rlp", - "serde", - "serde_json", - "strum 0.25.0", - "syn 2.0.58", - "tempfile", - "thiserror", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-providers" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" -dependencies = [ - "async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr", - "ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.12", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -2040,17 +2392,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.58", -] - -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", + "syn 2.0.60", ] [[package]] @@ -2065,6 +2407,17 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.2.1" @@ -2309,12 +2662,12 @@ dependencies = [ "derive-syn-parse", "expander", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", "macro_magic", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.58", + 
"syn 2.0.60", ] [[package]] @@ -2326,7 +2679,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2336,7 +2689,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2495,7 +2848,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -2505,7 +2858,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.10", + "rustls 0.21.11", ] [[package]] @@ -2536,10 +2889,6 @@ name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" -dependencies = [ - "gloo-timers", - "send_wrapper 0.4.0", -] [[package]] name = "futures-util" @@ -2559,6 +2908,12 @@ dependencies = [ "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "fxhash" version = "0.2.1" @@ -2616,9 +2971,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.14" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94b22e06ecb0110981051723910cbf0b5f5e09a2062dd7663334ee79a9d1286c" +checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" dependencies = [ "cfg-if", "libc", @@ -2671,18 +3026,6 @@ dependencies = [ "regex-syntax 0.8.3", ] -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "group" version = "0.13.0" @@ -2753,21 +3096,18 @@ dependencies = [ "allocator-api2", ] -[[package]] -name = "hashers" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" -dependencies = [ - "fxhash", -] - [[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.9" @@ -2779,6 +3119,9 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-conservative" @@ -2951,9 +3294,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", @@ -2970,15 +3313,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736f15a50e749d033164c56c09783b6102c4ff8da79ad77dbddbbaea0f8567f7" +checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-util", - 
"rustls 0.23.4", + "rustls 0.23.5", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -2997,7 +3340,7 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.2.0", + "hyper 1.3.1", "pin-project-lite 0.2.14", "socket2 0.5.6", "tokio", @@ -3063,6 +3406,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -3120,15 +3473,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - [[package]] name = "impl-serde" version = "0.4.0" @@ -3149,12 +3493,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -3242,6 +3580,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -3302,7 +3649,7 @@ version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-crate 1.3.1", "proc-macro2", "quote", @@ -3345,20 +3692,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonwebtoken" -version = "8.3.0" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem", - "ring 0.16.20", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "k256" version = "0.13.3" @@ -3370,7 +3703,6 @@ dependencies = [ "elliptic-curve", "once_cell", "sha2", - "signature", ] [[package]] @@ -3382,6 +3714,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kvdb" version = "0.13.0" @@ -3448,7 +3790,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.48.5", ] [[package]] @@ -3765,7 +4107,7 @@ dependencies = [ "quinn", "rand", "ring 0.16.20", - "rustls 0.21.10", + "rustls 0.21.11", "socket2 0.5.6", "thiserror", "tokio", @@ -3818,11 +4160,11 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -3854,7 +4196,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.21.10", + "rustls 0.21.11", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -3886,7 +4228,7 @@ dependencies = [ "futures", "js-sys", "libp2p-core", - "send_wrapper 0.6.0", + "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -4075,7 +4417,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4089,7 +4431,7 @@ 
dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4100,7 +4442,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4111,7 +4453,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -4216,12 +4558,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "mini-serai" version = "0.1.0" @@ -4732,27 +5068,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" -dependencies = [ - "num_enum_derive", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" -dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2", - "quote", - "syn 2.0.58", -] - [[package]] name = "object" version = "0.31.1" @@ -4795,31 +5110,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", - "ethereum-types", - "open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "openssl-probe" version = "0.1.5" @@ -5182,6 +5472,17 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + [[package]] name = "petgraph" version = "0.6.4" @@ -5192,16 +5493,6 @@ dependencies = [ "indexmap 2.2.6", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version", -] - [[package]] name = "pin-project" version = "1.1.5" @@ -5219,7 +5510,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5320,7 +5611,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -5352,16 +5643,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "prettyplease" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" -dependencies = [ - "proc-macro2", - "syn 2.0.58", -] - [[package]] name = "primeorder" version = "0.13.6" @@ -5379,7 +5660,6 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" 
dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "scale-info", "uint", @@ -5443,14 +5723,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -5489,7 +5769,27 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", +] + +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.5.0", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.3", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] @@ -5509,13 +5809,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", - "itertools", + "heck 0.4.1", + "itertools 0.10.5", "lazy_static", "log", "multimap", "petgraph", - "prettyplease 0.1.25", + "prettyplease", "prost", "prost-types", "regex", @@ -5531,7 +5831,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -5606,7 +5906,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.11", "thiserror", "tokio", 
"tracing", @@ -5622,7 +5922,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.11", "slab", "thiserror", "tinyvec", @@ -5706,6 +6006,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -5790,7 +6099,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -5850,41 +6159,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" -[[package]] -name = "reqwest" -version = "0.11.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.14", - "serde", - "serde_json", - "serde_urlencoded", - "system-configuration", - "tokio", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "resolv-conf" version = "0.7.0" @@ -5951,21 +6225,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", - "rlp-derive", "rustc-hex", ] -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "rocksdb" version = "0.21.0" @@ -6019,6 +6281,36 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ruint" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -6037,6 +6329,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" +[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" @@ -6070,9 +6371,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring 0.17.8", @@ -6082,9 +6383,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.4" +version = "0.23.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8c4d6d8ad9f2492485e13453acbb291dd08f64441b6609c491f1c2cd2c6b4fe1" +checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e" dependencies = [ "once_cell", "ring 0.17.8", @@ -6109,11 +6410,11 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "rustls-pki-types", ] @@ -6150,6 +6451,18 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] + [[package]] name = "rw-stream-sink" version = "0.4.0" @@ -6289,7 +6602,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -7050,7 +7363,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -7290,7 +7603,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] @@ -7309,10 +7631,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.4.0" +name = "semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] name = "send_wrapper" @@ -7688,9 +8013,11 @@ dependencies = [ "bitcoin-serai", "borsh", "ciphersuite", + "const-hex", "dalek-ff-group", "dockertest", "env_logger", + "ethereum-serai", "flexible-transcript", "frost-schnorrkel", "hex", @@ -7877,9 +8204,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -7895,20 +8222,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -7923,7 +8250,7 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8009,6 +8336,16 @@ dependencies = 
[ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -8062,7 +8399,7 @@ version = "0.1.0" dependencies = [ "base64ct", "http-body-util", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-rustls", "hyper-util", "tokio", @@ -8070,18 +8407,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror", - "time", -] - [[package]] name = "siphasher" version = "0.3.11" @@ -8127,7 +8452,7 @@ dependencies = [ "curve25519-dalek", "rand_core", "ring 0.17.8", - "rustc_version", + "rustc_version 0.4.0", "sha2", "subtle", ] @@ -8200,7 +8525,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8396,7 +8721,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8415,7 +8740,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8587,7 +8912,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8740,7 +9065,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -8902,7 +9227,7 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + 
"heck 0.4.1", "proc-macro2", "quote", "rustversion", @@ -8915,11 +9240,11 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9007,15 +9332,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.58" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "syn_derive" version = "0.1.8" @@ -9025,7 +9362,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9117,22 +9454,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 
2.0.58", + "syn 2.0.60", ] [[package]] @@ -9253,7 +9590,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9262,7 +9599,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.4", + "rustls 0.23.5", "rustls-pki-types", "tokio", ] @@ -9334,7 +9671,7 @@ dependencies = [ "serde", "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9345,7 +9682,7 @@ checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9414,7 +9751,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -9647,6 +9984,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -9659,6 +10002,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -9728,12 +10077,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -9773,6 +10122,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -9819,7 +10177,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -9853,7 +10211,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10162,7 +10520,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -10195,9 +10553,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89beec544f246e679fc25490e3f8e08003bc4bf612068f325120dad4cea02c1c" +checksum = "81a1851a719f11d1d2fea40e15c72f6c00de8c142d7ac47c1441cc7e4d0d5bc6" dependencies = [ "bytemuck", "safe_arch", @@ -10409,6 +10767,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" + [[package]] name = "winreg" version = "0.50.0" @@ -10419,25 +10783,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] 
-name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version", - "send_wrapper 0.6.0", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" @@ -10539,7 +10884,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] @@ -10559,7 +10904,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.58", + "syn 2.0.60", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index bcc344ed..8a19d159 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ members = [ "crypto/schnorrkel", "coins/bitcoin", + "coins/ethereum/alloy-simple-request-transport", "coins/ethereum", "coins/monero/generators", "coins/monero", diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index f4cfa3b5..24594ab4 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -375,7 +375,7 @@ impl SignMachine for TransactionSignMachine { msg: &[u8], ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> { if !msg.is_empty() { - panic!("message was passed to the TransactionMachine when it generates its own"); + panic!("message was passed to the TransactionSignMachine when it generates its own"); } let commitments = (0 .. 
self.sigs.len()) diff --git a/coins/ethereum/.gitignore b/coins/ethereum/.gitignore index 46365e03..2dccdce9 100644 --- a/coins/ethereum/.gitignore +++ b/coins/ethereum/.gitignore @@ -1,7 +1,3 @@ # Solidity build outputs cache artifacts - -# Auto-generated ABI files -src/abi/schnorr.rs -src/abi/router.rs diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index bc60d3a4..4bb92fe4 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -18,28 +18,29 @@ workspace = true [dependencies] thiserror = { version = "1", default-features = false } -eyre = { version = "0.6", default-features = false } -sha3 = { version = "0.10", default-features = false, features = ["std"] } - -group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] } -frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] } - -ethers-core = { version = "2", default-features = false } -ethers-providers = { version = "2", default-features = false } -ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } - -[build-dependencies] -ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } - -[dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["std"] } -hex = { version = "0.4", default-features = false, features = ["std"] } -serde = { version = "1", default-features = false, features = ["std"] } -serde_json = { version = "1", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } -sha2 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } 
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } + +alloy-core = { version = "0.7", default-features = false } +alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[dev-dependencies] +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } + +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[features] +tests = [] diff --git a/coins/ethereum/README.md b/coins/ethereum/README.md index 13f1f2db..0090b26b 100644 --- a/coins/ethereum/README.md +++ b/coins/ethereum/README.md @@ -3,6 +3,12 @@ This package contains Ethereum-related functionality, specifically deploying and interacting with Serai contracts. +While `monero-serai` and `bitcoin-serai` are general purpose libraries, +`ethereum-serai` is Serai specific. If any of the utilities are generally +desired, please fork and maintain your own copy to ensure the desired +functionality is preserved, or open an issue to request we make this library +general purpose. 
+ ### Dependencies - solc diff --git a/coins/ethereum/alloy-simple-request-transport/Cargo.toml b/coins/ethereum/alloy-simple-request-transport/Cargo.toml new file mode 100644 index 00000000..115998e4 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "alloy-simple-request-transport" +version = "0.1.0" +description = "A transport for alloy based off simple-request" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport" +authors = ["Luke Parker "] +edition = "2021" +rust-version = "1.74" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +tower = "0.4" + +serde_json = { version = "1", default-features = false } +simple-request = { path = "../../../common/request", default-features = false } + +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[features] +default = ["tls"] +tls = ["simple-request/tls"] diff --git a/coins/ethereum/alloy-simple-request-transport/LICENSE b/coins/ethereum/alloy-simple-request-transport/LICENSE new file mode 100644 index 00000000..659881f1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following 
conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/coins/ethereum/alloy-simple-request-transport/README.md b/coins/ethereum/alloy-simple-request-transport/README.md new file mode 100644 index 00000000..372540d1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/README.md @@ -0,0 +1,4 @@ +# Alloy Simple Request Transport + +A transport for alloy based on simple-request, a small HTTP client built around +hyper. 
diff --git a/coins/ethereum/alloy-simple-request-transport/src/lib.rs b/coins/ethereum/alloy-simple-request-transport/src/lib.rs new file mode 100644 index 00000000..93b35bc1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/src/lib.rs @@ -0,0 +1,60 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] + +use core::task; +use std::io; + +use alloy_json_rpc::{RequestPacket, ResponsePacket}; +use alloy_transport::{TransportError, TransportErrorKind, TransportFut}; + +use simple_request::{hyper, Request, Client}; + +use tower::Service; + +#[derive(Clone, Debug)] +pub struct SimpleRequest { + client: Client, + url: String, +} + +impl SimpleRequest { + pub fn new(url: String) -> Self { + Self { client: Client::with_connection_pool(), url } + } +} + +impl Service for SimpleRequest { + type Response = ResponsePacket; + type Error = TransportError; + type Future = TransportFut<'static>; + + #[inline] + fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll> { + task::Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: RequestPacket) -> Self::Future { + let inner = self.clone(); + Box::pin(async move { + let packet = req.serialize().map_err(TransportError::SerError)?; + let request = Request::from( + hyper::Request::post(&inner.url) + .header("Content-Type", "application/json") + .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into()) + .unwrap(), + ); + + let mut res = inner + .client + .request(request) + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))? 
+ .body() + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?; + + serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, "")) + }) + } +} diff --git a/coins/ethereum/build.rs b/coins/ethereum/build.rs index 3590b12f..38fcfe00 100644 --- a/coins/ethereum/build.rs +++ b/coins/ethereum/build.rs @@ -1,7 +1,5 @@ use std::process::Command; -use ethers_contract::Abigen; - fn main() { println!("cargo:rerun-if-changed=contracts/*"); println!("cargo:rerun-if-changed=artifacts/*"); @@ -21,22 +19,23 @@ fn main() { "--base-path", ".", "-o", "./artifacts", "--overwrite", "--bin", "--abi", - "--optimize", - "./contracts/Schnorr.sol", "./contracts/Router.sol", + "--via-ir", "--optimize", + + "./contracts/IERC20.sol", + + "./contracts/Schnorr.sol", + "./contracts/Deployer.sol", + "./contracts/Sandbox.sol", + "./contracts/Router.sol", + + "./src/tests/contracts/Schnorr.sol", + "./src/tests/contracts/ERC20.sol", + + "--no-color", ]; - assert!(Command::new("solc").args(args).status().unwrap().success()); - - Abigen::new("Schnorr", "./artifacts/Schnorr.abi") - .unwrap() - .generate() - .unwrap() - .write_to_file("./src/abi/schnorr.rs") - .unwrap(); - - Abigen::new("Router", "./artifacts/Router.abi") - .unwrap() - .generate() - .unwrap() - .write_to_file("./src/abi/router.rs") - .unwrap(); + let solc = Command::new("solc").args(args).output().unwrap(); + assert!(solc.status.success()); + for line in String::from_utf8(solc.stderr).unwrap().lines() { + assert!(!line.starts_with("Error:")); + } } diff --git a/coins/ethereum/contracts/Deployer.sol b/coins/ethereum/contracts/Deployer.sol new file mode 100644 index 00000000..475be4c1 --- /dev/null +++ b/coins/ethereum/contracts/Deployer.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +/* +The expected deployment process of the Router is as follows: + +1) A transaction deploying Deployer is made. 
Then, a deterministic signature is + created such that an account with an unknown private key is the creator of + the contract. Anyone can fund this address, and once anyone does, the + transaction deploying Deployer can be published by anyone. No other + transaction may be made from that account. + +2) Anyone deploys the Router through the Deployer. This uses a sequential nonce + such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. + While such attacks would still be feasible if the Deployer's address was + controllable, the usage of a deterministic signature with a NUMS method + prevents that. + +This doesn't have any denial-of-service risks and will resolve once anyone steps +forward as deployer. This does fail to guarantee an identical address across +every chain, though it enables letting anyone efficiently ask the Deployer for +the address (with the Deployer having an identical address on every chain). + +Unfortunately, guaranteeing identical addresses isn't feasible. We'd need the +Deployer contract to use a consistent salt for the Router, yet the Router must +be deployed with a specific public key for Serai. Since Ethereum isn't able to +determine a valid public key (one the result of a Serai DKG) from a dishonest +public key, we have to allow multiple deployments with Serai being the one to +determine which to use. + +The alternative would be to have a council publish the Serai key on-Ethereum, +with Serai verifying the published result. This would introduce a DoS risk in +the council not publishing the correct key/not publishing any key.
+*/ + +contract Deployer { + event Deployment(bytes32 indexed init_code_hash, address created); + + error DeploymentFailed(); + + function deploy(bytes memory init_code) external { + address created; + assembly { + created := create(0, add(init_code, 0x20), mload(init_code)) + } + if (created == address(0)) { + revert DeploymentFailed(); + } + // These may be emitted out of order upon re-entrancy + emit Deployment(keccak256(init_code), created); + } +} diff --git a/coins/ethereum/contracts/IERC20.sol b/coins/ethereum/contracts/IERC20.sol new file mode 100644 index 00000000..70f1f93c --- /dev/null +++ b/coins/ethereum/contracts/IERC20.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: CC0 +pragma solidity ^0.8.0; + +interface IERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() external view returns (string memory); + function symbol() external view returns (string memory); + function decimals() external view returns (uint8); + + function totalSupply() external view returns (uint256); + + function balanceOf(address owner) external view returns (uint256); + function transfer(address to, uint256 value) external returns (bool); + function transferFrom(address from, address to, uint256 value) external returns (bool); + + function approve(address spender, uint256 value) external returns (bool); + function allowance(address owner, address spender) external view returns (uint256); +} diff --git a/coins/ethereum/contracts/Router.sol b/coins/ethereum/contracts/Router.sol index 25775ec5..c5e1efa2 100644 --- a/coins/ethereum/contracts/Router.sol +++ b/coins/ethereum/contracts/Router.sol @@ -1,27 +1,24 @@ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; +import "./IERC20.sol"; + import "./Schnorr.sol"; +import "./Sandbox.sol"; -contract Router is Schnorr { - // Contract initializer - // TODO: Replace with a MuSig of the genesis validators 
- address public initializer; - - // Nonce is incremented for each batch of transactions executed +contract Router { + // Nonce is incremented for each batch of transactions executed/key update uint256 public nonce; - // fixed parity for the public keys used in this contract - uint8 constant public KEY_PARITY = 27; - - // current public key's x-coordinate - // note: this key must always use the fixed parity defined above + // Current public key's x-coordinate + // This key must always have the parity defined within the Schnorr contract bytes32 public seraiKey; struct OutInstruction { address to; + Call[] calls; + uint256 value; - bytes data; } struct Signature { @@ -29,62 +26,197 @@ contract Router is Schnorr { bytes32 s; } + event SeraiKeyUpdated( + uint256 indexed nonce, + bytes32 indexed key, + Signature signature + ); + event InInstruction( + address indexed from, + address indexed coin, + uint256 amount, + bytes instruction + ); // success is a uint256 representing a bitfield of transaction successes - event Executed(uint256 nonce, bytes32 batch, uint256 success); + event Executed( + uint256 indexed nonce, + bytes32 indexed batch, + uint256 success, + Signature signature + ); // error types - error NotInitializer(); - error AlreadyInitialized(); error InvalidKey(); + error InvalidSignature(); + error InvalidAmount(); + error FailedTransfer(); error TooManyTransactions(); - constructor() { - initializer = msg.sender; + modifier _updateSeraiKeyAtEndOfFn( + uint256 _nonce, + bytes32 key, + Signature memory sig + ) { + if ( + (key == bytes32(0)) || + ((bytes32(uint256(key) % Schnorr.Q)) != key) + ) { + revert InvalidKey(); + } + + _; + + seraiKey = key; + emit SeraiKeyUpdated(_nonce, key, sig); } - // initSeraiKey can be called by the contract initializer to set the first - // public key, only if the public key has yet to be set. 
- function initSeraiKey(bytes32 _seraiKey) external { - if (msg.sender != initializer) revert NotInitializer(); - if (seraiKey != 0) revert AlreadyInitialized(); - if (_seraiKey == bytes32(0)) revert InvalidKey(); - seraiKey = _seraiKey; + constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( + 0, + _seraiKey, + Signature({ c: bytes32(0), s: bytes32(0) }) + ) { + nonce = 1; } - // updateSeraiKey validates the given Schnorr signature against the current public key, - // and if successful, updates the contract's public key to the given one. + // updateSeraiKey validates the given Schnorr signature against the current + // public key, and if successful, updates the contract's public key to the + // given one. function updateSeraiKey( bytes32 _seraiKey, - Signature memory sig - ) public { - if (_seraiKey == bytes32(0)) revert InvalidKey(); - bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey)); - if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); - seraiKey = _seraiKey; + Signature calldata sig + ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { + bytes memory message = + abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); + nonce++; + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } } - // execute accepts a list of transactions to execute as well as a Schnorr signature. 
+ function inInstruction( + address coin, + uint256 amount, + bytes memory instruction + ) external payable { + if (coin == address(0)) { + if (amount != msg.value) { + revert InvalidAmount(); + } + } else { + (bool success, bytes memory res) = + address(coin).call( + abi.encodeWithSelector( + IERC20.transferFrom.selector, + msg.sender, + address(this), + amount + ) + ); + + // Require there was nothing returned, which is done by some non-standard + // tokens, or that the ERC20 contract did in fact return true + bool nonStandardResOrTrue = + (res.length == 0) || abi.decode(res, (bool)); + if (!(success && nonStandardResOrTrue)) { + revert FailedTransfer(); + } + } + + /* + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. + The amount instructed to transfer may not actually be the amount + transferred. + + If we add nonReentrant to every single function which can effect the + balance, we can check the amount exactly matches. This prevents transfers of + less value than expected occurring, at least, not without an additional + transfer to top up the difference (which isn't routed through this contract + and accordingly isn't trying to artificially create events). + + If we don't add nonReentrant, a transfer can be started, and then a new + transfer for the difference can follow it up (again and again until a + rounding error is reached). This contract would believe all transfers were + done in full, despite each only being done in part (except for the last + one). + + Given fee-on-transfer tokens aren't intended to be supported, the only + token planned to be supported is Dai and it doesn't have any fee-on-transfer + logic, fee-on-transfer tokens aren't even able to be supported at this time, + we simply classify this entire class of tokens as non-standard + implementations which induce undefined behavior. It is the Serai network's + role not to add support for any non-standard implementations. 
+ */ + emit InInstruction(msg.sender, coin, amount, instruction); + } + + // execute accepts a list of transactions to execute as well as a signature. // if signature verification passes, the given transactions are executed. // if signature verification fails, this function will revert. function execute( OutInstruction[] calldata transactions, - Signature memory sig - ) public { - if (transactions.length > 256) revert TooManyTransactions(); + Signature calldata sig + ) external { + if (transactions.length > 256) { + revert TooManyTransactions(); + } - bytes32 message = keccak256(abi.encode("execute", nonce, transactions)); + bytes memory message = + abi.encode("execute", block.chainid, nonce, transactions); + uint256 executed_with_nonce = nonce; // This prevents re-entrancy from causing double spends yet does allow // out-of-order execution via re-entrancy nonce++; - if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } uint256 successes; - for(uint256 i = 0; i < transactions.length; i++) { - (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data); + for (uint256 i = 0; i < transactions.length; i++) { + bool success; + + // If there are no calls, send to `to` the value + if (transactions[i].calls.length == 0) { + (success, ) = transactions[i].to.call{ + value: transactions[i].value, + gas: 5_000 + }(""); + } else { + // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the + // calls through that + // + // We could use a single sandbox in order to reduce gas costs, yet that + // risks one person creating an approval that's hooked before another + // user's intended action executes, in order to drain their coins + // + // While technically, that would be a flaw in the sandboxed flow, this + // is robust and prevents such flaws from being possible + // + // We also don't want people to set state via the Sandbox and expect it + // future available when anyone else could set a distinct value + Sandbox sandbox = new Sandbox(); + (success, ) = address(sandbox).call{ + value: transactions[i].value, + // TODO: Have the Call specify the gas up front + gas: 350_000 + }( + abi.encodeWithSelector( + Sandbox.sandbox.selector, + transactions[i].calls + ) + ); + } + assembly { successes := or(successes, shl(i, success)) } } - emit Executed(nonce, message, successes); + emit Executed( + executed_with_nonce, + keccak256(message), + successes, + sig + ); } } diff --git a/coins/ethereum/contracts/Sandbox.sol b/coins/ethereum/contracts/Sandbox.sol new file mode 100644 index 00000000..a82a3afd --- /dev/null +++ b/coins/ethereum/contracts/Sandbox.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.24; + +struct Call { + address to; + uint256 value; + bytes data; +} + +// A minimal sandbox focused on gas efficiency. +// +// The first call is executed if any of the calls fail, making it a fallback. +// All other calls are executed sequentially. 
+contract Sandbox { + error AlreadyCalled(); + error CallsFailed(); + + function sandbox(Call[] calldata calls) external payable { + // Prevent re-entrancy due to this executing arbitrary calls from anyone + // and anywhere + bool called; + assembly { called := tload(0) } + if (called) { + revert AlreadyCalled(); + } + assembly { tstore(0, 1) } + + // Execute the calls, starting from 1 + for (uint256 i = 1; i < calls.length; i++) { + (bool success, ) = + calls[i].to.call{ value: calls[i].value }(calls[i].data); + + // If this call failed, execute the fallback (call 0) + if (!success) { + (success, ) = + calls[0].to.call{ value: address(this).balance }(calls[0].data); + // If this call also failed, revert entirely + if (!success) { + revert CallsFailed(); + } + return; + } + } + + // We don't clear the re-entrancy guard as this contract should never be + // called again, so there's no reason to spend the effort + } +} diff --git a/coins/ethereum/contracts/Schnorr.sol b/coins/ethereum/contracts/Schnorr.sol index 47263e66..8edcdffd 100644 --- a/coins/ethereum/contracts/Schnorr.sol +++ b/coins/ethereum/contracts/Schnorr.sol @@ -2,38 +2,43 @@ pragma solidity ^0.8.0; // see https://github.com/noot/schnorr-verify for implementation details -contract Schnorr { +library Schnorr { // secp256k1 group order uint256 constant public Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - error InvalidSOrA(); - error InvalidSignature(); + // Fixed parity for the public keys used in this contract + // This avoids spending a word passing the parity in a similar style to + // Bitcoin's Taproot + uint8 constant public KEY_PARITY = 27; - // parity := public key y-coord parity (27 or 28) - // px := public key x-coord + error InvalidSOrA(); + error MalformedSignature(); + + // px := public key x-coord, where the public key has a parity of KEY_PARITY // message := 32-byte hash of the message // c := schnorr signature challenge // s := schnorr signature function verify( 
- uint8 parity, bytes32 px, - bytes32 message, + bytes memory message, bytes32 c, bytes32 s - ) public view returns (bool) { - // ecrecover = (m, v, r, s); + ) internal pure returns (bool) { + // ecrecover = (m, v, r, s) -> key + // We instead pass the following to obtain the nonce (not the key) + // Then we hash it and verify it matches the challenge bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); + // For safety, we want each input to ecrecover to be non-zero (sa, px, ca) + // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero + // That leaves us to check `sa` is non-zero if (sa == 0) revert InvalidSOrA(); - // the ecrecover precompile implementation checks that the `r` and `s` - // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to - // check if they're zero. - address R = ecrecover(sa, parity, px, ca); - if (R == address(0)) revert InvalidSignature(); - return c == keccak256( - abi.encodePacked(R, uint8(parity), px, block.chainid, message) - ); + address R = ecrecover(sa, KEY_PARITY, px, ca); + if (R == address(0)) revert MalformedSignature(); + + // Check the signature is correct by rebuilding the challenge + return c == keccak256(abi.encodePacked(R, px, message)); } } diff --git a/coins/ethereum/src/abi/mod.rs b/coins/ethereum/src/abi/mod.rs index 2d7dd47c..1ae23374 100644 --- a/coins/ethereum/src/abi/mod.rs +++ b/coins/ethereum/src/abi/mod.rs @@ -1,6 +1,37 @@ +use alloy_sol_types::sol; + #[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] #[allow(clippy::all)] -pub(crate) mod schnorr; +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod erc20_container { + use super::*; + sol!("contracts/IERC20.sol"); +} +pub use erc20_container::IERC20 as erc20; + #[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] #[allow(clippy::all)] -pub(crate) mod router;
+#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod deployer_container { + use super::*; + sol!("contracts/Deployer.sol"); +} +pub use deployer_container::Deployer as deployer; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod router_container { + use super::*; + sol!(Router, "artifacts/Router.abi"); +} +pub use router_container::Router as router; diff --git a/coins/ethereum/src/crypto.rs b/coins/ethereum/src/crypto.rs index 5f681cfa..ca228eb5 100644 --- a/coins/ethereum/src/crypto.rs +++ b/coins/ethereum/src/crypto.rs @@ -1,91 +1,185 @@ -use sha3::{Digest, Keccak256}; - use group::ff::PrimeField; use k256::{ - elliptic_curve::{ - bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint, - }, - ProjectivePoint, Scalar, U256, + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, }; +#[cfg(test)] +use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; use frost::{ algorithm::{Hram, SchnorrSignature}, - curve::Secp256k1, + curve::{Ciphersuite, Secp256k1}, }; +use alloy_core::primitives::{Parity, Signature as AlloySignature}; +use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; + +use crate::abi::router::{Signature as AbiSignature}; + pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { - Keccak256::digest(data).into() + alloy_core::primitives::keccak256(data).into() } -pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] { +pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { + >::reduce_bytes(&keccak256(data).into()) +} + +pub fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); // Last 20 bytes of the hash of the concatenated x and y coordinates // We obtain the concatenated x and y coordinates via the 
uncompressed encoding of the point keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() } +pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed { + assert!( + tx.chain_id.is_none(), + "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" + ); + + let sig_hash = tx.signature_hash().0; + let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); + let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); + loop { + let r_bytes: [u8; 32] = r.to_repr().into(); + let s_bytes: [u8; 32] = s.to_repr().into(); + let v = Parity::NonEip155(false); + let signature = + AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); + let tx = tx.clone().into_signed(signature); + if tx.recover_signer().is_ok() { + return tx; + } + + // Re-hash until valid + r = hash_to_scalar(r_bytes.as_ref()); + s = hash_to_scalar(s_bytes.as_ref()); + } +} + +/// The public key for a Schnorr-signing account. #[allow(non_snake_case)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct PublicKey { - pub A: ProjectivePoint, - pub px: Scalar, - pub parity: u8, + pub(crate) A: ProjectivePoint, + pub(crate) px: Scalar, } impl PublicKey { + /// Construct a new `PublicKey`. + /// + /// This will return None if the provided point isn't eligible to be a public key (due to + /// bounds such as parity). 
#[allow(non_snake_case)] pub fn new(A: ProjectivePoint) -> Option { let affine = A.to_affine(); - let parity = u8::from(bool::from(affine.y_is_odd())) + 27; - if parity != 27 { + // Only allow even keys to save a word within Ethereum + let is_odd = bool::from(affine.y_is_odd()); + if is_odd { None?; } let x_coord = affine.x(); - let x_coord_scalar = >::reduce_bytes(&x_coord); + let x_coord_scalar = >::reduce_bytes(&x_coord); // Return None if a reduction would occur + // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less + // headache/concern to have + // This does ban a trivial amoount of public keys if x_coord_scalar.to_repr() != x_coord { None?; } - Some(PublicKey { A, px: x_coord_scalar, parity }) + Some(PublicKey { A, px: x_coord_scalar }) + } + + pub fn point(&self) -> ProjectivePoint { + self.A + } + + pub(crate) fn eth_repr(&self) -> [u8; 32] { + self.px.to_repr().into() + } + + #[cfg(test)] + pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { + #[allow(non_snake_case)] + let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); + Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) } } +/// The HRAm to use for the Schnorr contract. #[derive(Clone, Default)] pub struct EthereumHram {} impl Hram for EthereumHram { #[allow(non_snake_case)] fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let a_encoded_point = A.to_encoded_point(true); - let mut a_encoded = a_encoded_point.as_ref().to_owned(); - a_encoded[0] += 25; // Ethereum uses 27/28 for point parity - assert!((a_encoded[0] == 27) || (a_encoded[0] == 28)); + let x_coord = A.to_affine().x(); + let mut data = address(R).to_vec(); - data.append(&mut a_encoded); + data.extend(x_coord.as_slice()); data.extend(m); - Scalar::reduce(U256::from_be_slice(&keccak256(&data))) + + >::reduce_bytes(&keccak256(&data).into()) } } +/// A signature for the Schnorr contract. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Signature { pub(crate) c: Scalar, pub(crate) s: Scalar, } impl Signature { + pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { + #[allow(non_snake_case)] + let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); + EthereumHram::hram(&R, &public_key.A, message) == self.c + } + + /// Construct a new `Signature`. + /// + /// This will return None if the signature is invalid. pub fn new( public_key: &PublicKey, - chain_id: U256, - m: &[u8], + message: &[u8], signature: SchnorrSignature, ) -> Option { - let c = EthereumHram::hram( - &signature.R, - &public_key.A, - &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), - ); + let c = EthereumHram::hram(&signature.R, &public_key.A, message); if !signature.verify(public_key.A, c) { None?; } - Some(Signature { c, s: signature.s }) + + let res = Signature { c, s: signature.s }; + assert!(res.verify(public_key, message)); + Some(res) + } + + pub fn c(&self) -> Scalar { + self.c + } + pub fn s(&self) -> Scalar { + self.s + } + + pub fn to_bytes(&self) -> [u8; 64] { + let mut res = [0; 64]; + res[.. 
32].copy_from_slice(self.c.to_repr().as_ref()); + res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); + res + } + + pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { + let mut reader = bytes.as_slice(); + let c = Secp256k1::read_F(&mut reader)?; + let s = Secp256k1::read_F(&mut reader)?; + Ok(Signature { c, s }) + } +} +impl From<&Signature> for AbiSignature { + fn from(sig: &Signature) -> AbiSignature { + let c: [u8; 32] = sig.c.to_repr().into(); + let s: [u8; 32] = sig.s.to_repr().into(); + AbiSignature { c: c.into(), s: s.into() } } } diff --git a/coins/ethereum/src/deployer.rs b/coins/ethereum/src/deployer.rs new file mode 100644 index 00000000..d6cfeee9 --- /dev/null +++ b/coins/ethereum/src/deployer.rs @@ -0,0 +1,119 @@ +use std::sync::Arc; + +use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; +use alloy_consensus::{Signed, TxLegacy}; + +use alloy_sol_types::{SolCall, SolEvent}; + +use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::{ + Error, + crypto::{self, keccak256, PublicKey}, + router::Router, +}; +pub use crate::abi::deployer as abi; + +/// The Deployer contract for the Router contract. +/// +/// This Deployer has a deterministic address, letting it be immediately identified on any +/// compatible chain. It then supports retrieving the Router contract's address (which isn't +/// deterministic) using a single log query. +#[derive(Clone, Debug)] +pub struct Deployer; +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone, + /// so ETH sent can be neither misappropriated nor returned. 
+ pub fn deployment_tx() -> Signed { + let bytecode = include_str!("../artifacts/Deployer.bin"); + let bytecode = + Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + // TODO: Use a more accurate gas limit + gas_limit: 1_000_000u128, + to: TxKind::Create, + value: U256::ZERO, + input: bytecode, + }; + + crypto::deterministically_sign(&tx) + } + + /// Obtain the deterministic address for this contract. + pub fn address() -> [u8; 20] { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + **Address::create(&deployer_deployer, 0) + } + + /// Construct a new view of the `Deployer`. + pub async fn new(provider: Arc>) -> Result, Error> { + let address = Self::address(); + #[cfg(not(test))] + let required_block = BlockNumberOrTag::Finalized; + #[cfg(test)] + let required_block = BlockNumberOrTag::Latest; + let code = provider + .get_code_at(address.into(), required_block.into()) + .await + .map_err(|_| Error::ConnectionError)?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self)) + } + + /// Yield the `ContractCall` necessary to deploy the Router. + pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { + TxLegacy { + to: TxKind::Call(Self::address().into()), + input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), + gas_limit: 1_000_000, + ..Default::default() + } + } + + /// Find the first Router deployed with the specified key as its first key. + /// + /// This is the Router Serai will use, and is the only way to construct a `Router`. 
+ pub async fn find_router( + &self, + provider: Arc>, + key: &PublicKey, + ) -> Result, Error> { + let init_code = Router::init_code(key); + let init_code_hash = keccak256(&init_code); + + #[cfg(not(test))] + let to_block = BlockNumberOrTag::Finalized; + #[cfg(test)] + let to_block = BlockNumberOrTag::Latest; + + // Find the first log using this init code (where the init code is binding to the key) + let filter = + Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); + let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); + let filter = filter.topic1(B256::from(init_code_hash)); + let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let Some(first_log) = logs.first() else { return Ok(None) }; + let router = first_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .created; + + Ok(Some(Router::new(provider, router))) + } +} diff --git a/coins/ethereum/src/erc20.rs b/coins/ethereum/src/erc20.rs new file mode 100644 index 00000000..3b5bbee2 --- /dev/null +++ b/coins/ethereum/src/erc20.rs @@ -0,0 +1,118 @@ +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::{Address, B256, U256}; + +use alloy_sol_types::{SolInterface, SolEvent}; + +use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::Error; +pub use crate::abi::erc20 as abi; +use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; + +#[derive(Clone, Debug)] +pub struct TopLevelErc20Transfer { + pub id: [u8; 32], + pub from: [u8; 20], + pub amount: U256, + pub data: Vec, +} + +/// A view for an ERC20 contract. +#[derive(Clone, Debug)] +pub struct ERC20(Arc>, Address); +impl ERC20 { + /// Construct a new view of the specified ERC20 contract. + /// + /// This checks a contract is deployed at that address yet does not check the contract is + /// actually an ERC20. 
+ pub async fn new( + provider: Arc>, + address: [u8; 20], + ) -> Result, Error> { + let code = provider + .get_code_at(address.into(), BlockNumberOrTag::Finalized.into()) + .await + .map_err(|_| Error::ConnectionError)?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self(provider.clone(), Address::from(&address)))) + } + + pub async fn top_level_transfers( + &self, + block: u64, + to: [u8; 20], + ) -> Result, Error> { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(Transfer::SIGNATURE_HASH); + let mut to_topic = [0; 32]; + to_topic[12 ..].copy_from_slice(&to); + let filter = filter.topic2(B256::from(to_topic)); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut handled = HashSet::new(); + + let mut top_level_transfers = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?; + + // If this is a top-level call... + if tx.to == Some(self.1) { + // And we recognize the call... 
+ // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the InInstruction appended + if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { + // Extract the top-level call's from/to/value + let (from, call_to, value) = match call { + IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), + IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { + (from, call_to, value) + } + // Treat any other function selectors as unrecognized + _ => continue, + }; + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an + // internal transfer + if (log.from != from) || (call_to != to) || (value != log.value) { + continue; + } + + // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's + // the only log we handle + if handled.contains(&tx_id) { + continue; + } + handled.insert(tx_id); + + // Read the data appended after + let encoded = call.abi_encode(); + let data = tx.input.as_ref()[encoded.len() ..].to_vec(); + + // Push the transfer + top_level_transfers.push(TopLevelErc20Transfer { + // Since we'll only handle one log for this TX, set the ID to the TX ID + id: *tx_id, + from: *log.from.0, + amount: log.value, + data, + }); + } + } + } + Ok(top_level_transfers) + } +} diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index 505de38e..8d4a5312 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,16 +1,30 @@ use thiserror::Error; +pub use alloy_core; +pub use alloy_consensus; + +pub use alloy_rpc_types; +pub use alloy_simple_request_transport; +pub use alloy_rpc_client; +pub use alloy_provider; + pub mod crypto; pub(crate) mod abi; -pub mod schnorr; + +pub mod erc20; +pub mod deployer; pub mod router; +pub mod machine; + #[cfg(test)] mod tests; -#[derive(Error, Debug)] +#[derive(Clone, 
Copy, PartialEq, Eq, Debug, Error)] pub enum Error { #[error("failed to verify Schnorr signature")] InvalidSignature, + #[error("couldn't make call/send TX")] + ConnectionError, } diff --git a/coins/ethereum/src/machine.rs b/coins/ethereum/src/machine.rs new file mode 100644 index 00000000..0d5dc7a5 --- /dev/null +++ b/coins/ethereum/src/machine.rs @@ -0,0 +1,414 @@ +use std::{ + io::{self, Read}, + collections::HashMap, +}; + +use rand_core::{RngCore, CryptoRng}; + +use transcript::{Transcript, RecommendedTranscript}; + +use group::GroupEncoding; +use frost::{ + curve::{Ciphersuite, Secp256k1}, + Participant, ThresholdKeys, FrostError, + algorithm::Schnorr, + sign::*, +}; + +use alloy_core::primitives::U256; + +use crate::{ + crypto::{PublicKey, EthereumHram, Signature}, + router::{ + abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, + Router, + }, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Call { + pub to: [u8; 20], + pub value: U256, + pub data: Vec, +} +impl Call { + pub fn read(reader: &mut R) -> io::Result { + let mut to = [0; 20]; + reader.read_exact(&mut to)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + let mut data_len = { + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") + }; + + // A valid DoS would be to claim a 4 GB data is present for only 4 bytes + // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) + let mut data = vec![]; + while data_len > 0 { + let chunk_len = data_len.min(1024); + let mut chunk = vec![0; chunk_len]; + reader.read_exact(&mut chunk)?; + data.extend(&chunk); + data_len -= chunk_len; + } + + Ok(Call { to, value, data }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.to)?; + writer.write_all(&self.value.as_le_bytes())?; + + let data_len = 
u32::try_from(self.data.len()) + .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; + writer.write_all(&data_len.to_le_bytes())?; + writer.write_all(&self.data) + } +} +impl From for AbiCall { + fn from(call: Call) -> AbiCall { + AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum OutInstructionTarget { + Direct([u8; 20]), + Calls(Vec), +} +impl OutInstructionTarget { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut addr = [0; 20]; + reader.read_exact(&mut addr)?; + Ok(OutInstructionTarget::Direct(addr)) + } + 1 => { + let mut calls_len = [0; 4]; + reader.read_exact(&mut calls_len)?; + let calls_len = u32::from_le_bytes(calls_len); + + let mut calls = vec![]; + for _ in 0 .. calls_len { + calls.push(Call::read(reader)?); + } + Ok(OutInstructionTarget::Calls(calls)) + } + _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + OutInstructionTarget::Direct(addr) => { + writer.write_all(&[0])?; + writer.write_all(addr)?; + } + OutInstructionTarget::Calls(calls) => { + writer.write_all(&[1])?; + let call_len = u32::try_from(calls.len()) + .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; + writer.write_all(&call_len.to_le_bytes())?; + for call in calls { + call.write(writer)?; + } + } + } + Ok(()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct OutInstruction { + pub target: OutInstructionTarget, + pub value: U256, +} +impl OutInstruction { + fn read(reader: &mut R) -> io::Result { + let target = OutInstructionTarget::read(reader)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + Ok(OutInstruction { target, value }) + } + fn write(&self, writer: &mut W) -> io::Result<()> { + 
self.target.write(writer)?; + writer.write_all(&self.value.as_le_bytes()) + } +} +impl From for AbiOutInstruction { + fn from(instruction: OutInstruction) -> AbiOutInstruction { + match instruction.target { + OutInstructionTarget::Direct(addr) => { + AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } + } + OutInstructionTarget::Calls(calls) => AbiOutInstruction { + to: [0; 20].into(), + calls: calls.into_iter().map(Into::into).collect(), + value: instruction.value, + }, + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum RouterCommand { + UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, + Execute { chain_id: U256, nonce: U256, outs: Vec }, +} + +impl RouterCommand { + pub fn msg(&self) -> Vec { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + Router::update_serai_key_message(*chain_id, *nonce, key) + } + RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( + *chain_id, + *nonce, + outs.iter().map(|out| out.clone().into()).collect(), + ), + } + } + + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + + let key = PublicKey::new(Secp256k1::read_G(reader)?) 
+ .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; + Ok(RouterCommand::UpdateSeraiKey { + chain_id: U256::from_le_slice(&chain_id), + nonce: U256::from_le_slice(&nonce), + key, + }) + } + 1 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + let chain_id = U256::from_le_slice(&chain_id); + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + let nonce = U256::from_le_slice(&nonce); + + let mut outs_len = [0; 4]; + reader.read_exact(&mut outs_len)?; + let outs_len = u32::from_le_bytes(outs_len); + + let mut outs = vec![]; + for _ in 0 .. outs_len { + outs.push(OutInstruction::read(reader)?); + } + + Ok(RouterCommand::Execute { chain_id, nonce, outs }) + } + _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, + } + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&key.A.to_bytes()) + } + RouterCommand::Execute { chain_id, nonce, outs } => { + writer.write_all(&[1])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; + for out in outs { + out.write(writer)?; + } + Ok(()) + } + } + } + + pub fn serialize(&self) -> Vec { + let mut res = vec![]; + self.write(&mut res).unwrap(); + res + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct SignedRouterCommand { + command: RouterCommand, + signature: Signature, +} + +impl SignedRouterCommand { + pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { + let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?; + let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; + let signature = Signature { c, s }; + + if !signature.verify(key, &command.msg()) { + None? 
+ } + Some(SignedRouterCommand { command, signature }) + } + + pub fn command(&self) -> &RouterCommand { + &self.command + } + + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn read(reader: &mut R) -> io::Result { + let command = RouterCommand::read(reader)?; + + let mut sig = [0; 64]; + reader.read_exact(&mut sig)?; + let signature = Signature::from_bytes(sig)?; + + Ok(SignedRouterCommand { command, signature }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + self.command.write(writer)?; + writer.write_all(&self.signature.to_bytes()) + } +} + +pub struct RouterCommandMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmMachine>, +} + +impl RouterCommandMachine { + pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { + // The Schnorr algorithm should be fine without this, even when using the IETF variant + // If this is better and more comprehensive, we should do it, even if not necessary + let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); + let key = keys.group_key(); + transcript.append_message(b"key", key.to_bytes()); + transcript.append_message(b"command", command.serialize()); + + Some(Self { + key: PublicKey::new(key)?, + command, + machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), + }) + } +} + +impl PreprocessMachine for RouterCommandMachine { + type Preprocess = Preprocess; + type Signature = SignedRouterCommand; + type SignMachine = RouterCommandSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = self.machine.preprocess(rng); + + (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) + } +} + +pub struct RouterCommandSignMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmSignMachine>, +} + +impl SignMachine for RouterCommandSignMachine { + type Params = (); + type Keys = ThresholdKeys; 
+ type Preprocess = Preprocess; + type SignatureShare = SignatureShare; + type SignatureMachine = RouterCommandSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn from_cache( + (): (), + _: ThresholdKeys, + _: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { + if !msg.is_empty() { + panic!("message was passed to a RouterCommand machine when it generates its own"); + } + + let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; + + Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) + } +} + +pub struct RouterCommandSignatureMachine { + key: PublicKey, + command: RouterCommand, + machine: + AlgorithmSignatureMachine>, +} + +impl SignatureMachine for RouterCommandSignatureMachine { + type SignatureShare = SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + let sig = self.machine.complete(shares)?; + let signature = Signature::new(&self.key, &self.command.msg(), sig) + .expect("machine produced an invalid signature"); + Ok(SignedRouterCommand { command: self.command, signature }) + } +} diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs index 3696fd9b..c4399ae3 100644 --- a/coins/ethereum/src/router.rs +++ b/coins/ethereum/src/router.rs @@ -1,30 +1,426 @@ -pub use crate::abi::router::*; +use std::{sync::Arc, 
io, collections::HashSet}; -/* -use crate::crypto::{ProcessedSignature, PublicKey}; -use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode}; -use eyre::Result; -use std::{convert::From, fs::File, sync::Arc}; +use k256::{ + elliptic_curve::{group::GroupEncoding, sec1}, + ProjectivePoint, +}; -pub async fn router_update_public_key( - contract: &Router, - public_key: &PublicKey, - signature: &ProcessedSignature, -) -> std::result::Result, eyre::ErrReport> { - let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into()); - let pending_tx = tx.send().await?; - let receipt = pending_tx.await?; - Ok(receipt) +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +#[cfg(test)] +use alloy_core::primitives::B256; +use alloy_consensus::TxLegacy; + +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_rpc_types::Filter; +#[cfg(test)] +use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +pub use crate::{ + Error, + crypto::{PublicKey, Signature}, + abi::{erc20::Transfer, router as abi}, +}; +use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Coin { + Ether, + Erc20([u8; 20]), } -pub async fn router_execute( - contract: &Router, - txs: Vec, - signature: &ProcessedSignature, -) -> std::result::Result, eyre::ErrReport> { - let tx = contract.execute(txs, signature.into()).send(); - let pending_tx = tx.send().await?; - let receipt = pending_tx.await?; - Ok(receipt) +impl Coin { + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + Ok(match kind[0] { + 0 => Coin::Ether, + 1 => { + let mut address = [0; 20]; + reader.read_exact(&mut address)?; + Coin::Erc20(address) + } + _ => Err(io::Error::other("unrecognized 
Coin type"))?, + }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Coin::Ether => writer.write_all(&[0]), + Coin::Erc20(token) => { + writer.write_all(&[1])?; + writer.write_all(token) + } + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct InInstruction { + pub id: ([u8; 32], u64), + pub from: [u8; 20], + pub coin: Coin, + pub amount: U256, + pub data: Vec, + pub key_at_end_of_block: ProjectivePoint, +} + +impl InInstruction { + pub fn read(reader: &mut R) -> io::Result { + let id = { + let mut id_hash = [0; 32]; + reader.read_exact(&mut id_hash)?; + let mut id_pos = [0; 8]; + reader.read_exact(&mut id_pos)?; + let id_pos = u64::from_le_bytes(id_pos); + (id_hash, id_pos) + }; + + let mut from = [0; 20]; + reader.read_exact(&mut from)?; + + let coin = Coin::read(reader)?; + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_slice(&amount); + + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + let data_len = usize::try_from(u32::from_le_bytes(data_len)) + .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; + let mut data = vec![0; data_len]; + reader.read_exact(&mut data)?; + + let mut key_at_end_of_block = ::Repr::default(); + reader.read_exact(&mut key_at_end_of_block)?; + let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) + .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; + + Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id.0)?; + writer.write_all(&self.id.1.to_le_bytes())?; + + writer.write_all(&self.from)?; + + self.coin.write(writer)?; + writer.write_all(&self.amount.as_le_bytes())?; + + writer.write_all( + &u32::try_from(self.data.len()) + .map_err(|_| { + io::Error::other("InInstruction being written had data exceeding 2**32 in length") + })? 
+ .to_le_bytes(), + )?; + writer.write_all(&self.data)?; + + writer.write_all(&self.key_at_end_of_block.to_bytes()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Executed { + pub tx_id: [u8; 32], + pub nonce: u64, + pub signature: [u8; 64], +} + +/// The contract Serai uses to manage its state. +#[derive(Clone, Debug)] +pub struct Router(Arc>, Address); +impl Router { + pub(crate) fn code() -> Vec { + let bytecode = include_str!("../artifacts/Router.bin"); + Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() + } + + pub(crate) fn init_code(key: &PublicKey) -> Vec { + let mut bytecode = Self::code(); + // Append the constructor arguments + bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); + bytecode + } + + // This isn't pub in order to force users to use `Deployer::find_router`. + pub(crate) fn new(provider: Arc>, address: Address) -> Self { + Self(provider, address) + } + + pub fn address(&self) -> [u8; 20] { + **self.1 + } + + /// Get the key for Serai at the specified block. + #[cfg(test)] + pub async fn serai_key(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(Some(self.1)) + .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) + } + + /// Get the message to be signed in order to update the key for Serai. 
+ pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { + let mut buffer = b"updateSeraiKey".to_vec(); + buffer.extend(&chain_id.to_be_bytes::<32>()); + buffer.extend(&nonce.to_be_bytes::<32>()); + buffer.extend(&key.eth_repr()); + buffer + } + + /// Update the key representing Serai. + pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + // TODO: Set a more accurate gas + TxLegacy { + to: TxKind::Call(self.1), + input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) + .abi_encode() + .into(), + gas_limit: 100_000, + ..Default::default() + } + } + + /// Get the current nonce for the published batches. + #[cfg(test)] + pub async fn nonce(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(Some(self.1)) + .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + Ok(res._0) + } + + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn execute_message( + chain_id: U256, + nonce: U256, + outs: Vec, + ) -> Vec { + ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() + } + + /// Execute a batch of `OutInstruction`s. 
+ pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.1), + input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), + // TODO + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), + ..Default::default() + } + } + + pub async fn in_instructions( + &self, + block: u64, + allowed_tokens: &HashSet<[u8; 20]>, + ) -> Result, Error> { + let key_at_end_of_block = { + let filter = Filter::new().from_block(0).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; + let last_key_x_coordinate = last_key_x_coordinate_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .key; + + let mut compressed_point = ::Repr::default(); + compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); + compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); + + ProjectivePoint::from_bytes(&compressed_point).expect("router's last key wasn't a valid key") + }; + + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut transfer_check = HashSet::new(); + let mut in_instructions = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let id = ( + log.block_hash.ok_or(Error::ConnectionError)?.into(), + log.log_index.ok_or(Error::ConnectionError)?, + ); + + let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?; + + let log = + 
log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let coin = if log.coin.0 == [0; 20] { + Coin::Ether + } else { + let token = *log.coin.0; + + if !allowed_tokens.contains(&token) { + continue; + } + + // If this also counts as a top-level transfer via the token, drop it + // + // Necessary in order to handle a potential edge case with some theoretical token + // implementations + // + // This will either let it be handled by the top-level transfer hook or will drop it + // entirely on the side of caution + if tx.to == Some(token.into()) { + continue; + } + + // Get all logs for this TX + let receipt = self + .0 + .get_transaction_receipt(tx_hash) + .await + .map_err(|_| Error::ConnectionError)? + .ok_or(Error::ConnectionError)?; + let tx_logs = receipt.inner.logs(); + + // Find a matching transfer log + let mut found_transfer = false; + for tx_log in tx_logs { + let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; + // Ensure we didn't already use this transfer to check a distinct InInstruction event + if transfer_check.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if tx_log.address().0 != token { + continue; + } + // Check if this is a transfer log + // https://github.com/alloy-rs/core/issues/589 + if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { + continue; + } + let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; + // Check if this is a transfer to us for the expected amount + if (transfer.to == self.1) && (transfer.value == log.amount) { + transfer_check.insert(log_index); + found_transfer = true; + break; + } + } + if !found_transfer { + // This shouldn't be a ConnectionError + // This is an exploit, a non-conforming ERC20, or an invalid connection + // This should halt the process which is sufficient, yet this is sub-optimal + // TODO + Err(Error::ConnectionError)?; + } + + Coin::Erc20(token) + }; + + 
in_instructions.push(InInstruction { + id, + from: *log.from.0, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + key_at_end_of_block, + }); + } + + Ok(in_instructions) + } + + pub async fn executed_commands(&self, block: u64) -> Result, Error> { + let mut res = vec![]; + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = + log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 
32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + Ok(res) + } + + #[cfg(feature = "tests")] + pub fn key_updated_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) + } + #[cfg(feature = "tests")] + pub fn executed_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) + } } -*/ diff --git a/coins/ethereum/src/schnorr.rs b/coins/ethereum/src/schnorr.rs deleted file mode 100644 index 0e4495ec..00000000 --- a/coins/ethereum/src/schnorr.rs +++ /dev/null @@ -1,34 +0,0 @@ -use eyre::{eyre, Result}; - -use group::ff::PrimeField; - -use ethers_providers::{Provider, Http}; - -use crate::{ - Error, - crypto::{keccak256, PublicKey, Signature}, -}; -pub use crate::abi::schnorr::*; - -pub async fn call_verify( - contract: &Schnorr>, - public_key: &PublicKey, - message: &[u8], - signature: &Signature, -) -> Result<()> { - if contract - .verify( - public_key.parity, - public_key.px.to_repr().into(), - keccak256(message), - signature.c.to_repr().into(), - signature.s.to_repr().into(), - ) - .call() - .await? 
- { - Ok(()) - } else { - Err(eyre!(Error::InvalidSignature)) - } -} diff --git a/coins/ethereum/src/tests/abi/mod.rs b/coins/ethereum/src/tests/abi/mod.rs new file mode 100644 index 00000000..57ea8811 --- /dev/null +++ b/coins/ethereum/src/tests/abi/mod.rs @@ -0,0 +1,13 @@ +use alloy_sol_types::sol; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod schnorr_container { + use super::*; + sol!("src/tests/contracts/Schnorr.sol"); +} +pub(crate) use schnorr_container::TestSchnorr as schnorr; diff --git a/coins/ethereum/src/tests/contracts/ERC20.sol b/coins/ethereum/src/tests/contracts/ERC20.sol new file mode 100644 index 00000000..e157974c --- /dev/null +++ b/coins/ethereum/src/tests/contracts/ERC20.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +contract TestERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() public pure returns (string memory) { + return "Test ERC20"; + } + function symbol() public pure returns (string memory) { + return "TEST"; + } + function decimals() public pure returns (uint8) { + return 18; + } + + function totalSupply() public pure returns (uint256) { + return 1_000_000 * 10e18; + } + + mapping(address => uint256) balances; + mapping(address => mapping(address => uint256)) allowances; + + constructor() { + balances[msg.sender] = totalSupply(); + } + + function balanceOf(address owner) public view returns (uint256) { + return balances[owner]; + } + function transfer(address to, uint256 value) public returns (bool) { + balances[msg.sender] -= value; + balances[to] += value; + return true; + } + function transferFrom(address from, address to, uint256 value) public returns (bool) { + allowances[from][msg.sender] -= value; + 
balances[from] -= value; + balances[to] += value; + return true; + } + + function approve(address spender, uint256 value) public returns (bool) { + allowances[msg.sender][spender] = value; + return true; + } + function allowance(address owner, address spender) public view returns (uint256) { + return allowances[owner][spender]; + } +} diff --git a/coins/ethereum/src/tests/contracts/Schnorr.sol b/coins/ethereum/src/tests/contracts/Schnorr.sol new file mode 100644 index 00000000..832cd2fe --- /dev/null +++ b/coins/ethereum/src/tests/contracts/Schnorr.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +import "../../../contracts/Schnorr.sol"; + +contract TestSchnorr { + function verify( + bytes32 px, + bytes calldata message, + bytes32 c, + bytes32 s + ) external pure returns (bool) { + return Schnorr.verify(px, message, c, s); + } +} diff --git a/coins/ethereum/src/tests/crypto.rs b/coins/ethereum/src/tests/crypto.rs index 6dced933..a668b2d6 100644 --- a/coins/ethereum/src/tests/crypto.rs +++ b/coins/ethereum/src/tests/crypto.rs @@ -1,49 +1,33 @@ use rand_core::OsRng; -use sha2::Sha256; -use sha3::{Digest, Keccak256}; - -use group::Group; +use group::ff::{Field, PrimeField}; use k256::{ - ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey}, - elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint}, - U256, Scalar, AffinePoint, ProjectivePoint, + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + Scalar, ProjectivePoint, }; use frost::{ - curve::Secp256k1, + curve::{Ciphersuite, Secp256k1}, algorithm::{Hram, IetfSchnorr}, tests::{algorithm_machines, sign}, }; use crate::{crypto::*, tests::key_gen}; -pub fn hash_to_scalar(data: &[u8]) -> Scalar { - Scalar::reduce(U256::from_be_slice(&keccak256(data))) -} - -pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - if r.is_zero().into() || 
s.is_zero().into() || !((v == 27) || (v == 28)) { - return None; - } - - #[allow(non_snake_case)] - let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into()); - #[allow(non_snake_case)] - if let Some(R) = Option::::from(R) { - #[allow(non_snake_case)] - let R = ProjectivePoint::from(R); - - let r = r.invert().unwrap(); - let u1 = ProjectivePoint::GENERATOR * (-message * r); - let u2 = R * (s * r); - let key: ProjectivePoint = u1 + u2; - if !bool::from(key.is_identity()) { - return Some(address(&key)); - } - } - - None +// The ecrecover opcode, yet with parity replacing v +pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::Signature::from_signature_and_parity( + sig, + alloy_core::primitives::Parity::Parity(odd_y), + ) + .ok()? + .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) } #[test] @@ -55,20 +39,23 @@ fn test_ecrecover() { const MESSAGE: &[u8] = b"Hello, World!"; let (sig, recovery_id) = private .as_nonzero_scalar() - .try_sign_prehashed_rfc6979::(&Keccak256::digest(MESSAGE), b"") + .try_sign_prehashed( + ::F::random(&mut OsRng), + &keccak256(MESSAGE).into(), + ) .unwrap(); // Sanity check the signature verifies #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result { - assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ()); + assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); } // Perform the ecrecover assert_eq!( ecrecover( hash_to_scalar(MESSAGE), - u8::from(recovery_id.unwrap().is_y_odd()) + 27, + u8::from(recovery_id.unwrap().is_y_odd()) == 1, *sig.r(), *sig.s() ) @@ -93,18 +80,13 @@ fn test_signing() { pub fn preprocess_signature_for_ecrecover( R: ProjectivePoint, public_key: &PublicKey, - chain_id: U256, m: &[u8], s: Scalar, -) -> (u8, 
Scalar, Scalar) { - let c = EthereumHram::hram( - &R, - &public_key.A, - &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), - ); +) -> (Scalar, Scalar) { + let c = EthereumHram::hram(&R, &public_key.A, m); let sa = -(s * public_key.px); let ca = -(c * public_key.px); - (public_key.parity, sa, ca) + (sa, ca) } #[test] @@ -112,21 +94,12 @@ fn test_ecrecover_hack() { let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let chain_id = U256::ONE; - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - let (parity, sa, ca) = - preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s); - let q = ecrecover(sa, parity, public_key.px, ca).unwrap(); + let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); + let q = ecrecover(sa, false, public_key.px, ca).unwrap(); assert_eq!(q, address(&sig.R)); } diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs index c468cfb6..3a381d42 100644 --- a/coins/ethereum/src/tests/mod.rs +++ b/coins/ethereum/src/tests/mod.rs @@ -1,21 +1,25 @@ -use std::{sync::Arc, time::Duration, fs::File, collections::HashMap}; +use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; -use group::ff::PrimeField; use k256::{Scalar, ProjectivePoint}; use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; -use ethers_core::{ - types::{H160, Signature as EthersSignature}, - abi::Abi, +use alloy_core::{ + primitives::{Address, U256, Bytes, TxKind}, + hex::FromHex, }; -use ethers_contract::ContractFactory; -use ethers_providers::{Middleware, Provider, Http}; 
+use alloy_consensus::{SignableTransaction, TxLegacy}; -use crate::crypto::PublicKey; +use alloy_rpc_types::TransactionReceipt; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::crypto::{address, deterministically_sign, PublicKey}; mod crypto; + +mod abi; mod schnorr; mod router; @@ -36,57 +40,88 @@ pub fn key_gen() -> (HashMap>, PublicKey) (keys, public_key) } -// TODO: Replace with a contract deployment from an unknown account, so the environment solely has -// to fund the deployer, not create/pass a wallet -// TODO: Deterministic deployments across chains +// TODO: Use a proper error here +pub async fn send( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + mut tx: TxLegacy, +) -> Option { + let verifying_key = *wallet.verifying_key().as_affine(); + let address = Address::from(address(&verifying_key.into())); + + // https://github.com/alloy-rs/alloy/issues/539 + // let chain_id = provider.get_chain_id().await.unwrap(); + // tx.chain_id = Some(chain_id); + tx.chain_id = None; + tx.nonce = provider.get_transaction_count(address, None).await.unwrap(); + // 100 gwei + tx.gas_price = 100_000_000_000u128; + + let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); + assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); + assert!( + provider.get_balance(address, None).await.unwrap() > + ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) + ); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?; + pending_tx.get_receipt().await.ok() +} + +pub async fn fund_account( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + to_fund: Address, + value: U256, +) -> Option<()> { + let funding_tx = + TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() }; + assert!(send(provider, wallet, 
funding_tx).await.unwrap().status()); + + Some(()) +} + +// TODO: Use a proper error here pub async fn deploy_contract( - chain_id: u32, - client: Arc>, + client: Arc>, wallet: &k256::ecdsa::SigningKey, name: &str, -) -> eyre::Result { - let abi: Abi = - serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap(); - +) -> Option

{ let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); let hex_bin = if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; - let bin = hex::decode(hex_bin).unwrap(); - let factory = ContractFactory::new(abi, bin.into(), client.clone()); + let bin = Bytes::from_hex(hex_bin).unwrap(); - let mut deployment_tx = factory.deploy(())?.tx; - deployment_tx.set_chain_id(chain_id); - deployment_tx.set_gas(1_000_000); - let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?; - deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas); - deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas); + let deployment_tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: bin, + }; - let sig_hash = deployment_tx.sighash(); - let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap(); + let deployment_tx = deterministically_sign(&deployment_tx); - // EIP-155 v - let mut v = u64::from(rid.to_byte()); - assert!((v == 0) || (v == 1)); - v += u64::from((chain_id * 2) + 35); + // Fund the deployer address + fund_account( + &client, + wallet, + deployment_tx.recover_signer().unwrap(), + U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price), + ) + .await?; - let r = sig.r().to_repr(); - let r_ref: &[u8] = r.as_ref(); - let s = sig.s().to_repr(); - let s_ref: &[u8] = s.as_ref(); - let deployment_tx = - deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v }); + let (deployment_tx, sig, _) = deployment_tx.into_parts(); + let mut bytes = vec![]; + deployment_tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = client.send_raw_transaction(&bytes).await.ok()?; + let receipt = pending_tx.get_receipt().await.ok()?; + 
assert!(receipt.status()); - let pending_tx = client.send_raw_transaction(deployment_tx).await?; - - let mut receipt; - while { - receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?; - receipt.is_none() - } { - tokio::time::sleep(Duration::from_secs(6)).await; - } - let receipt = receipt.unwrap(); - assert!(receipt.status == Some(1.into())); - - Ok(receipt.contract_address.unwrap()) + Some(receipt.contract_address.unwrap()) } diff --git a/coins/ethereum/src/tests/router.rs b/coins/ethereum/src/tests/router.rs index c9be93be..39a865bd 100644 --- a/coins/ethereum/src/tests/router.rs +++ b/coins/ethereum/src/tests/router.rs @@ -2,7 +2,8 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap}; use rand_core::OsRng; -use group::ff::PrimeField; +use group::Group; +use k256::ProjectivePoint; use frost::{ curve::Secp256k1, Participant, ThresholdKeys, @@ -10,100 +11,173 @@ use frost::{ tests::{algorithm_machines, sign}, }; -use ethers_core::{ - types::{H160, U256, Bytes}, - abi::AbiEncode, - utils::{Anvil, AnvilInstance}, -}; -use ethers_providers::{Middleware, Provider, Http}; +use alloy_core::primitives::{Address, U256}; + +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{ - crypto::{keccak256, PublicKey, EthereumHram, Signature}, - router::{self, *}, - tests::{key_gen, deploy_contract}, + crypto::*, + deployer::Deployer, + router::{Router, abi as router}, + tests::{key_gen, send, fund_account}, }; async fn setup_test() -> ( - u32, AnvilInstance, - Router>, + Arc>, + u64, + Router, HashMap>, PublicKey, ) { let anvil = Anvil::new().spawn(); - let provider = Provider::::try_from(anvil.endpoint()).unwrap(); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let provider = RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + ); + let 
chain_id = provider.get_chain_id().await.unwrap(); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); - let contract_address = - deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap(); - let contract = Router::new(contract_address, client.clone()); + // Make sure the Deployer constructor returns None, as it doesn't exist yet + assert!(Deployer::new(client.clone()).await.unwrap().is_none()); + + // Deploy the Deployer + let tx = Deployer::deployment_tx(); + fund_account( + &client, + &wallet, + tx.recover_signer().unwrap(), + U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price), + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + + let pending_tx = client.send_raw_transaction(&bytes).await.unwrap(); + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + let deployer = + Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed"); let (keys, public_key) = key_gen(); - // Set the key to the threshold keys - let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000); - let pending_tx = tx.send().await.unwrap(); - let receipt = pending_tx.await.unwrap().unwrap(); - assert!(receipt.status == Some(1.into())); + // Verify the Router constructor returns None, as it doesn't exist yet + assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none()); - (chain_id, anvil, contract, keys, public_key) + // Deploy the router + let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key)) + .await + .unwrap(); + assert!(receipt.status()); + let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap(); + + (anvil, client, chain_id, contract, keys, public_key) +} + +async fn latest_block_hash(client: &RootProvider) -> [u8; 32] { + client + 
.get_block(client.get_block_number().await.unwrap().into(), false) + .await + .unwrap() + .unwrap() + .header + .hash + .unwrap() + .0 } #[tokio::test] async fn test_deploy_contract() { - setup_test().await; + let (_anvil, client, _, router, _, public_key) = setup_test().await; + + let block_hash = latest_block_hash(&client).await; + assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key); + assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis } pub fn hash_and_sign( keys: &HashMap>, public_key: &PublicKey, - chain_id: U256, message: &[u8], ) -> Signature { - let hashed_message = keccak256(message); - - let mut chain_id_bytes = [0; 32]; - chain_id.to_big_endian(&mut chain_id_bytes); - let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat(); - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, keys), - full_message, - ); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message); - Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap() + Signature::new(public_key, message, sig).unwrap() +} + +#[tokio::test] +async fn test_router_update_serai_key() { + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; + + let next_key = loop { + let point = ProjectivePoint::random(&mut OsRng); + let Some(next_key) = PublicKey::new(point) else { continue }; + break next_key; + }; + + let message = Router::update_serai_key_message( + U256::try_from(chain_id).unwrap(), + U256::try_from(1u64).unwrap(), + &next_key, + ); + let sig = hash_and_sign(&keys, &public_key, &message); + + let first_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + + let receipt = + send(&client, 
&anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig)) + .await + .unwrap(); + assert!(receipt.status()); + + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key); + // Check this does still offer the historical state + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); } #[tokio::test] async fn test_router_execute() { - let (chain_id, _anvil, contract, keys, public_key) = setup_test().await; + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; - let to = H160([0u8; 20]); - let value = U256([0u64; 4]); - let data = Bytes::from([0]); - let tx = OutInstruction { to, value, data: data.clone() }; + let to = Address::from([0; 20]); + let value = U256::ZERO; + let tx = router::OutInstruction { to, value, calls: vec![] }; + let txs = vec![tx]; - let nonce_call = contract.nonce(); - let nonce = nonce_call.call().await.unwrap(); + let first_block_hash = latest_block_hash(&client).await; + let nonce = contract.nonce(first_block_hash).await.unwrap(); + assert_eq!(nonce, U256::try_from(1u64).unwrap()); - let encoded = - ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode(); - let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded); + let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone()); + let sig = hash_and_sign(&keys, &public_key, &message); - let tx = contract - .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() }) - .gas(300_000); - let pending_tx = tx.send().await.unwrap(); - let receipt = dbg!(pending_tx.await.unwrap().unwrap()); - assert!(receipt.status == Some(1.into())); + let receipt = + send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, 
&sig)).await.unwrap(); + assert!(receipt.status()); - println!("gas used: {:?}", receipt.cumulative_gas_used); - println!("logs: {:?}", receipt.logs); + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap()); + // Check this does still offer the historical state + assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); } diff --git a/coins/ethereum/src/tests/schnorr.rs b/coins/ethereum/src/tests/schnorr.rs index 9525e4d6..9311c292 100644 --- a/coins/ethereum/src/tests/schnorr.rs +++ b/coins/ethereum/src/tests/schnorr.rs @@ -1,11 +1,9 @@ -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use rand_core::OsRng; -use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar}; - -use ethers_core::utils::{keccak256, Anvil, AnvilInstance}; -use ethers_providers::{Middleware, Provider, Http}; +use group::ff::PrimeField; +use k256::Scalar; use frost::{ curve::Secp256k1, @@ -13,24 +11,34 @@ use frost::{ tests::{algorithm_machines, sign}, }; +use alloy_core::primitives::Address; + +use alloy_sol_types::SolCall; + +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + use crate::{ + Error, crypto::*, - schnorr::*, - tests::{key_gen, deploy_contract}, + tests::{key_gen, deploy_contract, abi::schnorr as abi}, }; -async fn setup_test() -> (u32, AnvilInstance, Schnorr>) { +async fn setup_test() -> (AnvilInstance, Arc>, Address) { let anvil = Anvil::new().spawn(); - let provider = Provider::::try_from(anvil.endpoint()).unwrap(); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let provider = RootProvider::new( + 
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + ); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); - let contract_address = - deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap(); - let contract = Schnorr::new(contract_address, client.clone()); - (chain_id, anvil, contract) + let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); + (anvil, client, address) } #[tokio::test] @@ -38,30 +46,48 @@ async fn test_deploy_contract() { setup_test().await; } +pub async fn call_verify( + provider: &RootProvider, + contract: Address, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> Result<(), Error> { + let px: [u8; 32] = public_key.px.to_repr().into(); + let c_bytes: [u8; 32] = signature.c.to_repr().into(); + let s_bytes: [u8; 32] = signature.s.to_repr().into(); + let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new( + abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) + .abi_encode() + .into(), + )); + let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?; + let res = + abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + + if res._0 { + Ok(()) + } else { + Err(Error::InvalidSignature) + } +} + #[tokio::test] async fn test_ecrecover_hack() { - let (chain_id, _anvil, contract) = setup_test().await; - let chain_id = U256::from(chain_id); + let (_anvil, client, contract) = setup_test().await; let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - let sig = Signature::new(&public_key, 
chain_id, MESSAGE, sig).unwrap(); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); + let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); - call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap(); + call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); // Test an invalid signature fails let mut sig = sig; sig.s += Scalar::ONE; - assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err()); + assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); } diff --git a/deny.toml b/deny.toml index 2e516b99..60331289 100644 --- a/deny.toml +++ b/deny.toml @@ -99,6 +99,7 @@ allow-git = [ "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", + "https://github.com/alloy-rs/alloy", "https://github.com/monero-rs/base58-monero", "https://github.com/kayabaNerve/dockertest-rs", ] diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 73a34efe..cbc022a1 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -28,6 +28,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std", "get rand_chacha = { version = "0.3", default-features = false, features = ["std"] } # Encoders +const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -40,11 +41,16 @@ transcript = { package = "flexible-transcript", path = "../crypto/transcript", d frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } +# Bitcoin/Ethereum +k256 = { 
version = "^0.13.1", default-features = false, features = ["std"], optional = true } + # Bitcoin secp256k1 = { version = "0.28", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } bitcoin-serai = { path = "../coins/bitcoin", default-features = false, features = ["std"], optional = true } +# Ethereum +ethereum-serai = { path = "../coins/ethereum", default-features = false, optional = true } + # Monero dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } monero-serai = { path = "../coins/monero", default-features = false, features = ["std", "http-rpc", "multisig"], optional = true } @@ -55,12 +61,12 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db", optional = true } +serai-db = { path = "../common/db" } serai-env = { path = "../common/env", optional = true } # TODO: Replace with direct usage of primitives serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } -messages = { package = "serai-processor-messages", path = "./messages", optional = true } +messages = { package = "serai-processor-messages", path = "./messages" } message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } @@ -69,6 +75,8 @@ frost = { package = "modular-frost", path = "../crypto/frost", features = ["test sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } +ethereum-serai = { path = "../coins/ethereum", default-features = false, features = ["tests"] } + dockertest = "0.4" serai-docker-tests = { path = "../tests/docker" } @@ -76,9 +84,11 
@@ serai-docker-tests = { path = "../tests/docker" } secp256k1 = ["k256", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] +ethereum = ["secp256k1", "ethereum-serai"] + ed25519 = ["dalek-ff-group", "frost/ed25519"] monero = ["ed25519", "monero-serai", "serai-client/monero"] -binaries = ["env_logger", "serai-env", "messages", "message-queue"] +binaries = ["env_logger", "serai-env", "message-queue"] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 378b852d..19f67508 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,7 +1,15 @@ +#![allow(dead_code)] + mod plan; pub use plan::*; +mod db; +pub(crate) use db::*; + +mod key_gen; + pub mod networks; +pub(crate) mod multisigs; mod additional_key; pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs index a4e9552d..1a50effa 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -31,6 +31,8 @@ mod networks; use networks::{Block, Network}; #[cfg(feature = "bitcoin")] use networks::Bitcoin; +#[cfg(feature = "ethereum")] +use networks::Ethereum; #[cfg(feature = "monero")] use networks::Monero; @@ -735,6 +737,7 @@ async fn main() { }; let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { "bitcoin" => NetworkId::Bitcoin, + "ethereum" => NetworkId::Ethereum, "monero" => NetworkId::Monero, _ => panic!("unrecognized network"), }; @@ -744,6 +747,8 @@ async fn main() { match network_id { #[cfg(feature = "bitcoin")] NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, + #[cfg(feature = "ethereum")] + NetworkId::Ethereum => run(db.clone(), Ethereum::new(db, url).await, coordinator).await, #[cfg(feature = "monero")] NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, _ => panic!("spawning a processor for an unsupported network"), diff --git 
a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index 51287a0e..339b7bdc 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -1,3 +1,5 @@ +use std::io; + use ciphersuite::Ciphersuite; pub use serai_db::*; @@ -6,9 +8,59 @@ use serai_client::{primitives::Balance, in_instructions::primitives::InInstructi use crate::{ Get, Plan, - networks::{Transaction, Network}, + networks::{Output, Transaction, Network}, }; +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum PlanFromScanning { + Refund(N::Output, N::Address), + Forward(N::Output), +} + +impl PlanFromScanning { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let output = N::Output::read(reader)?; + + let mut address_vec_len = [0; 4]; + reader.read_exact(&mut address_vec_len)?; + let mut address_vec = + vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; + reader.read_exact(&mut address_vec)?; + let address = + N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); + + Ok(PlanFromScanning::Refund(output, address)) + } + 1 => { + let output = N::Output::read(reader)?; + Ok(PlanFromScanning::Forward(output)) + } + _ => panic!("reading unrecognized PlanFromScanning"), + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + PlanFromScanning::Refund(output, address) => { + writer.write_all(&[0])?; + output.write(writer)?; + + let address_vec: Vec = + address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); + writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; + writer.write_all(&address_vec) + } + PlanFromScanning::Forward(output) => { + writer.write_all(&[1])?; + output.write(writer) + } + } + } +} + create_db!( MultisigsDb { NextBatchDb: () -> u32, @@ -80,7 +132,11 @@ impl PlanDb { ) -> bool { let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 
..]).unwrap(); assert_eq!(plan.id(), id); - (key == plan.key) && (Some(N::change_address(plan.key)) == plan.change) + if let Some(change) = N::change_address(plan.key) { + (key == plan.key) && (Some(change) == plan.change) + } else { + false + } } } @@ -130,7 +186,7 @@ impl PlansFromScanningDb { pub fn set_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - plans: Vec>, + plans: Vec>, ) { let mut buf = vec![]; for plan in plans { @@ -142,13 +198,13 @@ impl PlansFromScanningDb { pub fn take_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - ) -> Option>> { + ) -> Option>> { let block_number = u64::try_from(block_number).unwrap(); let res = Self::get(txn, block_number).map(|plans| { let mut plans_ref = plans.as_slice(); let mut res = vec![]; while !plans_ref.is_empty() { - res.push(Plan::::read(&mut plans_ref).unwrap()); + res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); } res }); diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index a6e8bbc9..75c91675 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ - primitives::{MAX_DATA_LEN, NetworkId, Coin, ExternalAddress, BlockHash, Data}, + primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, @@ -28,15 +28,12 @@ use scanner::{ScannerEvent, ScannerHandle, Scanner}; mod db; use db::*; -#[cfg(not(test))] -mod scheduler; -#[cfg(test)] -pub mod scheduler; +pub(crate) mod scheduler; use scheduler::Scheduler; use crate::{ Get, Db, Payment, Plan, - networks::{OutputType, Output, Transaction, SignableTransaction, Block, PreparedSend, Network}, + networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, }; // InInstructionWithBalance from an external output @@ -95,6 +92,8 @@ 
enum RotationStep { ClosingExisting, } +// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee +// estimates async fn prepare_send( network: &N, block_number: usize, @@ -122,7 +121,7 @@ async fn prepare_send( pub struct MultisigViewer { activation_block: usize, key: ::G, - scheduler: Scheduler, + scheduler: N::Scheduler, } #[allow(clippy::type_complexity)] @@ -131,7 +130,7 @@ pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain - Completed(Vec, [u8; 32], N::Transaction), + Completed(Vec, [u8; 32], ::Completion), } pub struct MultisigManager { @@ -157,20 +156,7 @@ impl MultisigManager { assert!(current_keys.len() <= 2); let mut actively_signing = vec![]; for (_, key) in ¤t_keys { - schedulers.push( - Scheduler::from_db( - raw_db, - *key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ) - .unwrap(), - ); + schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); // Load any TXs being actively signed let key = key.to_bytes(); @@ -245,17 +231,7 @@ impl MultisigManager { let viewer = Some(MultisigViewer { activation_block, key: external_key, - scheduler: Scheduler::::new::( - txn, - external_key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ), + scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), }); if self.existing.is_none() { @@ -352,48 +328,30 @@ impl MultisigManager { (existing_outputs, new_outputs) } - fn refund_plan(output: N::Output, refund_to: N::Address) -> Plan { + fn refund_plan( + scheduler: &mut N::Scheduler, + txn: &mut D::Transaction<'_>, + output: 
N::Output, + refund_to: N::Address, + ) -> Plan { log::info!("creating refund plan for {}", hex::encode(output.id())); assert_eq!(output.kind(), OutputType::External); - Plan { - key: output.key(), - // Uses a payment as this will still be successfully sent due to fee amortization, - // and because change is currently always a Serai key - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - inputs: vec![output], - change: None, - } + scheduler.refund_plan::(txn, output, refund_to) } - fn forward_plan(&self, output: N::Output) -> Plan { + // Returns the plan for forwarding if one is needed. + // Returns None if one is not needed to forward this output. + fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { log::info!("creating forwarding plan for {}", hex::encode(output.id())); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require adding - a flow for networks which drop their data to still embed arbitrary data. It'd also have - edge cases causing failures (we'd need to manually provide the origin if it was implied, - which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. Then, when the output is - successfully forwarded, we simply read it from the local database. This also saves the - costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded transaction, - due to the asynchonicity of the Eventuality system, we instead interpret an Forwarded - output which has an amount associated with an InInstruction which was forwarded as having - been forwarded. 
- */ - - Plan { - key: self.existing.as_ref().unwrap().key, - payments: vec![Payment { - address: N::forward_address(self.new.as_ref().unwrap().key), - data: None, - balance: output.balance(), - }], - inputs: vec![output], - change: None, + let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( + txn, + output.clone(), + self.new.as_ref().expect("forwarding plan yet no new multisig").key, + ); + if res.is_none() { + log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); } + res } // Filter newly received outputs due to the step being RotationStep::ClosingExisting. @@ -605,7 +563,31 @@ impl MultisigManager { block_number { // Load plans crated when we scanned the block - plans = PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + let scanning_plans = + PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + // Expand into actual plans + plans = scanning_plans + .into_iter() + .map(|plan| match plan { + PlanFromScanning::Refund(output, refund_to) => { + let existing = self.existing.as_mut().unwrap(); + if output.key() == existing.key { + Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) + } else { + let new = self + .new + .as_mut() + .expect("new multisig didn't expect yet output wasn't for existing multisig"); + assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); + Self::refund_plan(&mut new.scheduler, txn, output, refund_to) + } + } + PlanFromScanning::Forward(output) => self + .forward_plan(txn, &output) + .expect("supposed to forward an output yet no forwarding plan"), + }) + .collect(); + for plan in &plans { plans_from_scanning.insert(plan.id()); } @@ -665,13 +647,23 @@ impl MultisigManager { }); for plan in &plans { - if plan.change == Some(N::change_address(plan.key)) { - // Assert these are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - 
RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + // This first equality should 'never meaningfully' be false + // All created plans so far are by the existing multisig EXCEPT: + // A) If we created a refund plan from the new multisig (yet that wouldn't have change) + // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC + // scheduler, yet that doesn't have change) + // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust + if plan.key == self.existing.as_ref().unwrap().key { + if let Some(change) = N::change_address(plan.key) { + if plan.change == Some(change) { + // Assert these (self-change) are only created during the expected step + match *step { + RotationStep::UseExisting => {} + RotationStep::NewAsChange | + RotationStep::ForwardFromExisting | + RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + } + } } } } @@ -853,15 +845,20 @@ impl MultisigManager { let plans_at_start = plans.len(); let (refund_to, instruction) = instruction_from_output::(output); if let Some(mut instruction) = instruction { - // Build a dedicated Plan forwarding this - let forward_plan = self.forward_plan(output.clone()); - plans.push(forward_plan.clone()); + let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( + output.clone(), + self.new.as_ref().expect("forwarding from existing yet no new multisig").key, + ) else { + // If this network doesn't need forwarding, report the output now + return true; + }; + plans.push(PlanFromScanning::::Forward(output.clone())); // Set the instruction for this output to be returned // We need to set it under the amount it's forwarded with, so prepare its forwarding // TX to determine the fees involved let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, forward_plan, 0).await; + prepare_send(network, block_number, shimmed_plan, 
0).await; // operating_costs should not increase in a forwarding TX assert_eq!(operating_costs, 0); @@ -872,12 +869,28 @@ impl MultisigManager { // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); + + /* + Sending a Plan, with arbitrary data proxying the InInstruction, would require + adding a flow for networks which drop their data to still embed arbitrary data. + It'd also have edge cases causing failures (we'd need to manually provide the + origin if it was implied, which may exceed the encoding limit). + + Instead, we save the InInstruction as we scan this output. Then, when the + output is successfully forwarded, we simply read it from the local database. + This also saves the costs of embedding arbitrary data. + + Since we can't rely on the Eventuality system to detect if it's a forwarded + transaction, due to the asynchonicity of the Eventuality system, we instead + interpret an Forwarded output which has an amount associated with an + InInstruction which was forwarded as having been forwarded. + */ ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { // Build a dedicated Plan refunding this - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } @@ -909,7 +922,7 @@ impl MultisigManager { let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } continue; @@ -999,9 +1012,9 @@ impl MultisigManager { // This must be emitted before ScannerEvent::Block for all completions of known Eventualities // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however. 
- ScannerEvent::Completed(key, block_number, id, tx) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx.id()); - (block_number, MultisigEvent::Completed(key, id, tx)) + ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { + ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); + (block_number, MultisigEvent::Completed(key, id, completion)) } }; diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index cefa8a25..20c61192 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -17,15 +17,25 @@ use tokio::{ use crate::{ Get, DbTxn, Db, - networks::{Output, Transaction, EventualitiesTracker, Block, Network}, + networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, }; #[derive(Clone, Debug)] pub enum ScannerEvent { // Block scanned - Block { is_retirement_block: bool, block: >::Id, outputs: Vec }, + Block { + is_retirement_block: bool, + block: >::Id, + outputs: Vec, + }, // Eventuality completion found on-chain - Completed(Vec, usize, [u8; 32], N::Transaction), + Completed( + Vec, + usize, + [u8; 32], + >::Id, + ::Completion, + ), } pub type ScannerEventChannel = mpsc::UnboundedReceiver>; @@ -555,19 +565,25 @@ impl Scanner { } } - for (id, (block_number, tx)) in network + for (id, (block_number, tx, completion)) in network .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) .await { info!( "eventuality {} resolved by {}, as found on chain", hex::encode(id), - hex::encode(&tx.id()) + hex::encode(tx.as_ref()) ); completion_block_numbers.push(block_number); // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) { + if !scanner.emit(ScannerEvent::Completed( + key_vec.clone(), + block_number, + id, + tx, + completion, + )) { return; } } diff --git a/processor/src/multisigs/scheduler/mod.rs 
b/processor/src/multisigs/scheduler/mod.rs new file mode 100644 index 00000000..6ec95fc4 --- /dev/null +++ b/processor/src/multisigs/scheduler/mod.rs @@ -0,0 +1,95 @@ +use core::fmt::Debug; +use std::io; + +use ciphersuite::Ciphersuite; + +use serai_client::primitives::{NetworkId, Balance}; + +use crate::{networks::Network, Db, Payment, Plan}; + +pub(crate) mod utxo; +pub(crate) mod smart_contract; + +pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug { + fn read(reader: &mut R) -> io::Result; + fn write(&self, writer: &mut W) -> io::Result<()>; +} + +impl SchedulerAddendum for () { + fn read(_: &mut R) -> io::Result { + Ok(()) + } + fn write(&self, _: &mut W) -> io::Result<()> { + Ok(()) + } +} + +pub trait Scheduler: Sized + Clone + PartialEq + Debug { + type Addendum: SchedulerAddendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool; + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self; + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result; + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool; + + /// Schedule a series of outputs/payments. + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec>; + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ); + + /// Refund a specific output. 
+ fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan; + + /// Shim the forwarding Plan as necessary to obtain a fee estimate. + /// + /// If this Scheduler is for a Network which requires forwarding, this must return Some with a + /// plan with identical fee behavior. If forwarding isn't necessary, returns None. + fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. + fn forward_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option>; +} diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs new file mode 100644 index 00000000..27268b82 --- /dev/null +++ b/processor/src/multisigs/scheduler/smart_contract.rs @@ -0,0 +1,208 @@ +use std::{io, collections::HashSet}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite}; + +use serai_client::primitives::{NetworkId, Coin, Balance}; + +use crate::{ + Get, DbTxn, Db, Payment, Plan, create_db, + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { + key: ::G, + coins: HashSet, + rotated: bool, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Addendum { + Nonce(u64), + RotateTo { nonce: u64, new_key: ::G }, +} + +impl SchedulerAddendum for Addendum { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) + } + 1 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + let new_key = N::Curve::read_G(reader)?; + Ok(Addendum::RotateTo 
{ nonce, new_key }) + } + _ => Err(io::Error::other("reading unknown Addendum type"))?, + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Addendum::Nonce(nonce) => { + writer.write_all(&[0])?; + writer.write_all(&nonce.to_le_bytes()) + } + Addendum::RotateTo { nonce, new_key } => { + writer.write_all(&[1])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(new_key.to_bytes().as_ref()) + } + } + } +} + +create_db! { + SchedulerDb { + LastNonce: () -> u64, + RotatedTo: (key: &[u8]) -> Vec, + } +} + +impl> SchedulerTrait for Scheduler { + type Addendum = Addendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + self.rotated + } + + /// Create a new Scheduler. + fn new( + _txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + assert!(N::branch_address(key).is_none()); + assert!(N::change_address(key).is_none()); + assert!(N::forward_address(key).is_none()); + + Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } + } + + /// Load a Scheduler from the DB. 
+ fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Ok(Scheduler { + key, + coins: network.coins().iter().copied().collect(), + rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), + }) + } + + fn can_use_branch(&self, _balance: Balance) -> bool { + false + } + + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + for utxo in utxos { + assert!(self.coins.contains(&utxo.balance().coin)); + } + + let mut nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + let mut plans = vec![]; + for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { + // Once we rotate, all further payments should be scheduled via the new multisig + assert!(!self.rotated); + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: chunk.to_vec(), + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + }); + nonce += 1; + } + + // If we're supposed to rotate to the new key, create an empty Plan which will signify the key + // update + if force_spend && (!self.rotated) { + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: vec![], + change: None, + scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, + }); + nonce += 1; + self.rotated = true; + RotatedTo::set( + txn, + self.key.to_bytes().as_ref(), + &key_for_any_change.to_bytes().as_ref().to_vec(), + ); + } + + LastNonce::set(txn, &nonce); + + plans + } + + fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { + vec![] + } + + fn created_output( + &mut self, + _txn: &mut D::Transaction<'_>, + _expected: u64, + _actual: Option, + ) { + panic!("Smart Contract Scheduler created a Branch output") + } + + /// Refund a specific output. 
+ fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) + .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) + .unwrap_or(self.key); + + let nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + LastNonce::set(txn, &(nonce + 1)); + Plan { + key: current_key, + inputs: vec![], + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + } + } + + fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { + None + } + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. + fn forward_plan( + &mut self, + _txn: &mut D::Transaction<'_>, + _output: N::Output, + _to: ::G, + ) -> Option> { + None + } +} diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler/utxo.rs similarity index 80% rename from processor/src/multisigs/scheduler.rs rename to processor/src/multisigs/scheduler/utxo.rs index abc81a80..e9aa3351 100644 --- a/processor/src/multisigs/scheduler.rs +++ b/processor/src/multisigs/scheduler/utxo.rs @@ -5,16 +5,17 @@ use std::{ use ciphersuite::{group::GroupEncoding, Ciphersuite}; -use serai_client::primitives::{Coin, Amount, Balance}; +use serai_client::primitives::{NetworkId, Coin, Amount, Balance}; use crate::{ - networks::{OutputType, Output, Network}, DbTxn, Db, Payment, Plan, + networks::{OutputType, Output, Network, UtxoNetwork}, + multisigs::scheduler::Scheduler as SchedulerTrait, }; -/// Stateless, deterministic output/payment manager. -#[derive(PartialEq, Eq, Debug)] -pub struct Scheduler { +/// Deterministic output/payment manager. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { key: ::G, coin: Coin, @@ -46,7 +47,7 @@ fn scheduler_key(key: &G) -> Vec { D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) } -impl Scheduler { +impl> Scheduler { pub fn empty(&self) -> bool { self.queued_plans.is_empty() && self.plans.is_empty() && @@ -144,8 +145,18 @@ impl Scheduler { pub fn new( txn: &mut D::Transaction<'_>, key: ::G, - coin: Coin, + network: NetworkId, ) -> Self { + assert!(N::branch_address(key).is_some()); + assert!(N::change_address(key).is_some()); + assert!(N::forward_address(key).is_some()); + + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let res = Scheduler { key, coin, @@ -159,7 +170,17 @@ impl Scheduler { res } - pub fn from_db(db: &D, key: ::G, coin: Coin) -> io::Result { + pub fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) }); @@ -201,7 +222,7 @@ impl Scheduler { amount }; - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); // If we have more payments than we can handle in a single TX, create plans for them // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create: @@ -237,7 +258,8 @@ impl Scheduler { key: self.key, inputs, payments, - change: Some(N::change_address(key_for_any_change)).filter(|_| change), + change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), + scheduler_addendum: (), } } @@ -305,7 +327,7 @@ impl Scheduler { its *own* branch address, since created_output is called on the signer's Scheduler. 
*/ { - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); payments = payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); } @@ -357,7 +379,8 @@ impl Scheduler { key: self.key, inputs: chunk, payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }) } @@ -403,7 +426,8 @@ impl Scheduler { key: self.key, inputs: self.utxos.drain(..).collect::>(), payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }); } @@ -435,9 +459,6 @@ impl Scheduler { // Note a branch output as having been created, with the amount it was actually created with, // or not having been created due to being too small - // This can be called whenever, so long as it's properly ordered - // (it's independent to Serai/the chain we're scheduling over, yet still expects outputs to be - // created in the same order Plans are returned in) pub fn created_output( &mut self, txn: &mut D::Transaction<'_>, @@ -501,3 +522,106 @@ impl Scheduler { txn.put(scheduler_key::(&self.key), self.serialize()); } } + +impl> SchedulerTrait for Scheduler { + type Addendum = (); + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + Scheduler::empty(self) + } + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + Scheduler::new::(txn, key, network) + } + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Scheduler::from_db::(db, key, network) + } + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool { + Scheduler::can_use_branch(self, balance) + } + + /// Schedule a series of outputs/payments. 
+ fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) + } + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { + Scheduler::consume_payments::(self, txn) + } + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + // TODO: Move this to Balance. + fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ) { + Scheduler::created_output::(self, txn, expected, actual) + } + + fn refund_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + Plan { + key: output.key(), + // Uses a payment as this will still be successfully sent due to fee amortization, + // and because change is currently always a Serai key + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + } + } + + fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { + Some(Plan { + key: output.key(), + payments: vec![Payment { + address: N::forward_address(to).unwrap(), + data: None, + balance: output.balance(), + }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + }) + } + + fn forward_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option> { + assert_eq!(self.key, output.key()); + // Call shim as shim returns the actual + Self::shim_forward_plan(output, to) + } +} diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 606a3e12..96f76949 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs 
@@ -52,9 +52,10 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, Payment, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -178,14 +179,6 @@ impl TransactionTrait for Transaction { hash.reverse(); hash } - fn serialize(&self) -> Vec { - let mut buf = vec![]; - self.consensus_encode(&mut buf).unwrap(); - buf - } - fn read(reader: &mut R) -> io::Result { - Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) - } #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { @@ -209,7 +202,23 @@ impl TransactionTrait for Transaction { #[derive(Clone, PartialEq, Eq, Debug)] pub struct Eventuality([u8; 32]); +#[derive(Clone, PartialEq, Eq, Default, Debug)] +pub struct EmptyClaim; +impl AsRef<[u8]> for EmptyClaim { + fn as_ref(&self) -> &[u8] { + &[] + } +} +impl AsMut<[u8]> for EmptyClaim { + fn as_mut(&mut self) -> &mut [u8] { + &mut [] + } +} + impl EventualityTrait for Eventuality { + type Claim = EmptyClaim; + type Completion = Transaction; + fn lookup(&self) -> Vec { self.0.to_vec() } @@ -224,6 +233,18 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.0.to_vec() } + + fn claim(_: &Transaction) -> EmptyClaim { + EmptyClaim + } + fn serialize_completion(completion: &Transaction) -> Vec { + let mut buf = vec![]; + completion.consensus_encode(&mut buf).unwrap(); + buf + } + fn read_completion(reader: &mut R) -> io::Result { + Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) + } } #[derive(Clone, Debug)] @@ -374,8 +395,12 @@ impl Bitcoin { for input in &tx.input { let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); input_tx.reverse(); - in_value 
+= self.get_transaction(&input_tx).await?.output - [usize::try_from(input.previous_output.vout).unwrap()] + in_value += self + .rpc + .get_transaction(&input_tx) + .await + .map_err(|_| NetworkError::ConnectionError)? + .output[usize::try_from(input.previous_output.vout).unwrap()] .value .to_sat(); } @@ -537,6 +562,25 @@ impl Bitcoin { } } +// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) +// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes +// While our inputs are entirely SegWit, such fine tuning is not necessary and could create +// issues in the future (if the size decreases or we misevaluate it) +// It also offers a minimal amount of benefit when we are able to logarithmically accumulate +// inputs +// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and +// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 +// bytes +// 100,000 / 192 = 520 +// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself +const MAX_INPUTS: usize = 520; +const MAX_OUTPUTS: usize = 520; + +fn address_from_key(key: ProjectivePoint) -> Address { + Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) + .unwrap() +} + #[async_trait] impl Network for Bitcoin { type Curve = Secp256k1; @@ -549,6 +593,8 @@ impl Network for Bitcoin { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Bitcoin; @@ -598,19 +644,7 @@ impl Network for Bitcoin { // aggregation TX const COST_TO_AGGREGATE: u64 = 800; - // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) - // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes - // While our inputs are entirely SegWit, such fine tuning is not necessary and could create - // issues in the future (if the size decreases or we 
misevaluate it) - // It also offers a minimal amount of benefit when we are able to logarithmically accumulate - // inputs - // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and - // 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 - // bytes - // 100,000 / 192 = 520 - // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself - const MAX_INPUTS: usize = 520; - const MAX_OUTPUTS: usize = 520; + const MAX_OUTPUTS: usize = MAX_OUTPUTS; fn tweak_keys(keys: &mut ThresholdKeys) { *keys = tweak_keys(keys); @@ -618,24 +652,24 @@ impl Network for Bitcoin { scanner(keys.group_key()); } - fn external_address(key: ProjectivePoint) -> Address { - Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) - .unwrap() + #[cfg(test)] + async fn external_address(&self, key: ProjectivePoint) -> Address { + address_from_key(key) } - fn branch_address(key: ProjectivePoint) -> Address { + fn branch_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))) } - fn change_address(key: ProjectivePoint) -> Address { + fn change_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change]))) } - fn forward_address(key: ProjectivePoint) -> Address { + fn forward_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) } async fn get_latest_block_number(&self) -> Result { @@ -682,7 +716,7 @@ impl Network for Bitcoin { spent_tx.reverse(); let mut tx; while { - tx = self.get_transaction(&spent_tx).await; + tx = self.rpc.get_transaction(&spent_tx).await; tx.is_err() } { log::error!("couldn't get transaction from bitcoin node: {tx:?}"); @@ -710,7 +744,7 @@ impl Network for Bitcoin { &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -719,11 +753,11 @@ impl Network for Bitcoin { fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for tx in &block.txdata[1 ..] { if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.clone())); + res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); } } @@ -770,7 +804,6 @@ impl Network for Bitcoin { async fn needed_fee( &self, block_number: usize, - _: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, @@ -787,9 +820,11 @@ impl Network for Bitcoin { &self, block_number: usize, plan_id: &[u8; 32], + _key: ProjectivePoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( |signable| { @@ -803,7 +838,7 @@ impl Network for Bitcoin { )) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, @@ -817,7 +852,7 @@ impl Network for Bitcoin { ) } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.send_raw_transaction(tx).await { Ok(_) => (), Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, @@ -828,12 +863,14 @@ impl Network for Bitcoin { Ok(()) } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError) - } - - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Transaction) -> bool { - eventuality.0 == tx.id() + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> Result, NetworkError> { + Ok(Some( + self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, + )) } #[cfg(test)] @@ -841,6 +878,20 @@ impl Network for Bitcoin { self.rpc.get_block_number(id).await.unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> bool { + self.rpc.get_transaction(&eventuality.0).await.is_ok() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { + self.rpc.get_transaction(&id.0).await.unwrap() + } + #[cfg(test)] async fn mine_block(&self) { self @@ -892,3 +943,7 @@ impl Network for Bitcoin { self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Bitcoin { + const MAX_INPUTS: usize = MAX_INPUTS; +} diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs 
new file mode 100644 index 00000000..36051980 --- /dev/null +++ b/processor/src/networks/ethereum.rs @@ -0,0 +1,827 @@ +use core::{fmt::Debug, time::Duration}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, + io, +}; + +use async_trait::async_trait; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use ethereum_serai::{ + alloy_core::primitives::U256, + alloy_rpc_types::{BlockNumberOrTag, Transaction}, + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + crypto::{PublicKey, Signature}, + deployer::Deployer, + router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, + machine::*, +}; +#[cfg(test)] +use ethereum_serai::alloy_core::primitives::B256; + +use tokio::{ + time::sleep, + sync::{RwLock, RwLockReadGuard}, +}; + +use serai_client::{ + primitives::{Coin, Amount, Balance, NetworkId}, + validator_sets::primitives::Session, +}; + +use crate::{ + Db, Payment, + networks::{ + OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, + Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, + }, + key_gen::NetworkKeyDb, + multisigs::scheduler::{ + Scheduler as SchedulerTrait, + smart_contract::{Addendum, Scheduler}, + }, +}; + +#[cfg(not(test))] +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }; +#[cfg(test)] // TODO +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { + Ok(res) => res, + Err(_) => panic!("invalid test DAI hex address"), + }; + +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { + match coin { + EthereumCoin::Ether => Some(Coin::Ether), + EthereumCoin::Erc20(token) => { + if *token == DAI { + return Some(Coin::Dai); + } + None + } + } +} + +fn 
amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { + assert_eq!(coin.network(), NetworkId::Ethereum); + assert_eq!(coin.decimals(), 8); + // Remove 10 decimals so we go from 18 decimals to 8 decimals + let divisor = U256::from(10_000_000_000u64); + // This is valid up to 184b, which is assumed for the coins allowed + Amount(u64::try_from(amount / divisor).unwrap()) +} + +fn balance_to_ethereum_amount(balance: Balance) -> U256 { + assert_eq!(balance.coin.network(), NetworkId::Ethereum); + assert_eq!(balance.coin.decimals(), 8); + // Restore 10 decimals so we go from 8 decimals to 18 decimals + let factor = U256::from(10_000_000_000u64); + U256::from(balance.amount.0) * factor +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address(pub [u8; 20]); +impl TryFrom> for Address { + type Error = (); + fn try_from(bytes: Vec) -> Result { + if bytes.len() != 20 { + Err(())?; + } + let mut res = [0; 20]; + res.copy_from_slice(&bytes); + Ok(Address(res)) + } +} +impl TryInto> for Address { + type Error = (); + fn try_into(self) -> Result, ()> { + Ok(self.0.to_vec()) + } +} +impl ToString for Address { + fn to_string(&self) -> String { + ethereum_serai::alloy_core::primitives::Address::from(self.0).to_string() + } +} + +impl SignableTransaction for RouterCommand { + fn fee(&self) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +#[async_trait] +impl TransactionTrait> for Transaction { + type Id = [u8; 32]; + fn id(&self) -> Self::Id { + self.hash.0 + } + + #[cfg(test)] + async fn fee(&self, _network: &Ethereum) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +// We use 32-block Epochs to represent blocks. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Epoch { + // The hash of the block which ended the prior Epoch. + prior_end_hash: [u8; 32], + // The first block number within this Epoch. + start: u64, + // The hash of the last block within this Epoch. 
+ end_hash: [u8; 32], + // The monotonic time for this Epoch. + time: u64, +} + +impl Epoch { + fn end(&self) -> u64 { + self.start + 31 + } +} + +#[async_trait] +impl Block> for Epoch { + type Id = [u8; 32]; + fn id(&self) -> [u8; 32] { + self.end_hash + } + fn parent(&self) -> [u8; 32] { + self.prior_end_hash + } + async fn time(&self, _: &Ethereum) -> u64 { + self.time + } +} + +impl Output> for EthereumInInstruction { + type Id = [u8; 32]; + + fn kind(&self) -> OutputType { + OutputType::External + } + + fn id(&self) -> Self::Id { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&self.id.0); + id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); + *ethereum_serai::alloy_core::primitives::keccak256(id) + } + fn tx_id(&self) -> [u8; 32] { + self.id.0 + } + fn key(&self) -> ::G { + self.key_at_end_of_block + } + + fn presumed_origin(&self) -> Option
{ + Some(Address(self.from)) + } + + fn balance(&self) -> Balance { + let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { + panic!( + "requesting coin for an EthereumInInstruction with a coin {}", + "we don't handle. this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, self.amount) } + } + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + EthereumInInstruction::write(self, writer) + } + fn read(reader: &mut R) -> io::Result { + EthereumInInstruction::read(reader) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Claim { + signature: [u8; 64], +} +impl AsRef<[u8]> for Claim { + fn as_ref(&self) -> &[u8] { + &self.signature + } +} +impl AsMut<[u8]> for Claim { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.signature + } +} +impl Default for Claim { + fn default() -> Self { + Self { signature: [0; 64] } + } +} +impl From<&Signature> for Claim { + fn from(sig: &Signature) -> Self { + Self { signature: sig.to_bytes() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Eventuality(PublicKey, RouterCommand); +impl EventualityTrait for Eventuality { + type Claim = Claim; + type Completion = SignedRouterCommand; + + fn lookup(&self) -> Vec { + match self.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + nonce.as_le_bytes().to_vec() + } + } + } + + fn read(reader: &mut R) -> io::Result { + let point = Secp256k1::read_G(reader)?; + let command = RouterCommand::read(reader)?; + Ok(Eventuality( + PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, + command, + )) + } + fn serialize(&self) -> Vec { + let mut res = vec![]; + res.extend(self.0.point().to_bytes().as_slice()); + self.1.write(&mut res).unwrap(); + res + } + + fn claim(completion: &Self::Completion) -> Self::Claim { + Claim::from(completion.signature()) + } + fn serialize_completion(completion: &Self::Completion) -> Vec { + let mut res = vec![]; + completion.write(&mut res).unwrap(); + res + } + fn read_completion(reader: &mut R) -> io::Result { + SignedRouterCommand::read(reader) + } +} + +#[derive(Clone, Debug)] +pub struct Ethereum { + // This DB is solely used to access the first key generated, as needed to determine the Router's + // address. Accordingly, all methods present are consistent to a Serai chain with a finalized + // first key (regardless of local state), and this is safe. 
+ db: D, + provider: Arc>, + deployer: Deployer, + router: Arc>>, +} +impl PartialEq for Ethereum { + fn eq(&self, _other: &Ethereum) -> bool { + true + } +} +impl Ethereum { + pub async fn new(db: D, url: String) -> Self { + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url), true), + )); + + #[cfg(test)] // TODO: Move to test code + provider.raw_request::<_, ()>("evm_setAutomine".into(), false).await.unwrap(); + + let mut deployer = Deployer::new(provider.clone()).await; + while !matches!(deployer, Ok(Some(_))) { + log::error!("Deployer wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + deployer = Deployer::new(provider.clone()).await; + } + let deployer = deployer.unwrap().unwrap(); + + Ethereum { db, provider, deployer, router: Arc::new(RwLock::new(None)) } + } + + // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. + // This is guaranteed to return Some. + pub async fn router(&self) -> RwLockReadGuard<'_, Option> { + // If we've already instantiated the Router, return a read reference + { + let router = self.router.read().await; + if router.is_some() { + return router; + } + } + + // Instantiate it + let mut router = self.router.write().await; + // If another attempt beat us to it, return + if router.is_some() { + drop(router); + return self.router.read().await; + } + + // Get the first key from the DB + let first_key = + NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); + let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); + let public_key = PublicKey::new(key).unwrap(); + + // Find the router + let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; + while !matches!(found, Ok(Some(_))) { + log::error!("Router wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + found = self.deployer.find_router(self.provider.clone(), 
&public_key).await; + } + + // Set it + *router = Some(found.unwrap().unwrap()); + + // Downgrade to a read lock + // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no + // longer necessary + drop(router); + self.router.read().await + } +} + +#[async_trait] +impl Network for Ethereum { + type Curve = Secp256k1; + + type Transaction = Transaction; + type Block = Epoch; + + type Output = EthereumInInstruction; + type SignableTransaction = RouterCommand; + type Eventuality = Eventuality; + type TransactionMachine = RouterCommandMachine; + + type Scheduler = Scheduler; + + type Address = Address; + + const NETWORK: NetworkId = NetworkId::Ethereum; + const ID: &'static str = "Ethereum"; + const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; + const CONFIRMATIONS: usize = 1; + + const DUST: u64 = 0; // TODO + + const COST_TO_AGGREGATE: u64 = 0; + + // TODO: usize::max, with a merkle tree in the router + const MAX_OUTPUTS: usize = 256; + + fn tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + #[cfg(test)] + async fn external_address(&self, _key: ::G) -> Address { + Address(self.router().await.as_ref().unwrap().address()) + } + + fn branch_address(_key: ::G) -> Option
{ + None + } + + fn change_address(_key: ::G) -> Option
{ + None + } + + fn forward_address(_key: ::G) -> Option
{ + None + } + + async fn get_latest_block_number(&self) -> Result { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), false) + .await + .map_err(|_| NetworkError::ConnectionError)? + .expect("no blocks were finalized") + .header + .number + .unwrap(); + // Error if there hasn't been a full epoch yet + if actual_number < 32 { + Err(NetworkError::ConnectionError)? + } + // If this is 33, the division will return 1, yet 1 is the epoch in progress + let latest_full_epoch = (actual_number / 32).saturating_sub(1); + Ok(latest_full_epoch.try_into().unwrap()) + } + + async fn get_block(&self, number: usize) -> Result { + let latest_finalized = self.get_latest_block_number().await?; + if number > latest_finalized { + Err(NetworkError::ConnectionError)? + } + + let start = number * 32; + let prior_end_hash = if start == 0 { + [0; 32] + } else { + self + .provider + .get_block(u64::try_from(start - 1).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? + .header + .hash + .unwrap() + .into() + }; + + let end_header = self + .provider + .get_block(u64::try_from(start + 31).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? + .header; + + let end_hash = end_header.hash.unwrap().into(); + let time = end_header.timestamp; + + Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) + } + + async fn get_outputs( + &self, + block: &Self::Block, + _: ::G, + ) -> Vec { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + // TODO: Top-level transfers + + let mut all_events = vec![]; + for block in block.start .. 
(block.start + 32) { + let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; + while let Err(e) = events { + log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); + sleep(Duration::from_secs(5)).await; + events = router.in_instructions(block, &HashSet::from([DAI])).await; + } + all_events.extend(events.unwrap()); + } + + for event in &all_events { + assert!( + coin_to_serai_coin(&event.coin).is_some(), + "router yielded events for unrecognized coins" + ); + } + all_events + } + + async fn get_eventuality_completions( + &self, + eventualities: &mut EventualitiesTracker, + block: &Self::Block, + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + > { + let mut res = HashMap::new(); + if eventualities.map.is_empty() { + return res; + } + + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let past_scanned_epoch = loop { + match self.get_block(eventualities.block_number).await { + Ok(block) => break block, + Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), + } + sleep(Duration::from_secs(10)).await; + }; + assert_eq!( + past_scanned_epoch.start / 32, + u64::try_from(eventualities.block_number).unwrap(), + "assumption of tracker block number's relation to epoch start is incorrect" + ); + + // Iterate from after the epoch number in the tracker to the end of this epoch + for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { + let executed = loop { + match router.executed_commands(block_num).await { + Ok(executed) => break executed, + Err(e) => log::error!("couldn't get the executed commands in block {block_num}: {e}"), + } + sleep(Duration::from_secs(10)).await; + }; + + for executed in executed { + let lookup = executed.nonce.to_le_bytes().to_vec(); + if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { + if let Some(command) = + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), 
&executed.signature) + { + res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); + eventualities.map.remove(&lookup); + } + } + } + } + eventualities.block_number = (block.start / 32).try_into().unwrap(); + + res + } + + async fn needed_fee( + &self, + _block_number: usize, + inputs: &[Self::Output], + _payments: &[Payment], + _change: &Option, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + // Claim no fee is needed so we can perform amortization ourselves + Ok(Some(0)) + } + + async fn signable_transaction( + &self, + _block_number: usize, + _plan_id: &[u8; 32], + key: ::G, + inputs: &[Self::Output], + payments: &[Payment], + change: &Option, + scheduler_addendum: &>::Addendum, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + assert!(change.is_none()); + let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; + + // TODO: Perform fee amortization (in scheduler? + // TODO: Make this function internal and have needed_fee properly return None as expected? + // TODO: signable_transaction is written as cannot return None if needed_fee returns Some + // TODO: Why can this return None at all if it isn't allowed to return None? + + let command = match scheduler_addendum { + Addendum::Nonce(nonce) => RouterCommand::Execute { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + outs: payments + .iter() + .filter_map(|payment| { + Some(OutInstruction { + target: if let Some(data) = payment.data.as_ref() { + // This introspects the Call serialization format, expecting the first 20 bytes to + // be the address + // This avoids wasting the 20-bytes allocated within address + let full_data = [payment.address.0.as_slice(), data].concat(); + let mut reader = full_data.as_slice(); + + let mut calls = vec![]; + while !reader.is_empty() { + calls.push(Call::read(&mut reader).ok()?) 
+ } + // The above must have executed at least once since reader contains the address + assert_eq!(calls[0].to, payment.address.0); + + OutInstructionTarget::Calls(calls) + } else { + OutInstructionTarget::Direct(payment.address.0) + }, + value: { + assert_eq!(payment.balance.coin, Coin::Ether); // TODO + balance_to_ethereum_amount(payment.balance) + }, + }) + }) + .collect(), + }, + Addendum::RotateTo { nonce, new_key } => { + assert!(payments.is_empty()); + RouterCommand::UpdateSeraiKey { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), + } + } + }; + Ok(Some(( + command.clone(), + Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), + ))) + } + + async fn attempt_sign( + &self, + keys: ThresholdKeys, + transaction: Self::SignableTransaction, + ) -> Result { + Ok( + RouterCommandMachine::new(keys, transaction) + .expect("keys weren't usable to sign router commands"), + ) + } + + async fn publish_completion( + &self, + completion: &::Completion, + ) -> Result<(), NetworkError> { + // Publish this to the dedicated TX server for a solver to actually publish + #[cfg(not(test))] + { + let _ = completion; + todo!("TODO"); + } + + // Publish this using a dummy account we fund with magic RPC commands + #[cfg(test)] + { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let mut tx = match completion.command() { + RouterCommand::UpdateSeraiKey { key, .. 
} => { + router.update_serai_key(key, completion.signature()) + } + RouterCommand::Execute { outs, .. } => router.execute( + &outs.iter().cloned().map(Into::into).collect::>(), + completion.signature(), + ), + }; + tx.gas_price = 100_000_000_000u128; + + use ethereum_serai::alloy_consensus::SignableTransaction; + let sig = + k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let _ = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + Ok(()) + } + } + + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError> { + Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) + } + + #[cfg(test)] + async fn get_block_number(&self, id: &>::Id) -> usize { + self + .provider + .get_block(B256::from(*id).into(), false) + .await + .unwrap() + .unwrap() + .header + .number + .unwrap() + .try_into() + .unwrap() + } + + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool { + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction { + match eventuality.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + self.provider.get_transaction_by_hash(logs[0].transaction_hash.unwrap()).await.unwrap() + } + } + } + + #[cfg(test)] + async fn mine_block(&self) { + self.provider.raw_request::<_, ()>("anvil_mine".into(), [32]).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, send_to: Self::Address) -> Self::Block { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let tx = ethereum_serai::alloy_consensus::TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 21_0000u128, + to: ethereum_serai::alloy_core::primitives::TxKind::Call(send_to.0.into()), + // 1 ETH + value: U256::from_str_radix("1000000000000000000", 10).unwrap(), + input: vec![].into(), + }; + + use ethereum_serai::alloy_consensus::SignableTransaction; + let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut 
bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + // Mine an epoch containing this TX + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + // Yield the freshly mined block + self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() + } +} diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs index d77d43f1..803ed40a 100644 --- a/processor/src/networks/mod.rs +++ b/processor/src/networks/mod.rs @@ -21,12 +21,17 @@ pub mod bitcoin; #[cfg(feature = "bitcoin")] pub use self::bitcoin::Bitcoin; +#[cfg(feature = "ethereum")] +pub mod ethereum; +#[cfg(feature = "ethereum")] +pub use ethereum::Ethereum; + #[cfg(feature = "monero")] pub mod monero; #[cfg(feature = "monero")] pub use monero::Monero; -use crate::{Payment, Plan}; +use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; #[derive(Clone, Copy, Error, Debug)] pub enum NetworkError { @@ -105,7 +110,7 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb fn kind(&self) -> OutputType; fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; + fn tx_id(&self) -> >::Id; // TODO: Review use of fn key(&self) -> ::G; fn presumed_origin(&self) -> Option; @@ -118,25 +123,33 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb } #[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + Debug { +pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { type Id: 'static + Id; fn id(&self) -> Self::Id; - fn serialize(&self) -> Vec; - fn read(reader: &mut R) -> io::Result; - + // TODO: Move to Balance #[cfg(test)] async fn fee(&self, network: &N) -> u64; } pub trait SignableTransaction: Send + Sync + Clone + Debug { + // TODO: Move to Balance fn fee(&self) -> u64; } -pub trait Eventuality: Send + Sync + Clone + Debug { +pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { + type Claim: Send + Sync + Clone + PartialEq + Default + 
AsRef<[u8]> + AsMut<[u8]> + Debug; + type Completion: Send + Sync + Clone + PartialEq + Debug; + fn lookup(&self) -> Vec; fn read(reader: &mut R) -> io::Result; fn serialize(&self) -> Vec; + + fn claim(completion: &Self::Completion) -> Self::Claim; + + // TODO: Make a dedicated Completion trait + fn serialize_completion(completion: &Self::Completion) -> Vec; + fn read_completion(reader: &mut R) -> io::Result; } #[derive(Clone, PartialEq, Eq, Debug)] @@ -211,7 +224,7 @@ fn drop_branches( ) -> Vec { let mut branch_outputs = vec![]; for payment in payments { - if payment.address == N::branch_address(key) { + if Some(&payment.address) == N::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); } } @@ -227,12 +240,12 @@ pub struct PreparedSend { } #[async_trait] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { +pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { /// The elliptic curve used for this network. type Curve: Curve; /// The type representing the transaction for this network. - type Transaction: Transaction; + type Transaction: Transaction; // TODO: Review use of /// The type representing the block for this network. type Block: Block; @@ -246,7 +259,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// This must be binding to both the outputs expected and the plan ID. type Eventuality: Eventuality; /// The FROST machine to sign a transaction. - type TransactionMachine: PreprocessMachine; + type TransactionMachine: PreprocessMachine< + Signature = ::Completion, + >; + + /// The scheduler for this network. + type Scheduler: Scheduler; /// The type representing an address. 
// This should NOT be a String, yet a tailored type representing an efficient binary encoding, @@ -269,10 +287,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; /// The amount of confirmations required to consider a block 'final'. const CONFIRMATIONS: usize; - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; /// The maximum amount of outputs which will fit in a TX. /// This should be equal to MAX_INPUTS unless one is specifically limited. /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. @@ -293,13 +307,16 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { fn tweak_keys(key: &mut ThresholdKeys); /// Address for the given group key to receive external coins to. - fn external_address(key: ::G) -> Self::Address; + #[cfg(test)] + async fn external_address(&self, key: ::G) -> Self::Address; /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Self::Address; + fn branch_address(key: ::G) -> Option; /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Self::Address; + fn change_address(key: ::G) -> Option; /// Address for forwarded outputs from prior multisigs. - fn forward_address(key: ::G) -> Self::Address; + /// + /// forward_address must only return None if explicit forwarding isn't necessary. + fn forward_address(key: ::G) -> Option; /// Get the latest block's number. async fn get_latest_block_number(&self) -> Result; @@ -349,13 +366,24 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// registered eventualities may have been completed in. /// /// This may panic if not fed a block greater than the tracker's block number. 
+ /// + /// Plan ID -> (block number, TX ID, completion) // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common // code + // TODO: Consider having this return the Transaction + the Completion? + // Or Transaction with extract_completion? async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Self::Transaction)>; + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + >; /// Returns the needed fee to fulfill this Plan at this fee rate. /// @@ -363,7 +391,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Self::Output], payments: &[Payment], change: &Option, @@ -375,16 +402,25 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// 1) Call needed_fee /// 2) If the Plan is fulfillable, amortize the fee /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* + /// + /// This takes a destructured Plan as some of these arguments are malleated from the original + /// Plan. + // TODO: Explicit AmortizedPlan? + #[allow(clippy::too_many_arguments)] async fn signable_transaction( &self, block_number: usize, plan_id: &[u8; 32], + key: ::G, inputs: &[Self::Output], payments: &[Payment], change: &Option, + scheduler_addendum: &>::Addendum, ) -> Result, NetworkError>; /// Prepare a SignableTransaction for a transaction. + /// + /// This must not persist anything as we will prepare Plans we never intend to execute. 
async fn prepare_send( &self, block_number: usize, @@ -395,13 +431,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { assert!((!plan.payments.is_empty()) || plan.change.is_some()); let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change } = plan; + let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; let theoretical_change_amount = inputs.iter().map(|input| input.balance().amount.0).sum::() - payments.iter().map(|payment| payment.balance.amount.0).sum::(); - let Some(tx_fee) = self.needed_fee(block_number, &plan_id, &inputs, &payments, &change).await? - else { + let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { // This Plan is not fulfillable // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? return Ok(PreparedSend { @@ -466,7 +501,7 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { // Note the branch outputs' new values let mut branch_outputs = vec![]; for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if payment.address == Self::branch_address(key) { + if Some(&payment.address) == Self::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: initial_amount, actual: if payment.balance.amount.0 == 0 { @@ -508,11 +543,20 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { ) })(); - let Some(tx) = - self.signable_transaction(block_number, &plan_id, &inputs, &payments, &change).await? + let Some(tx) = self + .signable_transaction( + block_number, + &plan_id, + key, + &inputs, + &payments, + &change, + &scheduler_addendum, + ) + .await? else { panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}", + "{}. 
{}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", "signable_transaction returned None for a TX we prior successfully calculated the fee for", "id", hex::encode(plan_id), @@ -524,6 +568,8 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { change, "successfully amoritized fee", tx_fee, + "scheduler's addendum", + scheduler_addendum, ) }; @@ -546,31 +592,49 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { } /// Attempt to sign a SignableTransaction. - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, ) -> Result; - /// Publish a transaction. - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>; - - /// Get a transaction by its ID. - async fn get_transaction( + /// Publish a completion. + async fn publish_completion( &self, - id: &>::Id, - ) -> Result; + completion: &::Completion, + ) -> Result<(), NetworkError>; - /// Confirm a plan was completed by the specified transaction. - // This is allowed to take shortcuts. - // This may assume an honest multisig, solely checking the inputs specified were spent. - // This may solely check the outputs are equivalent *so long as it's locked to the plan ID*. - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Self::Transaction) -> bool; + /// Confirm a plan was completed by the specified transaction, per our bounds. + /// + /// Returns Err if there was an error with the confirmation methodology. + /// Returns Ok(None) if this is not a valid completion. + /// Returns Ok(Some(_)) with the completion if it's valid. + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError>; /// Get a block's number by its ID. #[cfg(test)] async fn get_block_number(&self, id: &>::Id) -> usize; + /// Check an Eventuality is fulfilled by a claim. 
+ #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool; + + /// Get a transaction by the Eventuality it completes. + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction; + #[cfg(test)] async fn mine_block(&self); @@ -579,3 +643,10 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { #[cfg(test)] async fn test_send(&self, key: Self::Address) -> Self::Block; } + +pub trait UtxoNetwork: Network { + /// The maximum amount of inputs which will fit in a TX. + /// This should be equal to MAX_OUTPUTS unless one is specifically limited. + /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. + const MAX_INPUTS: usize; +} diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index 8d58ee1a..8d4d1760 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -39,8 +39,9 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -117,12 +118,6 @@ impl TransactionTrait for Transaction { fn id(&self) -> Self::Id { self.hash() } - fn serialize(&self) -> Vec { - self.serialize() - } - fn read(reader: &mut R) -> io::Result { - Transaction::read(reader) - } #[cfg(test)] async fn fee(&self, _: &Monero) -> u64 { @@ -131,6 +126,9 @@ impl TransactionTrait for Transaction { } impl EventualityTrait for Eventuality { + type Claim = [u8; 32]; + type Completion = Transaction; + // Use the TX extra to look up potential matches // While anyone can forge this, a transaction with distinct 
outputs won't actually match // Extra includess the one time keys which are derived from the plan ID, so a collision here is a @@ -145,6 +143,16 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.serialize() } + + fn claim(tx: &Transaction) -> [u8; 32] { + tx.id() + } + fn serialize_completion(completion: &Transaction) -> Vec { + completion.serialize() + } + fn read_completion(reader: &mut R) -> io::Result { + Transaction::read(reader) + } } #[derive(Clone, Debug)] @@ -274,7 +282,8 @@ impl Monero { async fn median_fee(&self, block: &Block) -> Result { let mut fees = vec![]; for tx_hash in &block.txs { - let tx = self.get_transaction(tx_hash).await?; + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate if tx.rct_signatures.rct_type() != RctType::Null { continue; @@ -454,6 +463,8 @@ impl Network for Monero { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Monero; @@ -461,11 +472,6 @@ impl Network for Monero { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; const CONFIRMATIONS: usize = 10; - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. 
This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; const MAX_OUTPUTS: usize = 16; // 0.01 XMR @@ -478,20 +484,21 @@ impl Network for Monero { // Monero doesn't require/benefit from tweaking fn tweak_keys(_: &mut ThresholdKeys) {} - fn external_address(key: EdwardsPoint) -> Address { + #[cfg(test)] + async fn external_address(&self, key: EdwardsPoint) -> Address { Self::address_internal(key, EXTERNAL_SUBADDRESS) } - fn branch_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, BRANCH_SUBADDRESS) + fn branch_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, BRANCH_SUBADDRESS)) } - fn change_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, CHANGE_SUBADDRESS) + fn change_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, CHANGE_SUBADDRESS)) } - fn forward_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, FORWARD_SUBADDRESS) + fn forward_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, FORWARD_SUBADDRESS)) } async fn get_latest_block_number(&self) -> Result { @@ -558,7 +565,7 @@ impl Network for Monero { &self, eventualities: &mut EventualitiesTracker, block: &Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -568,13 +575,13 @@ impl Network for Monero { network: &Monero, eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for hash in &block.txs { let tx = { let mut tx; while { - tx = network.get_transaction(hash).await; + tx = network.rpc.get_transaction(*hash).await; tx.is_err() } { log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); @@ -587,7 +594,7 @@ impl Network for Monero { if eventuality.matches(&tx) { res.insert( eventualities.map.remove(&tx.prefix.extra).unwrap().0, - (usize::try_from(block.number().unwrap()).unwrap(), tx), + (usize::try_from(block.number().unwrap()).unwrap(), tx.id(), tx), ); } } @@ -625,14 +632,13 @@ impl Network for Monero { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, ) -> Result, NetworkError> { Ok( self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, true) + .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) .await? .map(|(_, signable)| signable.fee()), ) @@ -642,9 +648,11 @@ impl Network for Monero { &self, block_number: usize, plan_id: &[u8; 32], + _key: EdwardsPoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok( self @@ -658,7 +666,7 @@ impl Network for Monero { ) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: SignableTransaction, @@ -669,7 +677,7 @@ impl Network for Monero { } } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.publish_transaction(tx).await { Ok(()) => Ok(()), Err(RpcError::ConnectionError(e)) => { @@ -682,12 +690,17 @@ impl Network for Monero { } } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(*id).await.map_err(map_rpc_err) - } - - fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool { - eventuality.matches(tx) + async fn confirm_completion( + &self, + eventuality: &Eventuality, + id: &[u8; 32], + ) -> Result, NetworkError> { + let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; + if eventuality.matches(&tx) { + Ok(Some(tx)) + } else { + Ok(None) + } } #[cfg(test)] @@ -695,6 +708,31 @@ impl Network for Monero { self.rpc.get_block(*id).await.unwrap().number().unwrap().try_into().unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &[u8; 32], + ) -> bool { + return eventuality.matches(&self.rpc.get_transaction(*claim).await.unwrap()); + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.txs { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + #[cfg(test)] async fn mine_block(&self) { // https://github.com/serai-dex/serai/issues/198 @@ -775,3 +813,11 @@ impl Network for Monero { 
self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Monero { + // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction + // larger than 150kb. This fits within the 100kb mark + // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; +} diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 3e10c7d3..58a8a5e1 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -8,7 +8,10 @@ use frost::curve::Ciphersuite; use serai_client::primitives::Balance; -use crate::networks::{Output, Network}; +use crate::{ + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler}, +}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Payment { @@ -73,7 +76,7 @@ impl Payment { } } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq)] pub struct Plan { pub key: ::G, pub inputs: Vec, @@ -90,7 +93,11 @@ pub struct Plan { /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup /// the operating costs. + // + // TODO: Consider moving to ::G? pub change: Option, + /// The scheduler's additional data. 
+ pub scheduler_addendum: >::Addendum, } impl core::fmt::Debug for Plan { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { @@ -100,6 +107,7 @@ impl core::fmt::Debug for Plan { .field("inputs", &self.inputs) .field("payments", &self.payments) .field("change", &self.change.as_ref().map(ToString::to_string)) + .field("scheduler_addendum", &self.scheduler_addendum) .finish() } } @@ -125,6 +133,10 @@ impl Plan { transcript.append_message(b"change", change.to_string()); } + let mut addendum_bytes = vec![]; + self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); + transcript.append_message(b"scheduler_addendum", addendum_bytes); + transcript } @@ -161,7 +173,8 @@ impl Plan { }; assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); writer.write_all(&[u8::try_from(change.len()).unwrap()])?; - writer.write_all(&change) + writer.write_all(&change)?; + self.scheduler_addendum.write(writer) } pub fn read(reader: &mut R) -> io::Result { @@ -193,6 +206,7 @@ impl Plan { })?) 
}; - Ok(Plan { key, inputs, payments, change }) + let scheduler_addendum = >::Addendum::read(reader)?; + Ok(Plan { key, inputs, payments, change, scheduler_addendum }) } } diff --git a/processor/src/signer.rs b/processor/src/signer.rs index 7a4fcbed..cab0bceb 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -2,7 +2,6 @@ use core::{marker::PhantomData, fmt}; use std::collections::HashMap; use rand_core::OsRng; -use ciphersuite::group::GroupEncoding; use frost::{ ThresholdKeys, FrostError, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, @@ -17,7 +16,7 @@ pub use serai_db::*; use crate::{ Get, DbTxn, Db, - networks::{Transaction, Eventuality, Network}, + networks::{Eventuality, Network}, }; create_db!( @@ -25,7 +24,7 @@ create_db!( CompletionsDb: (id: [u8; 32]) -> Vec, EventualityDb: (id: [u8; 32]) -> Vec, AttemptDb: (id: &SignId) -> (), - TransactionDb: (id: &[u8]) -> Vec, + CompletionDb: (claim: &[u8]) -> Vec, ActiveSignsDb: () -> Vec<[u8; 32]>, CompletedOnChainDb: (id: &[u8; 32]) -> (), } @@ -59,12 +58,20 @@ impl CompletionsDb { fn completions( getter: &impl Get, id: [u8; 32], - ) -> Vec<>::Id> { - let completions = Self::get(getter, id).unwrap_or_default(); + ) -> Vec<::Claim> { + let Some(completions) = Self::get(getter, id) else { return vec![] }; + + // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 + if completions.is_empty() { + let default = ::Claim::default(); + assert_eq!(default.as_ref().len(), 0); + return vec![default]; + } + let mut completions_ref = completions.as_slice(); let mut res = vec![]; while !completions_ref.is_empty() { - let mut id = >::Id::default(); + let mut id = ::Claim::default(); let id_len = id.as_ref().len(); id.as_mut().copy_from_slice(&completions_ref[.. 
id_len]); completions_ref = &completions_ref[id_len ..]; @@ -73,25 +80,37 @@ impl CompletionsDb { res } - fn complete(txn: &mut impl DbTxn, id: [u8; 32], tx: &N::Transaction) { - let tx_id = tx.id(); - // Transactions can be completed by multiple signatures + fn complete( + txn: &mut impl DbTxn, + id: [u8; 32], + completion: &::Completion, + ) { + // Completions can be completed by multiple signatures // Save every solution in order to be robust - TransactionDb::save_transaction::(txn, tx); - let mut existing = Self::get(txn, id).unwrap_or_default(); - // Don't add this TX if it's already present - let tx_len = tx_id.as_ref().len(); - assert_eq!(existing.len() % tx_len, 0); + CompletionDb::save_completion::(txn, completion); - let mut i = 0; - while i < existing.len() { - if &existing[i .. (i + tx_len)] == tx_id.as_ref() { - return; - } - i += tx_len; + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + + // If claim has a 0-byte encoding, the set key, even if empty, is the claim + if claim.is_empty() { + Self::set(txn, id, &vec![]); + return; } - existing.extend(tx_id.as_ref()); + let mut existing = Self::get(txn, id).unwrap_or_default(); + assert_eq!(existing.len() % claim.len(), 0); + + // Don't add this completion if it's already present + let mut i = 0; + while i < existing.len() { + if &existing[i .. 
(i + claim.len())] == claim { + return; + } + i += claim.len(); + } + + existing.extend(claim); Self::set(txn, id, &existing); } } @@ -110,25 +129,33 @@ impl EventualityDb { } } -impl TransactionDb { - fn save_transaction(txn: &mut impl DbTxn, tx: &N::Transaction) { - Self::set(txn, tx.id().as_ref(), &tx.serialize()); +impl CompletionDb { + fn save_completion( + txn: &mut impl DbTxn, + completion: &::Completion, + ) { + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); } - fn transaction( + fn completion( getter: &impl Get, - id: &>::Id, - ) -> Option { - Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap()) + claim: &::Claim, + ) -> Option<::Completion> { + Self::get(getter, claim.as_ref()) + .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) } } type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = - as SignMachine<::Transaction>>::SignatureShare; -type SignatureMachineFor = - as SignMachine<::Transaction>>::SignatureMachine; +type SignatureShareFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureShare; +type SignatureMachineFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureMachine; pub struct Signer { db: PhantomData, @@ -164,12 +191,11 @@ impl Signer { log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); loop { for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for completion in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting {}", hex::encode(&completion)); + for claim in CompletionsDb::completions::(&db, active) { + log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); // TODO: 
Don't drop the error entirely. Check for invariants - let _ = network - .publish_transaction(&TransactionDb::transaction::(&db, &completion).unwrap()) - .await; + let _ = + network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; } } // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from @@ -242,7 +268,7 @@ impl Signer { fn complete( &mut self, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); @@ -256,7 +282,7 @@ impl Signer { self.signing.remove(&id); // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: tx_id.as_ref().to_vec() } + ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } } #[must_use] @@ -264,16 +290,16 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: &N::Transaction, + completion: &::Completion, ) -> Option { let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, tx); + CompletionsDb::complete::(txn, id, completion); if first_completion { - Some(self.complete(id, &tx.id())) + Some(self.complete(id, &N::Eventuality::claim(completion))) } else { None } @@ -286,49 +312,50 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> Option { if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - let Ok(tx) = 
self.network.get_transaction(tx_id).await else { - warn!( - "a validator claimed {} completed {} yet we didn't have that TX in our mempool {}", - hex::encode(tx_id), - hex::encode(id), - "(or had another connectivity issue)", - ); - return None; - }; + match self.network.confirm_completion(&eventuality, claim).await { + Ok(Some(completion)) => { + info!( + "signer eventuality for {} resolved in {}", + hex::encode(id), + hex::encode(claim.as_ref()) + ); - if self.network.confirm_completion(&eventuality, &tx) { - info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); + let first_completion = !Self::already_completed(txn, id); - let first_completion = !Self::already_completed(txn, id); + // Save this completion to the DB + CompletionsDb::complete::(txn, id, &completion); - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &tx); - - if first_completion { - return Some(self.complete(id, &tx.id())); + if first_completion { + return Some(self.complete(id, claim)); + } + } + Ok(None) => { + warn!( + "a validator claimed {} completed {} when it did not", + hex::encode(claim.as_ref()), + hex::encode(id), + ); + } + Err(_) => { + // Transaction hasn't hit our mempool/was dropped for a different signature + // The latter can happen given certain latency conditions/a single malicious signer + // In the case of a single malicious signer, they can drag multiple honest validators down + // with them, so we unfortunately can't slash on this case + warn!( + "a validator claimed {} completed {} yet we couldn't check that claim", + hex::encode(claim.as_ref()), + hex::encode(id), + ); } - } else { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(tx_id), - hex::encode(id) - ); } } else { - // If we don't have this in RAM, it should be because we already finished signing it - assert!(!CompletionsDb::completions::(txn, id).is_empty()); - info!( - "signer {} informed of the eventuality completion for 
plan {}, {}", - hex::encode(self.keys[0].group_key().to_bytes()), + warn!( + "informed of completion {} for eventuality {}, when we didn't have that eventuality", + hex::encode(claim.as_ref()), hex::encode(id), - "which we already marked as completed", ); } None @@ -405,7 +432,7 @@ impl Signer { let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &self.keys { - let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await { + let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { Err(e) => { error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); return None; @@ -572,7 +599,7 @@ impl Signer { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } - let tx = match machine.complete(shares) { + let completion = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | @@ -588,40 +615,39 @@ impl Signer { }, }; - // Save the transaction in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &tx); + // Save the completion in case it's needed for recovery + CompletionsDb::complete::(txn, id.id, &completion); // Publish it - let tx_id = tx.id(); - if let Err(e) = self.network.publish_transaction(&tx).await { - error!("couldn't publish {:?}: {:?}", tx, e); + if let Err(e) = self.network.publish_completion(&completion).await { + error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); } else { - info!("published {} for plan {}", hex::encode(&tx_id), hex::encode(id.id)); + info!("published completion for plan {}", hex::encode(id.id)); } // Stop trying to sign for this TX - Some(self.complete(id.id, &tx_id)) + Some(self.complete(id.id, &N::Eventuality::claim(&completion))) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - CoordinatorMessage::Completed { session: _, id, tx: mut tx_vec } => { - let mut tx = >::Id::default(); - if tx.as_ref().len() 
!= tx_vec.len() { - let true_len = tx_vec.len(); - tx_vec.truncate(2 * tx.as_ref().len()); + CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { + let mut claim = ::Claim::default(); + if claim.as_ref().len() != claim_vec.len() { + let true_len = claim_vec.len(); + claim_vec.truncate(2 * claim.as_ref().len()); warn!( "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&tx_vec), + hex::encode(&claim_vec), true_len, hex::encode(id), - "that's not a valid TX ID", + "that's not a valid Claim", ); return None; } - tx.as_mut().copy_from_slice(&tx_vec); + claim.as_mut().copy_from_slice(&claim_vec); - self.claimed_eventuality_completion(txn, id, &tx).await + self.claimed_eventuality_completion(txn, id, &claim).await } } } diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index da20091b..8f730dbd 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -13,18 +13,23 @@ use serai_db::{DbTxn, MemDb}; use crate::{ Plan, Db, - networks::{OutputType, Output, Block, Network}, - multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, + networks::{OutputType, Output, Block, UtxoNetwork}, + multisigs::{ + scheduler::Scheduler, + scanner::{ScannerEvent, Scanner, ScannerHandle}, + }, tests::sign, }; -async fn spend( +async fn spend( db: &mut D, network: &N, keys: &HashMap>, scanner: &mut ScannerHandle, outputs: Vec, -) { +) where + >::Addendum: From<()>, +{ let key = keys[&Participant::new(1).unwrap()].group_key(); let mut keys_txs = HashMap::new(); @@ -41,7 +46,8 @@ async fn spend( key, inputs: outputs.clone(), payments: vec![], - change: Some(N::change_address(key)), + change: Some(N::change_address(key).unwrap()), + scheduler_addendum: ().into(), }, 0, ) @@ -70,13 +76,16 @@ async fn spend( scanner.release_lock().await; txn.commit(); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality 
completion"); } } } -pub async fn test_addresses(network: N) { +pub async fn test_addresses(network: N) +where + >::Addendum: From<()>, +{ let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); @@ -101,10 +110,10 @@ pub async fn test_addresses(network: N) { // Receive funds to the various addresses and make sure they're properly identified let mut received_outputs = vec![]; for (kind, address) in [ - (OutputType::External, N::external_address(key)), - (OutputType::Branch, N::branch_address(key)), - (OutputType::Change, N::change_address(key)), - (OutputType::Forwarded, N::forward_address(key)), + (OutputType::External, N::external_address(&network, key).await), + (OutputType::Branch, N::branch_address(key).unwrap()), + (OutputType::Change, N::change_address(key).unwrap()), + (OutputType::Forwarded, N::forward_address(key).unwrap()), ] { let block_id = network.test_send(address).await.id(); @@ -123,7 +132,7 @@ pub async fn test_addresses(network: N) { txn.commit(); received_outputs.extend(outputs); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 192214eb..e2bfdc8a 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -65,7 +65,7 @@ mod bitcoin { .unwrap(); ::tweak_keys(&mut keys); let group_key = keys.group_key(); - let serai_btc_address = ::external_address(group_key); + let serai_btc_address = ::external_address(&btc, group_key).await; // btc key pair to send from let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest); diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 5aad5bb5..42756d8b 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -11,11 +11,11 @@ use tokio::{sync::Mutex, 
time::timeout}; use serai_db::{DbTxn, Db, MemDb}; use crate::{ - networks::{OutputType, Output, Block, Network}, + networks::{OutputType, Output, Block, UtxoNetwork}, multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, }; -pub async fn new_scanner( +pub async fn new_scanner( network: &N, db: &D, group_key: ::G, @@ -40,7 +40,7 @@ pub async fn new_scanner( scanner } -pub async fn test_scanner(network: N) { +pub async fn test_scanner(network: N) { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); N::tweak_keys(&mut keys); @@ -56,7 +56,7 @@ pub async fn test_scanner(network: N) { let scanner = new_scanner(&network, &db, group_key, &first).await; // Receive funds - let block = network.test_send(N::external_address(keys.group_key())).await; + let block = network.test_send(N::external_address(&network, keys.group_key()).await).await; let block_id = block.id(); // Verify the Scanner picked them up @@ -71,7 +71,7 @@ pub async fn test_scanner(network: N) { assert_eq!(outputs[0].kind(), OutputType::External); outputs } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; @@ -101,7 +101,7 @@ pub async fn test_scanner(network: N) { .is_err()); } -pub async fn test_no_deadlock_in_multisig_completed(network: N) { +pub async fn test_no_deadlock_in_multisig_completed(network: N) { // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; @@ -142,14 +142,14 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) { assert!(!is_retirement_block); block } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { .. 
} => {} - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 89d57bf3..524c5d29 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -17,19 +17,20 @@ use serai_client::{ use messages::sign::*; use crate::{ Payment, Plan, - networks::{Output, Transaction, Network}, + networks::{Output, Transaction, Eventuality, UtxoNetwork}, + multisigs::scheduler::Scheduler, signer::Signer, }; #[allow(clippy::type_complexity)] -pub async fn sign( +pub async fn sign( network: N, session: Session, mut keys_txs: HashMap< Participant, (ThresholdKeys, (N::SignableTransaction, N::Eventuality)), >, -) -> >::Id { +) -> ::Claim { let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 }; let mut keys = HashMap::new(); @@ -65,14 +66,15 @@ pub async fn sign( let mut preprocesses = HashMap::new(); + let mut eventuality = None; for i in 1 ..= signers.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - let (tx, eventuality) = txs.remove(&i).unwrap(); + let (tx, this_eventuality) = txs.remove(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); match signers .get_mut(&i) .unwrap() - .sign_transaction(&mut txn, actual_id.id, tx, &eventuality) + .sign_transaction(&mut txn, actual_id.id, tx, &this_eventuality) .await { // All participants should emit a preprocess @@ -86,6 +88,11 @@ pub async fn sign( _ => panic!("didn't get preprocess back"), } txn.commit(); + + if eventuality.is_none() { + eventuality = Some(this_eventuality.clone()); + } + assert_eq!(eventuality, Some(this_eventuality)); } let mut shares = HashMap::new(); @@ -140,19 +147,25 @@ pub async fn sign( txn.commit(); } - let mut typed_tx_id = >::Id::default(); - typed_tx_id.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); - typed_tx_id + let mut typed_claim = ::Claim::default(); + 
typed_claim.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); + assert!(network.check_eventuality_by_claim(&eventuality.unwrap(), &typed_claim).await); + typed_claim } -pub async fn test_signer(network: N) { +pub async fn test_signer(network: N) +where + >::Addendum: From<()>, +{ let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); - let outputs = network.get_outputs(&network.test_send(N::external_address(key)).await, key).await; + let outputs = network + .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) + .await; let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; let amount = 2 * N::DUST; @@ -166,7 +179,7 @@ pub async fn test_signer(network: N) { key, inputs: outputs.clone(), payments: vec![Payment { - address: N::external_address(key), + address: N::external_address(&network, key).await, data: None, balance: Balance { coin: match N::NETWORK { @@ -178,7 +191,8 @@ pub async fn test_signer(network: N) { amount: Amount(amount), }, }], - change: Some(N::change_address(key)), + change: Some(N::change_address(key).unwrap()), + scheduler_addendum: ().into(), }, 0, ) @@ -191,13 +205,12 @@ pub async fn test_signer(network: N) { keys_txs.insert(i, (keys, (signable, eventuality))); } - // The signer may not publish the TX if it has a connection error - // It doesn't fail in this case - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); - assert_eq!(tx.id(), txid); + let claim = sign(network.clone(), Session(0), keys_txs).await; + // Mine a block, and scan it, to ensure that the TX actually made it on chain network.mine_block().await; + let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let outputs = network .get_outputs( 
&network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(), @@ -212,6 +225,7 @@ pub async fn test_signer(network: N) { // Check the eventualities pass for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } } diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index c9cc6c66..4600fcbe 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -15,7 +15,7 @@ use serai_client::{ use crate::{ Payment, Plan, - networks::{Output, Transaction, Block, Network}, + networks::{Output, Transaction, Eventuality, Block, UtxoNetwork}, multisigs::{ scanner::{ScannerEvent, Scanner}, scheduler::Scheduler, @@ -24,7 +24,7 @@ use crate::{ }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet(network: N) { +pub async fn test_wallet(network: N) { // Mine blocks so there's a confirmed block for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; @@ -47,7 +47,7 @@ pub async fn test_wallet(network: N) { network.mine_block().await; } - let block = network.test_send(N::external_address(key)).await; + let block = network.test_send(N::external_address(&network, key).await).await; let block_id = block.id(); match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { @@ -58,7 +58,7 @@ pub async fn test_wallet(network: N) { assert_eq!(outputs.len(), 1); (block_id, outputs) } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } @@ -69,22 +69,13 @@ pub async fn test_wallet(network: N) { txn.commit(); let mut txn = db.txn(); - let mut scheduler = Scheduler::new::( - &mut txn, - key, - match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ); + let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let amount = 2 * N::DUST; let plans = scheduler.schedule::( &mut txn, outputs.clone(), vec![Payment { - address: N::external_address(key), + address: N::external_address(&network, key).await, data: None, balance: Balance { coin: match N::NETWORK { @@ -100,27 +91,26 @@ pub async fn test_wallet(network: N) { false, ); txn.commit(); + assert_eq!(plans.len(), 1); + assert_eq!(plans[0].key, key); + assert_eq!(plans[0].inputs, outputs); assert_eq!( - plans, - vec![Plan { - key, - inputs: outputs.clone(), - payments: vec![Payment { - address: N::external_address(key), - data: None, - balance: Balance { - coin: match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - amount: Amount(amount), - } - }], - change: Some(N::change_address(key)), + plans[0].payments, + 
vec![Payment { + address: N::external_address(&network, key).await, + data: None, + balance: Balance { + coin: match N::NETWORK { + NetworkId::Serai => panic!("test_wallet called with Serai"), + NetworkId::Bitcoin => Coin::Bitcoin, + NetworkId::Ethereum => Coin::Ether, + NetworkId::Monero => Coin::Monero, + }, + amount: Amount(amount), + } }] ); + assert_eq!(plans[0].change, Some(N::change_address(key).unwrap())); { let mut buf = vec![]; @@ -143,10 +133,10 @@ pub async fn test_wallet(network: N) { keys_txs.insert(i, (keys, (signable, eventuality))); } - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); + let claim = sign(network.clone(), Session(0), keys_txs).await; network.mine_block().await; let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let block = network.get_block(block_number).await.unwrap(); let outputs = network.get_outputs(&block, key).await; assert_eq!(outputs.len(), 2); @@ -154,7 +144,8 @@ pub async fn test_wallet(network: N) { assert!((outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)); for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } for _ in 1 .. N::CONFIRMATIONS { @@ -168,7 +159,7 @@ pub async fn test_wallet(network: N) { assert_eq!(block_id, block.id()); assert_eq!(these_outputs, outputs); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } diff --git a/spec/integrations/Ethereum.md b/spec/integrations/Ethereum.md index e66a1f5b..bf32f101 100644 --- a/spec/integrations/Ethereum.md +++ b/spec/integrations/Ethereum.md @@ -15,24 +15,11 @@ is the caller. 
`data` is limited to 512 bytes. -If `data` is provided, the Ethereum Router will call a contract-calling child -contract in order to sandbox it. The first byte of `data` designates which child -child contract to call. After this byte is read, `data` is solely considered as -`data`, post its first byte. The child contract is sent the funds before this -call is performed. +If `data` isn't provided or is malformed, ETH transfers will execute with 5,000 +gas and token transfers with 100,000 gas. -##### Child Contract 0 - -This contract is intended to enable connecting with other protocols, and should -be used to convert withdrawn assets to other assets on Ethereum. - - 1) Transfers the asset to `destination`. - 2) Calls `destination` with `data`. - -##### Child Contract 1 - -This contract is intended to enable authenticated calls from Serai. - - 1) Transfers the asset to `destination`. - 2) Calls `destination` with `data[.. 4], serai_address, data[4 ..]`, where -`serai_address` is the address which triggered this Out Instruction. +If `data` is provided and well-formed, `destination` is ignored and the Ethereum +Router will construct and call a new contract to proxy the contained calls. The +transfer executes to the constructed contract as above, before the constructed +contract is called with the calls inside `data`. The sandboxed execution has a +gas limit of 350,000. 
diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 511382ab..e400057a 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -416,7 +416,11 @@ impl Coordinator { } } - pub async fn get_transaction(&self, ops: &DockerOperations, tx: &[u8]) -> Option> { + pub async fn get_published_transaction( + &self, + ops: &DockerOperations, + tx: &[u8], + ) -> Option> { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { NetworkId::Bitcoin => { @@ -424,8 +428,15 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); + + // Bitcoin publishes a 0-byte TX ID to reduce variables + // Accordingly, read the mempool to find the (presumed relevant) TX + let entries: Vec = + rpc.rpc_call("getrawmempool", serde_json::json!([false])).await.unwrap(); + assert_eq!(entries.len(), 1, "more than one entry in the mempool, so unclear which to get"); + let mut hash = [0; 32]; - hash.copy_from_slice(tx); + hash.copy_from_slice(&hex::decode(&entries[0]).unwrap()); if let Ok(tx) = rpc.get_transaction(&hash).await { let mut buf = vec![]; tx.consensus_encode(&mut buf).unwrap(); diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 8685af04..4d0d3cd6 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -261,12 +261,12 @@ fn send_test() { let participating = participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::>(); for participant in &participating { - assert!(coordinators[*participant].get_transaction(&ops, &tx_id).await.is_some()); + assert!(coordinators[*participant].get_published_transaction(&ops, &tx_id).await.is_some()); } // Publish this transaction to the left out nodes let tx = coordinators[*participating.iter().next().unwrap()] - .get_transaction(&ops, &tx_id) + .get_published_transaction(&ops, &tx_id) .await .unwrap(); for (i, coordinator) in 
coordinators.iter_mut().enumerate() { From 0ddbaefb382f1fbe45f238fee0513719aa018e85 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 06:11:52 -0400 Subject: [PATCH 080/126] Correct timing around when we verify precommit signatures --- coordinator/tributary/tendermint/src/lib.rs | 65 ++++++++++++--------- 1 file changed, 36 insertions(+), 29 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 9ee71a9d..0e328e02 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -745,33 +745,26 @@ impl TendermintMachine { msg.data.step(), ); - // Run all `upons` run for any round - // If it returned true, we added a new block, so return - if self.all_any_round_upons(msg.round).await { - return Ok(()); - } + // L55-56 + // Jump ahead if we should + if (msg.round.0 > self.block.round().number.0) && + (self.block.log.round_participation(msg.round) >= self.weights.fault_threshold()) + { + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); - // Check if we need to jump ahead - #[allow(clippy::comparison_chain)] - if msg.round.0 < self.block.round().number.0 { - // Prior round, disregard if not finalizing - return Ok(()); - } else if msg.round.0 > self.block.round().number.0 { - // 55-56 - // Jump, enabling processing by the below code - if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { - log::debug!( - target: "tendermint", - "jumping from round {} to round {}", - self.block.round().number.0, - msg.round.0, - ); + // Jump to the new round. + let old_round = self.block.round().number; + self.round(msg.round, None); - // Jump to the new round. 
- self.round(msg.round, None); - - // If this round already has precommit messages, verify their signatures - let round_msgs = self.block.log.log[&msg.round].clone(); + // If any jumped over/to round already has precommit messages, verify their signatures + for jumped in (old_round.0 + 1) ..= msg.round.0 { + let jumped = RoundNumber(jumped); + let round_msgs = self.block.log.log.get(&jumped).cloned().unwrap_or_default(); for (validator, msgs) in &round_msgs { if let Some(existing) = msgs.get(&Step::Precommit) { if let Ok(res) = self.verify_precommit_signature(existing).await { @@ -786,7 +779,7 @@ impl TendermintMachine { .block .log .log - .get_mut(&msg.round) + .get_mut(&jumped) .unwrap() .get_mut(validator) .unwrap() @@ -795,12 +788,26 @@ impl TendermintMachine { } } } - } else { - // Future round which we aren't ready to jump to, so return for now - return Ok(()); } } + // Now that we've jumped, and: + // 1) If this is a message for an old round, verified the precommit signatures + // 2) If this is a message for what was the current round, verified the precommit signatures + // 3) If this is a message for what was a future round, verified the precommit signatures if it + // has 34+% participation + // Run all `upons` run for any round, which may produce a Commit if it has 67+% participation + // (returning true if it does, letting us return now) + // It's necessary to verify the precommit signatures before Commit production is allowed, hence + // this specific flow + if self.all_any_round_upons(msg.round).await { + return Ok(()); + } + + // If this is a historic round, or a future round without sufficient participation, return + if msg.round.0 != self.block.round().number.0 { + return Ok(()); + } // msg.round is now guaranteed to be equal to self.block.round().number debug_assert_eq!(msg.round, self.block.round().number); From be7780e69dbe093d7246508112641884353b3707 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 07:02:49 -0400 Subject: [PATCH 
081/126] Restart coordinator peer finding upon disconnections --- coordinator/src/p2p.rs | 188 ++++++++++++++++++++++++++++++----------- 1 file changed, 138 insertions(+), 50 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 48a23aa5..8e88f6ee 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -2,7 +2,7 @@ use core::{time::Duration, fmt}; use std::{ sync::Arc, io::Read, - collections::HashMap, + collections::{HashSet, HashMap}, time::{SystemTime, Instant}, }; @@ -292,17 +292,22 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } + // The addrs we're currently dialing, and the networks associated with them + let dialing_peers = Arc::new(RwLock::new(HashMap::new())); + // Find and connect to peers - let (pending_p2p_connections_send, mut pending_p2p_connections_recv) = + let (connect_to_network_send, mut connect_to_network_recv) = tokio::sync::mpsc::unbounded_channel(); let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); tokio::spawn({ - let pending_p2p_connections_send = pending_p2p_connections_send.clone(); + let dialing_peers = dialing_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); async move { loop { - // TODO: Add better peer management logic? 
- { - let connect = |addr: Multiaddr| { + let connect = |network: NetworkId, addr: Multiaddr| { + let dialing_peers = dialing_peers.clone(); + let to_dial_send = to_dial_send.clone(); + async move { log::info!("found peer from substrate: {addr}"); let protocols = addr.iter().filter_map(|piece| match piece { @@ -320,45 +325,78 @@ impl LibP2p { let addr = new_addr; log::debug!("transformed found peer: {addr}"); - // TODO: Check this isn't a duplicate - to_dial_send.send(addr).unwrap(); - }; - - // TODO: We should also connect to random peers from random nets as needed for - // cosigning - let mut to_retry = vec![]; - while let Some(network) = pending_p2p_connections_recv.recv().await { - if let Ok(mut nodes) = serai.p2p_validators(network).await { - // If there's an insufficient amount of nodes known, connect to all yet add it - // back and break - if nodes.len() < 3 { - log::warn!( - "insufficient amount of P2P nodes known for {:?}: {}", - network, - nodes.len() - ); - to_retry.push(network); - for node in nodes { - connect(node); - } - continue; + let (is_fresh_dial, nets) = { + let mut dialing_peers = dialing_peers.write().await; + let is_fresh_dial = dialing_peers.contains_key(&addr); + if !is_fresh_dial { + dialing_peers.insert(addr.clone(), HashSet::new()); } + // Associate this network with this peer + dialing_peers.get_mut(&addr).unwrap().insert(network); - // Randomly select up to 5 - for _ in 0 .. 
5 { - if !nodes.is_empty() { - let to_connect = nodes.swap_remove( - usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) - .unwrap(), - ); - connect(to_connect); - } + let nets = dialing_peers.get(&addr).unwrap().clone(); + (is_fresh_dial, nets) + }; + + // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing + // fails + // This performs cleanup and bounds the size of the map to whatever growth occurs + // within a temporal window + tokio::spawn({ + let dialing_peers = dialing_peers.clone(); + let addr = addr.clone(); + async move { + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + let mut dialing_peers = dialing_peers.write().await; + dialing_peers.remove(&addr); + } + }); + + if !is_fresh_dial { + to_dial_send.send((addr, nets)).unwrap(); + } + } + }; + + // TODO: We should also connect to random peers from random nets as needed for + // cosigning + + // Define a buffer, `to_retry`, so we can exhaust this channel before sending more down + // it + let mut to_retry = vec![]; + while let Some(network) = connect_to_network_recv.recv().await { + if let Ok(mut nodes) = serai.p2p_validators(network).await { + // If there's an insufficient amount of nodes known, connect to all yet add it + // back and break + if nodes.len() < 3 { + log::warn!( + "insufficient amount of P2P nodes known for {:?}: {}", + network, + nodes.len() + ); + // TODO: We weren't retry so long as we're told of sufficient nodes + // We should stop retrying when we actually connect to sufficient nodes + to_retry.push(network); + for node in nodes { + connect(network, node).await; + } + continue; + } + + // Randomly select up to 5 + for _ in 0 .. 
5 { + if !nodes.is_empty() { + let to_connect = nodes.swap_remove( + usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) + .unwrap(), + ); + connect(network, to_connect).await; } } } - for to_retry in to_retry { - pending_p2p_connections_send.send(to_retry).unwrap(); - } + } + for to_retry in to_retry { + connect_to_network_send.send(to_retry).unwrap(); } // Sleep 60 seconds before moving to the next iteration tokio::time::sleep(core::time::Duration::from_secs(60)).await; @@ -397,8 +435,10 @@ impl LibP2p { } async move { + // The peers we're currently connected to, and the networks associated with them + let mut connected_peers = HashMap::new(); + let mut set_for_genesis = HashMap::new(); - let mut connected_peers = 0; loop { let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { @@ -411,7 +451,7 @@ impl LibP2p { let topic = topic_for_set(set); if subscribe { log::info!("subscribing to p2p messages for {set:?}"); - pending_p2p_connections_send.send(set.network).unwrap(); + connect_to_network_send.send(set.network).unwrap(); set_for_genesis.insert(genesis, set); swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { @@ -440,26 +480,65 @@ impl LibP2p { Some(SwarmEvent::Dialing { connection_id, .. }) => { log::debug!("dialing to peer in connection ID {}", &connection_id); } - Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { + Some(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + .. 
+ }) => { if &peer_id == swarm.local_peer_id() { log::warn!("established a libp2p connection to ourselves"); swarm.close_connection(connection_id); continue; } - connected_peers += 1; + let addr = endpoint.get_remote_address(); + let nets = { + let mut dialing_peers = dialing_peers.write().await; + if let Some(nets) = dialing_peers.remove(addr) { + nets + } else { + log::debug!("connected to a peer who we didn't have within dialing"); + HashSet::new() + } + }; + connected_peers.insert(addr.clone(), nets); + log::debug!( "connection established to peer {} in connection ID {}, connected peers: {}", &peer_id, &connection_id, - connected_peers, + connected_peers.len(), ); } - Some(SwarmEvent::ConnectionClosed { peer_id, .. }) => { - connected_peers -= 1; + Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => { + let nets = + connected_peers + .remove(endpoint.get_remote_address()) + .expect("closed connection to peer which never connected"); + + // For each net we lost a peer for, check if we still have sufficient peers + // overall + for net in nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < 3 { + connect_to_network_send + .send(net) + .expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)" + ); + } + } + log::debug!( "connection with peer {peer_id} closed, connected peers: {}", - connected_peers, + connected_peers.len(), ); } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( @@ -474,8 +553,17 @@ impl LibP2p { } // Handle peers to dial - addr = to_dial_recv.recv() => { - let addr = addr.expect("received address was None (sender dropped?)"); + addr_and_nets = to_dial_recv.recv() => { + let (addr, nets) = + addr_and_nets.expect("received address was None (sender dropped?)"); + // If we've already dialed and connected to this address, don't further dial them + 
// Just associate these networks with them + if let Some(existing_nets) = connected_peers.get_mut(&addr) { + for net in nets { + existing_nets.insert(net); + } + } + if let Err(e) = swarm.dial(addr) { log::warn!("dialing peer failed: {e:?}"); } From 320b5627b5bd630eee1f9f82da4be52225241f7a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 07:26:16 -0400 Subject: [PATCH 082/126] Retry if initial dials fail, not just upon disconnect --- coordinator/src/p2p.rs | 59 ++++++++++++++++++++++++++++++++---------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 8e88f6ee..b69da990 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -294,6 +294,8 @@ impl LibP2p { // The addrs we're currently dialing, and the networks associated with them let dialing_peers = Arc::new(RwLock::new(HashMap::new())); + // The peers we're currently connected to, and the networks associated with them + let connected_peers = Arc::new(RwLock::new(HashMap::>::new())); // Find and connect to peers let (connect_to_network_send, mut connect_to_network_recv) = @@ -301,12 +303,16 @@ impl LibP2p { let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); tokio::spawn({ let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); async move { loop { let connect = |network: NetworkId, addr: Multiaddr| { let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); let to_dial_send = to_dial_send.clone(); + let connect_to_network_send = connect_to_network_send.clone(); async move { log::info!("found peer from substrate: {addr}"); @@ -344,15 +350,37 @@ impl LibP2p { // within a temporal window tokio::spawn({ let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); let 
addr = addr.clone(); async move { tokio::time::sleep(core::time::Duration::from_secs(60)).await; let mut dialing_peers = dialing_peers.write().await; - dialing_peers.remove(&addr); + if let Some(expected_nets) = dialing_peers.remove(&addr) { + log::debug!("removed addr from dialing upon timeout: {addr}"); + + // TODO: De-duplicate this below instance + // If we failed to dial and haven't gotten enough actual connections, retry + let connected_peers = connected_peers.read().await; + for net in expected_nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < 3 { + connect_to_network_send.send(net).expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)", + ); + } + } + } } }); - if !is_fresh_dial { + if is_fresh_dial { to_dial_send.send((addr, nets)).unwrap(); } } @@ -374,8 +402,6 @@ impl LibP2p { network, nodes.len() ); - // TODO: We weren't retry so long as we're told of sufficient nodes - // We should stop retrying when we actually connect to sufficient nodes to_retry.push(network); for node in nodes { connect(network, node).await; @@ -435,8 +461,7 @@ impl LibP2p { } async move { - // The peers we're currently connected to, and the networks associated with them - let mut connected_peers = HashMap::new(); + let connected_peers = connected_peers.clone(); let mut set_for_genesis = HashMap::new(); loop { @@ -502,20 +527,26 @@ impl LibP2p { HashSet::new() } }; - connected_peers.insert(addr.clone(), nets); + { + let mut connected_peers = connected_peers.write().await; + connected_peers.insert(addr.clone(), nets); - log::debug!( - "connection established to peer {} in connection ID {}, connected peers: {}", - &peer_id, - &connection_id, - connected_peers.len(), - ); + log::debug!( + "connection established to peer {} in connection ID {}, connected peers: {}", + &peer_id, + 
&connection_id, + connected_peers.len(), + ); + } } Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => { + let mut connected_peers = connected_peers.write().await; let nets = connected_peers .remove(endpoint.get_remote_address()) .expect("closed connection to peer which never connected"); + // Downgrade to a read lock + let connected_peers = connected_peers.downgrade(); // For each net we lost a peer for, check if we still have sufficient peers // overall @@ -558,7 +589,7 @@ impl LibP2p { addr_and_nets.expect("received address was None (sender dropped?)"); // If we've already dialed and connected to this address, don't further dial them // Just associate these networks with them - if let Some(existing_nets) = connected_peers.get_mut(&addr) { + if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) { for net in nets { existing_nets.insert(net); } From e01848aa9eb16d74fa054b7d5b872276ccfdfcc7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 07:30:18 -0400 Subject: [PATCH 083/126] Correct boolean NOT on is_fresh_dial --- coordinator/src/p2p.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index b69da990..85251dc7 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -333,8 +333,8 @@ impl LibP2p { let (is_fresh_dial, nets) = { let mut dialing_peers = dialing_peers.write().await; - let is_fresh_dial = dialing_peers.contains_key(&addr); - if !is_fresh_dial { + let is_fresh_dial = !dialing_peers.contains_key(&addr); + if is_fresh_dial { dialing_peers.insert(addr.clone(), HashSet::new()); } // Associate this network with this peer From fddbebc7c021deb0a032069016ad25d5888b22f4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 08:02:13 -0400 Subject: [PATCH 084/126] Replace expect with debug log --- coordinator/src/p2p.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/coordinator/src/p2p.rs 
b/coordinator/src/p2p.rs index 85251dc7..81c43555 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -541,10 +541,10 @@ impl LibP2p { } Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. }) => { let mut connected_peers = connected_peers.write().await; - let nets = - connected_peers - .remove(endpoint.get_remote_address()) - .expect("closed connection to peer which never connected"); + let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else { + log::debug!("closed connection to peer which wasn't in connected_peers"); + continue; + }; // Downgrade to a read lock let connected_peers = connected_peers.downgrade(); From 5a3ea80943b59892a9b0042dae3268d7efcdfa60 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 08:36:30 -0400 Subject: [PATCH 085/126] Add missing continue to prevent dialing a node we're connected to --- coordinator/src/p2p.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 81c43555..6c845f4e 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -593,6 +593,7 @@ impl LibP2p { for net in nets { existing_nets.insert(net); } + continue; } if let Err(e) = swarm.dial(addr) { From 749d783b1e1c488ef0c2333ac74efb09bf21d24d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 08:53:35 -0400 Subject: [PATCH 086/126] Comment the insanely aggressive timeout future trace log --- coordinator/tributary/tendermint/src/round.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary/tendermint/src/round.rs index 445c2784..b39cebe5 100644 --- a/coordinator/tributary/tendermint/src/round.rs +++ b/coordinator/tributary/tendermint/src/round.rs @@ -58,12 +58,14 @@ impl RoundData { // Poll all set timeouts, returning the Step whose timeout has just expired pub(crate) async fn timeout_future(&self) -> Step { let now = Instant::now(); + /* log::trace!( target: "tendermint", 
"getting timeout_future, from step {:?}, off timeouts: {:?}", self.step, self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::>() ); + */ let timeout_future = |step| { let timeout = self.timeouts.get(&step).copied(); From 5fa7e3d45048ee1eca3695ec8a20ada7bc87ad8c Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 08:55:29 -0400 Subject: [PATCH 087/126] Line for prior commit --- coordinator/tributary/tendermint/src/round.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary/tendermint/src/round.rs index b39cebe5..a97e3ed1 100644 --- a/coordinator/tributary/tendermint/src/round.rs +++ b/coordinator/tributary/tendermint/src/round.rs @@ -57,8 +57,8 @@ impl RoundData { // Poll all set timeouts, returning the Step whose timeout has just expired pub(crate) async fn timeout_future(&self) -> Step { - let now = Instant::now(); /* + let now = Instant::now(); log::trace!( target: "tendermint", "getting timeout_future, from step {:?}, off timeouts: {:?}", From 933b17aa911d1b27c8c76d5c4715b329d6cbfa5a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 10:14:22 -0400 Subject: [PATCH 088/126] Revert coordinator/tributary to fd4f247917cb83de19f933f75e8cf90008b25976 \#560 is causing notable CI failures, with its logs including slashes at 10x the prior rate. 
--- coordinator/tributary/src/lib.rs | 34 +- coordinator/tributary/src/tendermint/mod.rs | 32 +- coordinator/tributary/tendermint/src/block.rs | 17 +- coordinator/tributary/tendermint/src/ext.rs | 2 +- coordinator/tributary/tendermint/src/lib.rs | 978 ++++++++---------- .../tributary/tendermint/src/message_log.rs | 15 +- coordinator/tributary/tendermint/src/round.rs | 2 - coordinator/tributary/tendermint/tests/ext.rs | 2 +- 8 files changed, 529 insertions(+), 553 deletions(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 121ac385..dcf38c68 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -1,5 +1,5 @@ use core::{marker::PhantomData, fmt::Debug}; -use std::{sync::Arc, io}; +use std::{sync::Arc, io, collections::VecDeque}; use async_trait::async_trait; @@ -154,6 +154,14 @@ pub struct Tributary { synced_block: Arc>>>, synced_block_result: Arc>, messages: Arc>>>, + + p2p_meta_task_handle: Arc, +} + +impl Drop for Tributary { + fn drop(&mut self) { + self.p2p_meta_task_handle.abort(); + } } impl Tributary { @@ -185,7 +193,28 @@ impl Tributary { ); let blockchain = Arc::new(RwLock::new(blockchain)); - let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p }; + let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new())); + // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the + // P2P layer + let p2p_meta_task_handle = Arc::new( + tokio::spawn({ + let to_rebroadcast = to_rebroadcast.clone(); + let p2p = p2p.clone(); + async move { + loop { + let to_rebroadcast = to_rebroadcast.read().await.clone(); + for msg in to_rebroadcast { + p2p.broadcast(genesis, msg).await; + } + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }) + .abort_handle(), + ); + + let network = + TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p }; let TendermintHandle { synced_block, synced_block_result, 
messages, machine } = TendermintMachine::new( @@ -206,6 +235,7 @@ impl Tributary { synced_block: Arc::new(RwLock::new(synced_block)), synced_block_result: Arc::new(RwLock::new(synced_block_result)), messages: Arc::new(RwLock::new(messages)), + p2p_meta_task_handle, }) } diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index 0ce6232c..e38efa5d 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -1,5 +1,8 @@ use core::ops::Deref; -use std::{sync::Arc, collections::HashMap}; +use std::{ + sync::Arc, + collections::{VecDeque, HashMap}, +}; use async_trait::async_trait; @@ -267,6 +270,8 @@ pub struct TendermintNetwork { pub(crate) validators: Arc, pub(crate) blockchain: Arc>>, + pub(crate) to_rebroadcast: Arc>>>, + pub(crate) p2p: P, } @@ -303,6 +308,26 @@ impl Network for TendermintNetwork async fn broadcast(&mut self, msg: SignedMessageFor) { let mut to_broadcast = vec![TENDERMINT_MESSAGE]; to_broadcast.extend(msg.encode()); + + // Since we're broadcasting a Tendermint message, set it to be re-broadcasted every second + // until the block it's trying to build is complete + // If the P2P layer drops a message before all nodes obtained access, or a node had an + // intermittent failure, this will ensure reconcilliation + // This is atrocious if there's no content-based deduplication protocol for messages actively + // being gossiped + // LibP2p, as used by Serai, is configured to content-based deduplicate + { + let mut to_rebroadcast_lock = self.to_rebroadcast.write().await; + to_rebroadcast_lock.push_back(to_broadcast.clone()); + // We should have, ideally, 3 * validators messages within a round + // Therefore, this should keep the most recent 2-rounds + // TODO: This isn't perfect. 
Each participant should just rebroadcast their latest round of + // messages + while to_rebroadcast_lock.len() > (6 * self.validators.weights.len()) { + to_rebroadcast_lock.pop_front(); + } + } + self.p2p.broadcast(self.genesis, to_broadcast).await } @@ -341,7 +366,7 @@ impl Network for TendermintNetwork } } - async fn validate(&self, block: &Self::Block) -> Result<(), TendermintBlockError> { + async fn validate(&mut self, block: &Self::Block) -> Result<(), TendermintBlockError> { let block = Block::read::<&[u8]>(&mut block.0.as_ref()).map_err(|_| TendermintBlockError::Fatal)?; self @@ -403,6 +428,9 @@ impl Network for TendermintNetwork } } + // Since we've added a valid block, clear to_rebroadcast + *self.to_rebroadcast.write().await = VecDeque::new(); + Some(TendermintBlock( self.blockchain.write().await.build_block::(&self.signature_scheme()).serialize(), )) diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 236b4816..6dfacfdb 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -3,6 +3,7 @@ use std::{ collections::{HashSet, HashMap}, }; +use parity_scale_codec::Encode; use serai_db::{Get, DbTxn, Db}; use crate::{ @@ -19,7 +20,7 @@ pub(crate) struct BlockData { pub(crate) number: BlockNumber, pub(crate) validator_id: Option, - pub(crate) our_proposal: Option, + pub(crate) proposal: Option, pub(crate) log: MessageLog, pub(crate) slashes: HashSet, @@ -42,7 +43,7 @@ impl BlockData { weights: Arc, number: BlockNumber, validator_id: Option, - our_proposal: Option, + proposal: Option, ) -> BlockData { BlockData { db, @@ -50,7 +51,7 @@ impl BlockData { number, validator_id, - our_proposal, + proposal, log: MessageLog::new(weights), slashes: HashSet::new(), @@ -107,17 +108,17 @@ impl BlockData { self.populate_end_time(round); } - // L11-13 + // 11-13 self.round = Some(RoundData::::new( round, time.unwrap_or_else(|| self.end_time[&RoundNumber(round.0 
- 1)]), )); self.end_time.insert(round, self.round().end_time()); - // L14-21 + // 14-21 if Some(proposer) == self.validator_id { let (round, block) = self.valid.clone().unzip(); - block.or_else(|| self.our_proposal.clone()).map(|block| Data::Proposal(round, block)) + block.or_else(|| self.proposal.clone()).map(|block| Data::Proposal(round, block)) } else { self.round_mut().set_timeout(Step::Propose); None @@ -197,8 +198,8 @@ impl BlockData { assert!(!new_round); None?; } - // Put that we're sending this message to the DB - txn.put(&msg_key, []); + // Put this message to the DB + txn.put(&msg_key, res.encode()); txn.commit(); } diff --git a/coordinator/tributary/tendermint/src/ext.rs b/coordinator/tributary/tendermint/src/ext.rs index 3869d9d9..b3d568a2 100644 --- a/coordinator/tributary/tendermint/src/ext.rs +++ b/coordinator/tributary/tendermint/src/ext.rs @@ -288,7 +288,7 @@ pub trait Network: Sized + Send + Sync { async fn slash(&mut self, validator: Self::ValidatorId, slash_event: SlashEvent); /// Validate a block. - async fn validate(&self, block: &Self::Block) -> Result<(), BlockError>; + async fn validate(&mut self, block: &Self::Block) -> Result<(), BlockError>; /// Add a block, returning the proposal for the next one. 
/// diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 0e328e02..da80a41c 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -6,7 +6,7 @@ use std::{ collections::VecDeque, }; -use parity_scale_codec::{Encode, Decode, IoReader}; +use parity_scale_codec::{Encode, Decode}; use futures_channel::mpsc; use futures_util::{ @@ -15,8 +15,6 @@ use futures_util::{ }; use tokio::time::sleep; -use serai_db::{Get, DbTxn, Db}; - pub mod time; use time::{sys_time, CanonicalInstant}; @@ -32,11 +30,6 @@ pub(crate) mod message_log; pub mod ext; use ext::*; -const MESSAGE_TAPE_KEY: &[u8] = b"tendermint-machine-message_tape"; -fn message_tape_key(genesis: [u8; 32]) -> Vec { - [MESSAGE_TAPE_KEY, &genesis].concat() -} - pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec { [&end_time.to_le_bytes(), id].concat() } @@ -110,23 +103,9 @@ impl SignedMessage { } } -#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] -pub enum SlashReason { - FailToPropose, - InvalidBlock, - InvalidProposer, -} - -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] -pub enum Evidence { - ConflictingMessages(Vec, Vec), - InvalidPrecommit(Vec), - InvalidValidRound(Vec), -} - #[derive(Clone, PartialEq, Eq, Debug)] -pub enum TendermintError { - Malicious, +pub enum TendermintError { + Malicious(N::ValidatorId, Option), Temporal, AlreadyHandled, InvalidEvidence, @@ -147,6 +126,20 @@ pub type SignedMessageFor = SignedMessage< <::SignatureScheme as SignatureScheme>::Signature, >; +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)] +pub enum SlashReason { + FailToPropose, + InvalidBlock, + InvalidMessage, +} + +#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] +pub enum Evidence { + ConflictingMessages(Vec, Vec), + InvalidPrecommit(Vec), + InvalidValidRound(Vec), +} + pub fn decode_signed_message(mut data: &[u8]) -> Option> { SignedMessageFor::::decode(&mut data).ok() } @@ 
-154,7 +147,7 @@ pub fn decode_signed_message(mut data: &[u8]) -> Option( data: &[u8], schema: &N::SignatureScheme, -) -> Result, TendermintError> { +) -> Result, TendermintError> { let msg = decode_signed_message::(data).ok_or(TendermintError::InvalidEvidence)?; // verify that evidence messages are signed correctly @@ -169,7 +162,7 @@ pub fn verify_tendermint_evience( evidence: &Evidence, schema: &N::SignatureScheme, commit: impl Fn(u64) -> Option>, -) -> Result<(), TendermintError> { +) -> Result<(), TendermintError> { match evidence { Evidence::ConflictingMessages(first, second) => { let first = decode_and_verify_signed_message::(first, schema)?.msg; @@ -193,16 +186,15 @@ pub fn verify_tendermint_evience( }; // TODO: We need to be passed in the genesis time to handle this edge case if msg.block.0 == 0 { - Err(TendermintError::InvalidEvidence)? - // todo!("invalid precommit signature on first block") + todo!("invalid precommit signature on first block") } // get the last commit let prior_commit = match commit(msg.block.0 - 1) { Some(c) => c, - // If we have yet to sync the block in question, we will return InvalidEvidence based + // If we have yet to sync the block in question, we will return InvalidContent based // on our own temporal ambiguity - // This will also cause an InvalidEvidence for anything using a non-existent block, + // This will also cause an InvalidContent for anything using a non-existent block, // yet that's valid behavior // TODO: Double check the ramifications of this _ => Err(TendermintError::InvalidEvidence)?, @@ -237,16 +229,6 @@ pub enum SlashEvent { WithEvidence(Evidence), } -// Struct for if various upon handlers have been triggered to ensure they don't trigger multiple -// times. -#[derive(Clone, PartialEq, Eq, Debug)] -struct Upons { - upon_prevotes: bool, - upon_successful_current_round_prevotes: bool, - upon_negative_current_round_prevotes: bool, - upon_precommits: bool, -} - /// A machine executing the Tendermint protocol. 
pub struct TendermintMachine { db: N::Db, @@ -263,7 +245,6 @@ pub struct TendermintMachine { synced_block_result_send: mpsc::UnboundedSender, block: BlockData, - upons: Upons, } pub struct SyncedBlock { @@ -344,13 +325,6 @@ impl TendermintMachine { ); sleep(time_until_round_end).await; - // Clear the message tape - { - let mut txn = self.db.txn(); - txn.del(&message_tape_key(self.genesis)); - txn.commit(); - } - // Clear our outbound message queue self.queue = VecDeque::new(); @@ -364,14 +338,6 @@ impl TendermintMachine { proposal, ); - // Clear upons - self.upons = Upons { - upon_prevotes: false, - upon_successful_current_round_prevotes: false, - upon_negative_current_round_prevotes: false, - upon_precommits: false, - }; - // Start the first round self.round(RoundNumber(0), Some(round_end)); } @@ -409,414 +375,6 @@ impl TendermintMachine { } } - fn proposal_for_round(&self, round: RoundNumber) -> Option<(Option, &N::Block)> { - let proposer = self.weights.proposer(self.block.number, round); - if let Some(proposal_signed) = self.block.log.get(round, proposer, Step::Propose) { - if let Data::Proposal(vr, block) = &proposal_signed.msg.data { - Some((*vr, block)) - } else { - panic!("message for Step::Propose didn't have Data::Proposal"); - } - } else { - None? - } - } - - // L22-27 - fn upon_proposal_without_valid_round(&mut self) { - if self.block.round().step != Step::Propose { - return; - } - - // If we have the proposal message... 
- let Some((None, block)) = self.proposal_for_round(self.block.round().number) else { - return; - }; - - // There either needs to not be a locked value or it must be equivalent - #[allow(clippy::map_unwrap_or)] - if self - .block - .locked - .as_ref() - .map(|(_round, locked_block)| block.id() == *locked_block) - .unwrap_or(true) - { - self.broadcast(Data::Prevote(Some(block.id()))); - } else { - self.broadcast(Data::Prevote(None)); - } - } - - // L28-33 - fn upon_proposal_with_valid_round(&mut self) { - if self.block.round().step != Step::Propose { - return; - } - - // If we have the proposal message... - let Some((Some(proposal_valid_round), block)) = - self.proposal_for_round(self.block.round().number) - else { - return; - }; - - // Check we have the necessary prevotes - if !self.block.log.has_consensus(proposal_valid_round, &Data::Prevote(Some(block.id()))) { - return; - } - - // We don't check valid round < current round as the `message` function does - - // If locked is None, lockedRoundp is -1 and less than valid round - #[allow(clippy::map_unwrap_or)] - let locked_clause_1 = self - .block - .locked - .as_ref() - .map(|(locked_round, _block)| locked_round.0 <= proposal_valid_round.0) - .unwrap_or(true); - // The second clause is if the locked values are equivalent. 
If no value is locked, they aren't - #[allow(clippy::map_unwrap_or)] - let locked_clause_2 = self - .block - .locked - .as_ref() - .map(|(_round, locked_block)| block.id() == *locked_block) - .unwrap_or(false); - - if locked_clause_1 || locked_clause_2 { - self.broadcast(Data::Prevote(Some(block.id()))); - } else { - self.broadcast(Data::Prevote(None)); - } - } - - // L34-35 - fn upon_prevotes(&mut self) { - if self.upons.upon_prevotes || (self.block.round().step != Step::Prevote) { - return; - } - - if self.block.log.has_participation(self.block.round().number, Step::Prevote) { - self.block.round_mut().set_timeout(Step::Prevote); - self.upons.upon_prevotes = true; - } - } - - // L36-43 - async fn upon_successful_current_round_prevotes(&mut self) { - // Returning if `self.step == Step::Propose` is equivalent to guarding `step >= prevote` - if self.upons.upon_successful_current_round_prevotes || - (self.block.round().step == Step::Propose) - { - return; - } - - // If we have the proposal message... 
- let Some((_, block)) = self.proposal_for_round(self.block.round().number) else { - return; - }; - - // Check we have the necessary prevotes - if !self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { - return; - } - - let block = block.clone(); - self.upons.upon_successful_current_round_prevotes = true; - - if self.block.round().step == Step::Prevote { - self.block.locked = Some((self.block.round().number, block.id())); - let signature = self - .signer - .sign(&commit_msg( - self.block.end_time[&self.block.round().number].canonical(), - block.id().as_ref(), - )) - .await; - self.broadcast(Data::Precommit(Some((block.id(), signature)))); - } - self.block.valid = Some((self.block.round().number, block)); - } - - // L44-46 - fn upon_negative_current_round_prevotes(&mut self) { - if self.upons.upon_negative_current_round_prevotes || (self.block.round().step != Step::Prevote) - { - return; - } - - if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(None)) { - self.broadcast(Data::Precommit(None)); - } - - self.upons.upon_negative_current_round_prevotes = true; - } - - // L47-48 - fn upon_precommits(&mut self) { - if self.upons.upon_precommits { - return; - } - - if self.block.log.has_participation(self.block.round().number, Step::Precommit) { - self.block.round_mut().set_timeout(Step::Precommit); - self.upons.upon_precommits = true; - } - } - - // L22-48 - async fn all_current_round_upons(&mut self) { - self.upon_proposal_without_valid_round(); - self.upon_proposal_with_valid_round(); - self.upon_prevotes(); - self.upon_successful_current_round_prevotes().await; - self.upon_negative_current_round_prevotes(); - self.upon_precommits(); - } - - // L49-54 - async fn upon_successful_precommits(&mut self, round: RoundNumber) -> bool { - // If we have the proposal message... 
- let Some((_, block)) = self.proposal_for_round(round) else { return false }; - - // Check we have the necessary precommits - // The precommit we check we have consensus upon uses a junk signature since message equality - // disregards the signature - if !self - .block - .log - .has_consensus(round, &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await)))) - { - return false; - } - - // Get all participants in this commit - let mut validators = vec![]; - let mut sigs = vec![]; - // Get all precommits for this round - for (validator, msgs) in &self.block.log.log[&round] { - if let Some(signed) = msgs.get(&Step::Precommit) { - if let Data::Precommit(Some((id, sig))) = &signed.msg.data { - // If this precommit was for this block, include it - if *id == block.id() { - validators.push(*validator); - sigs.push(sig.clone()); - } - } - } - } - - // Form the commit itself - let commit_msg = commit_msg(self.block.end_time[&round].canonical(), block.id().as_ref()); - let commit = Commit { - end_time: self.block.end_time[&round].canonical(), - validators: validators.clone(), - signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs), - }; - debug_assert!(self.network.verify_commit(block.id(), &commit)); - - // Add the block and reset the machine - log::info!( - target: "tendermint", - "TendermintMachine produced block {}", - hex::encode(block.id().as_ref()), - ); - let id = block.id(); - let proposal = self.network.add_block(block.clone(), commit).await; - log::trace!( - target: "tendermint", - "added block {} (produced by machine)", - hex::encode(id.as_ref()), - ); - self.reset(round, proposal).await; - - true - } - - // L49-54 - async fn all_any_round_upons(&mut self, round: RoundNumber) -> bool { - self.upon_successful_precommits(round).await - } - - // Returns Ok(true) if this was a Precommit which had either no signature or its signature - // validated - // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated 
yet - // Returns Err if the signature was invalid - async fn verify_precommit_signature( - &mut self, - signed: &SignedMessageFor, - ) -> Result { - let msg = &signed.msg; - if let Data::Precommit(precommit) = &msg.data { - let Some((id, sig)) = precommit else { return Ok(true) }; - // Also verify the end_time of the commit - // Only perform this verification if we already have the end_time - // Else, there's a DoS where we receive a precommit for some round infinitely in the future - // which forces us to calculate every end time - if let Some(end_time) = self.block.end_time.get(&msg.round) { - if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) - { - log::warn!(target: "tendermint", "validator produced an invalid commit signature"); - self - .slash( - msg.sender, - SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())), - ) - .await; - Err(TendermintError::Malicious)?; - } - return Ok(true); - } - } - Ok(false) - } - - async fn message(&mut self, signed: &SignedMessageFor) -> Result<(), TendermintError> { - let msg = &signed.msg; - if msg.block != self.block.number { - Err(TendermintError::Temporal)?; - } - - // If this is a precommit, verify its signature - self.verify_precommit_signature(signed).await?; - - // Only let the proposer propose - if matches!(msg.data, Data::Proposal(..)) && - (msg.sender != self.weights.proposer(msg.block, msg.round)) - { - log::warn!(target: "tendermint", "validator who wasn't the proposer proposed"); - // TODO: This should have evidence - self - .slash(msg.sender, SlashEvent::Id(SlashReason::InvalidProposer, msg.block.0, msg.round.0)) - .await; - Err(TendermintError::Malicious)?; - }; - - // If this is a proposal, verify the block - // If the block is invalid, drop the message, letting the timeout cover it - // This prevents needing to check if valid inside every `upon` block - if let Data::Proposal(_, block) = &msg.data { - match self.network.validate(block).await { - Ok(()) 
=> {} - Err(BlockError::Temporal) => return Err(TendermintError::Temporal), - Err(BlockError::Fatal) => { - log::warn!(target: "tendermint", "validator proposed a fatally invalid block"); - self - .slash( - msg.sender, - SlashEvent::Id(SlashReason::InvalidBlock, self.block.number.0, msg.round.0), - ) - .await; - Err(TendermintError::Malicious)?; - } - }; - } - - // If this is a proposal, verify the valid round isn't fundamentally invalid - if let Data::Proposal(Some(valid_round), _) = msg.data { - if valid_round.0 >= msg.round.0 { - log::warn!( - target: "tendermint", - "proposed proposed with a syntactically invalid valid round", - ); - self - .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode()))) - .await; - Err(TendermintError::Malicious)?; - } - } - - // Add it to the log, returning if it was already handled - match self.block.log.log(signed.clone()) { - Ok(true) => {} - Ok(false) => Err(TendermintError::AlreadyHandled)?, - Err(evidence) => { - self.slash(msg.sender, SlashEvent::WithEvidence(evidence)).await; - Err(TendermintError::Malicious)?; - } - } - log::debug!( - target: "tendermint", - "received new tendermint message (block: {}, round: {}, step: {:?})", - msg.block.0, - msg.round.0, - msg.data.step(), - ); - - // L55-56 - // Jump ahead if we should - if (msg.round.0 > self.block.round().number.0) && - (self.block.log.round_participation(msg.round) >= self.weights.fault_threshold()) - { - log::debug!( - target: "tendermint", - "jumping from round {} to round {}", - self.block.round().number.0, - msg.round.0, - ); - - // Jump to the new round. 
- let old_round = self.block.round().number; - self.round(msg.round, None); - - // If any jumped over/to round already has precommit messages, verify their signatures - for jumped in (old_round.0 + 1) ..= msg.round.0 { - let jumped = RoundNumber(jumped); - let round_msgs = self.block.log.log.get(&jumped).cloned().unwrap_or_default(); - for (validator, msgs) in &round_msgs { - if let Some(existing) = msgs.get(&Step::Precommit) { - if let Ok(res) = self.verify_precommit_signature(existing).await { - // Ensure this actually verified the signature instead of believing it shouldn't yet - assert!(res); - } else { - // Remove the message so it isn't counted towards forming a commit/included in one - // This won't remove the fact they precommitted for this block hash in the MessageLog - // TODO: Don't even log these in the first place until we jump, preventing needing - // to do this in the first place - self - .block - .log - .log - .get_mut(&jumped) - .unwrap() - .get_mut(validator) - .unwrap() - .remove(&Step::Precommit) - .unwrap(); - } - } - } - } - } - - // Now that we've jumped, and: - // 1) If this is a message for an old round, verified the precommit signatures - // 2) If this is a message for what was the current round, verified the precommit signatures - // 3) If this is a message for what was a future round, verified the precommit signatures if it - // has 34+% participation - // Run all `upons` run for any round, which may produce a Commit if it has 67+% participation - // (returning true if it does, letting us return now) - // It's necessary to verify the precommit signatures before Commit production is allowed, hence - // this specific flow - if self.all_any_round_upons(msg.round).await { - return Ok(()); - } - - // If this is a historic round, or a future round without sufficient participation, return - if msg.round.0 != self.block.round().number.0 { - return Ok(()); - } - // msg.round is now guaranteed to be equal to self.block.round().number - 
debug_assert_eq!(msg.round, self.block.round().number); - - // Run all `upons` run for the current round - self.all_current_round_upons().await; - - Ok(()) - } - /// Create a new Tendermint machine, from the specified point, with the specified block as the /// one to propose next. This will return a channel to send messages from the gossip layer and /// the machine itself. The machine should have `run` called from an asynchronous task. @@ -861,7 +419,7 @@ impl TendermintMachine { let validators = network.signature_scheme(); let weights = Arc::new(network.weights()); let validator_id = signer.validator_id().await; - // L01-10 + // 01-10 let mut machine = TendermintMachine { db: db.clone(), genesis, @@ -884,13 +442,6 @@ impl TendermintMachine { validator_id, Some(proposal), ), - - upons: Upons { - upon_prevotes: false, - upon_successful_current_round_prevotes: false, - upon_negative_current_round_prevotes: false, - upon_precommits: false, - }, }; // The end time of the last block is the start time for this one @@ -909,16 +460,16 @@ impl TendermintMachine { pub async fn run(mut self) { log::debug!(target: "tendermint", "running TendermintMachine"); - let mut rebroadcast_future = Box::pin(sleep(Duration::from_secs(60))).fuse(); loop { // Also create a future for if the queue has a message // Does not pop_front as if another message has higher priority, its future will be handled // instead in this loop, and the popped value would be dropped with the next iteration + // While no other message has a higher priority right now, this is a safer practice let mut queue_future = if self.queue.is_empty() { Fuse::terminated() } else { future::ready(()).fuse() }; if let Some((our_message, msg, mut sig)) = futures_util::select_biased! 
{ - // Handle a new block occurring externally (from an external sync loop) + // Handle a new block occurring externally (an external sync loop) // Has the highest priority as it makes all other futures here irrelevant msg = self.synced_block_recv.next() => { if let Some(SyncedBlock { number, block, commit }) = msg { @@ -952,21 +503,18 @@ impl TendermintMachine { Some((true, self.queue.pop_front().unwrap(), None)) }, - // L57-67 // Handle any timeouts step = self.block.round().timeout_future().fuse() => { // Remove the timeout so it doesn't persist, always being the selected future due to bias // While this does enable the timeout to be entered again, the timeout setting code will // never attempt to add a timeout after its timeout has expired - // (due to it setting an `upon` boolean) self.block.round_mut().timeouts.remove(&step); - - match step { - Step::Propose => { - // Only run if it's still the step in question - if self.block.round().step == step { + // Only run if it's still the step in question + if self.block.round().step == step { + match step { + Step::Propose => { // Slash the validator for not proposing when they should've - log::debug!(target: "tendermint", "validator didn't propose when they should have"); + log::debug!(target: "tendermint", "Validator didn't propose when they should have"); // this slash will be voted on. 
self.slash( self.weights.proposer(self.block.number, self.block.round().number), @@ -977,42 +525,14 @@ impl TendermintMachine { ), ).await; self.broadcast(Data::Prevote(None)); + }, + Step::Prevote => self.broadcast(Data::Precommit(None)), + Step::Precommit => { + self.round(RoundNumber(self.block.round().number.0 + 1), None); + continue; } - }, - Step::Prevote => { - // Only run if it's still the step in question - if self.block.round().step == step { - self.broadcast(Data::Precommit(None)) - } - }, - Step::Precommit => { - self.round(RoundNumber(self.block.round().number.0 + 1), None); } - }; - - // Execute the upons now that the state has changed - self.all_any_round_upons(self.block.round().number).await; - self.all_current_round_upons().await; - - None - }, - - // If it's been more than 60s, rebroadcast our own messages - () = rebroadcast_future => { - let key = message_tape_key(self.genesis); - let messages = self.db.get(key).unwrap_or(vec![]); - let mut messages = messages.as_slice(); - - while !messages.is_empty() { - self.network.broadcast( - SignedMessageFor::::decode(&mut IoReader(&mut messages)) - .expect("saved invalid message to DB") - ).await; } - - // Reset the rebroadcast future - rebroadcast_future = Box::pin(sleep(core::time::Duration::from_secs(60))).fuse(); - None }, @@ -1034,31 +554,429 @@ impl TendermintMachine { } let sig = sig.unwrap(); + // TODO: message may internally call broadcast. We should check within broadcast it's not + // broadcasting our own message at this time. let signed_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() }; let res = self.message(&signed_msg).await; - // If this is our message, and we hit an invariant, we could be slashed. - // We only broadcast our message after running it ourselves, to ensure it doesn't error, to - // ensure we don't get slashed on invariants. 
if res.is_err() && our_message { panic!("honest node (ourselves) had invalid behavior"); } + // Only now should we allow broadcasts since we're sure an invariant wasn't reached causing + // us to have invalid messages. - // Save this message to a linear tape of all our messages for this block, if ours - // TODO: Since we do this after we mark this message as sent to prevent equivocations, a - // precisely time reboot could cause this message marked as sent yet not added to the tape - if our_message { - let message_tape_key = message_tape_key(self.genesis); - let mut txn = self.db.txn(); - let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]); - message_tape.extend(signed_msg.encode()); - txn.put(&message_tape_key, message_tape); + if res.is_ok() { + // Re-broadcast this since it's an original consensus message + self.network.broadcast(signed_msg).await; } - // Re-broadcast this since it's an original consensus message worth handling - if res.is_ok() { - self.network.broadcast(signed_msg).await; + match res { + Ok(None) => {} + Ok(Some(block)) => { + let mut validators = vec![]; + let mut sigs = vec![]; + // Get all precommits for this round + for (validator, msgs) in &self.block.log.log[&msg.round] { + if let Some(signed) = msgs.get(&Step::Precommit) { + if let Data::Precommit(Some((id, sig))) = &signed.msg.data { + // If this precommit was for this block, include it + if *id == block.id() { + validators.push(*validator); + sigs.push(sig.clone()); + } + } + } + } + + let commit_msg = + commit_msg(self.block.end_time[&msg.round].canonical(), block.id().as_ref()); + let commit = Commit { + end_time: self.block.end_time[&msg.round].canonical(), + validators: validators.clone(), + signature: self.network.signature_scheme().aggregate(&validators, &commit_msg, &sigs), + }; + debug_assert!(self.network.verify_commit(block.id(), &commit)); + + log::info!( + target: "tendermint", + "TendermintMachine produced block {}", + hex::encode(block.id().as_ref()), + ); 
+ let id = block.id(); + let proposal = self.network.add_block(block, commit).await; + log::trace!( + target: "tendermint", + "added block {} (produced by machine)", + hex::encode(id.as_ref()), + ); + self.reset(msg.round, proposal).await; + } + Err(TendermintError::Malicious(sender, evidence)) => { + let current_msg = SignedMessage { msg: msg.clone(), sig: sig.clone() }; + + let slash = if let Some(ev) = evidence { + // if the malicious message contains a block, only vote to slash + // TODO: Should this decision be made at a higher level? + // A higher-level system may be able to verify if the contained block is fatally + // invalid + // A higher-level system may accept the bandwidth size of this, even if the issue is + // just the valid round field + if let Data::Proposal(_, _) = ¤t_msg.msg.data { + SlashEvent::Id( + SlashReason::InvalidBlock, + self.block.number.0, + self.block.round().number.0, + ) + } else { + // slash with evidence otherwise + SlashEvent::WithEvidence(ev) + } + } else { + // we don't have evidence. Slash with vote. + SlashEvent::Id( + SlashReason::InvalidMessage, + self.block.number.0, + self.block.round().number.0, + ) + }; + + // Each message that we're voting to slash over needs to be re-broadcasted so other + // validators also trigger their own votes + // TODO: should this be inside slash function? 
+ if let SlashEvent::Id(_, _, _) = slash { + self.network.broadcast(current_msg).await; + } + + self.slash(sender, slash).await + } + Err( + TendermintError::Temporal | + TendermintError::AlreadyHandled | + TendermintError::InvalidEvidence, + ) => (), } } } } + + // Returns Ok(true) if this was a Precommit which had either no signature or its signature + // validated + // Returns Ok(false) if it wasn't a Precommit or the signature wasn't validated yet + // Returns Err if the signature was invalid + fn verify_precommit_signature( + &self, + signed: &SignedMessageFor, + ) -> Result> { + let msg = &signed.msg; + if let Data::Precommit(precommit) = &msg.data { + let Some((id, sig)) = precommit else { return Ok(true) }; + // Also verify the end_time of the commit + // Only perform this verification if we already have the end_time + // Else, there's a DoS where we receive a precommit for some round infinitely in the future + // which forces us to calculate every end time + if let Some(end_time) = self.block.end_time.get(&msg.round) { + if !self.validators.verify(msg.sender, &commit_msg(end_time.canonical(), id.as_ref()), sig) + { + log::warn!(target: "tendermint", "Validator produced an invalid commit signature"); + Err(TendermintError::Malicious( + msg.sender, + Some(Evidence::InvalidPrecommit(signed.encode())), + ))?; + } + return Ok(true); + } + } + Ok(false) + } + + async fn message( + &mut self, + signed: &SignedMessageFor, + ) -> Result, TendermintError> { + let msg = &signed.msg; + if msg.block != self.block.number { + Err(TendermintError::Temporal)?; + } + + if (msg.block == self.block.number) && + (msg.round == self.block.round().number) && + (msg.data.step() == Step::Propose) + { + log::trace!( + target: "tendermint", + "received Propose for block {}, round {}", + msg.block.0, + msg.round.0, + ); + } + + // If this is a precommit, verify its signature + self.verify_precommit_signature(signed)?; + + // Only let the proposer propose + if matches!(msg.data, 
Data::Proposal(..)) && + (msg.sender != self.weights.proposer(msg.block, msg.round)) + { + log::warn!(target: "tendermint", "Validator who wasn't the proposer proposed"); + // TODO: This should have evidence + Err(TendermintError::Malicious(msg.sender, None))?; + }; + + if !self.block.log.log(signed.clone())? { + return Err(TendermintError::AlreadyHandled); + } + log::debug!( + target: "tendermint", + "received new tendermint message (block: {}, round: {}, step: {:?})", + msg.block.0, + msg.round.0, + msg.data.step(), + ); + + // All functions, except for the finalizer and the jump, are locked to the current round + + // Run the finalizer to see if it applies + // 49-52 + if matches!(msg.data, Data::Proposal(..)) || matches!(msg.data, Data::Precommit(_)) { + let proposer = self.weights.proposer(self.block.number, msg.round); + + // Get the proposal + if let Some(proposal_signed) = self.block.log.get(msg.round, proposer, Step::Propose) { + if let Data::Proposal(_, block) = &proposal_signed.msg.data { + // Check if it has gotten a sufficient amount of precommits + // Uses a junk signature since message equality disregards the signature + if self.block.log.has_consensus( + msg.round, + &Data::Precommit(Some((block.id(), self.signer.sign(&[]).await))), + ) { + // If msg.round is in the future, these Precommits won't have their inner signatures + // verified + // It should be impossible for msg.round to be in the future however, as this requires + // 67% of validators to Precommit, and we jump on 34% participating in the new round + // The one exception would be if a validator had 34%, and could cause participation to + // go from 33% (not enough to jump) to 67%, without executing the below code + // This also would require the local machine to be outside of allowed time tolerances, + // or the validator with 34% to not be publishing Prevotes (as those would cause a + // a jump) + // Both are invariants + // TODO: Replace this panic with an inner signature check + 
assert!(msg.round.0 <= self.block.round().number.0); + + log::debug!(target: "tendermint", "block {} has consensus", msg.block.0); + return Ok(Some(block.clone())); + } + } + } + } + + // Else, check if we need to jump ahead + #[allow(clippy::comparison_chain)] + if msg.round.0 < self.block.round().number.0 { + // Prior round, disregard if not finalizing + return Ok(None); + } else if msg.round.0 > self.block.round().number.0 { + // 55-56 + // Jump, enabling processing by the below code + if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); + + // Jump to the new round. + let proposer = self.round(msg.round, None); + + // If this round already has precommit messages, verify their signatures + let round_msgs = self.block.log.log[&msg.round].clone(); + for (validator, msgs) in &round_msgs { + if let Some(existing) = msgs.get(&Step::Precommit) { + if let Ok(res) = self.verify_precommit_signature(existing) { + // Ensure this actually verified the signature instead of believing it shouldn't yet + assert!(res); + } else { + // Remove the message so it isn't counted towards forming a commit/included in one + // This won't remove the fact they precommitted for this block hash in the MessageLog + // TODO: Don't even log these in the first place until we jump, preventing needing + // to do this in the first place + let msg = self + .block + .log + .log + .get_mut(&msg.round) + .unwrap() + .get_mut(validator) + .unwrap() + .remove(&Step::Precommit) + .unwrap(); + + // Slash the validator for publishing an invalid commit signature + self + .slash( + *validator, + SlashEvent::WithEvidence(Evidence::InvalidPrecommit(msg.encode())), + ) + .await; + } + } + } + + // If we're the proposer, return now we don't waste time on the current round + // (as it doesn't have a proposal, since we didn't propose, and cannot complete) + 
if proposer { + return Ok(None); + } + } else { + // Future round which we aren't ready to jump to, so return for now + return Ok(None); + } + } + + // msg.round is now guaranteed to be equal to self.block.round().number + debug_assert_eq!(msg.round, self.block.round().number); + + // The paper executes these checks when the step is prevote. Making sure this message warrants + // rerunning these checks is a sane optimization since message instances is a full iteration + // of the round map + if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { + let (participation, weight) = + self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); + let threshold_weight = self.weights.threshold(); + if participation < threshold_weight { + log::trace!( + target: "tendermint", + "progess towards setting prevote timeout, participation: {}, needed: {}", + participation, + threshold_weight, + ); + } + // 34-35 + if participation >= threshold_weight { + log::trace!( + target: "tendermint", + "setting timeout for prevote due to sufficient participation", + ); + self.block.round_mut().set_timeout(Step::Prevote); + } + + // 44-46 + if weight >= threshold_weight { + self.broadcast(Data::Precommit(None)); + return Ok(None); + } + } + + // 47-48 + if matches!(msg.data, Data::Precommit(_)) && + self.block.log.has_participation(self.block.round().number, Step::Precommit) + { + log::trace!( + target: "tendermint", + "setting timeout for precommit due to sufficient participation", + ); + self.block.round_mut().set_timeout(Step::Precommit); + } + + // All further operations require actually having the proposal in question + let proposer = self.weights.proposer(self.block.number, self.block.round().number); + let (vr, block) = if let Some(proposal_signed) = + self.block.log.get(self.block.round().number, proposer, Step::Propose) + { + if let Data::Proposal(vr, block) = &proposal_signed.msg.data { + (vr, block) + } else { + 
panic!("message for Step::Propose didn't have Data::Proposal"); + } + } else { + return Ok(None); + }; + + // 22-33 + if self.block.round().step == Step::Propose { + // Delay error handling (triggering a slash) until after we vote. + let (valid, err) = match self.network.validate(block).await { + Ok(()) => (true, Ok(None)), + Err(BlockError::Temporal) => (false, Ok(None)), + Err(BlockError::Fatal) => (false, { + log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); + // TODO: Produce evidence of this for the higher level code to decide what to do with + Err(TendermintError::Malicious(proposer, None)) + }), + }; + // Create a raw vote which only requires block validity as a basis for the actual vote. + let raw_vote = Some(block.id()).filter(|_| valid); + + // If locked is none, it has a round of -1 according to the protocol. That satisfies + // 23 and 29. If it's some, both are satisfied if they're for the same ID. If it's some + // with different IDs, the function on 22 rejects yet the function on 28 has one other + // condition + let locked = self.block.locked.as_ref().map_or(true, |(_, id)| id == &block.id()); + let mut vote = raw_vote.filter(|_| locked); + + if let Some(vr) = vr { + // Malformed message + if vr.0 >= self.block.round().number.0 { + log::warn!(target: "tendermint", "Validator claimed a round from the future was valid"); + Err(TendermintError::Malicious( + msg.sender, + Some(Evidence::InvalidValidRound(signed.encode())), + ))?; + } + + if self.block.log.has_consensus(*vr, &Data::Prevote(Some(block.id()))) { + // Allow differing locked values if the proposal has a newer valid round + // This is the other condition described above + if let Some((locked_round, _)) = self.block.locked.as_ref() { + vote = vote.or_else(|| raw_vote.filter(|_| locked_round.0 <= vr.0)); + } + + self.broadcast(Data::Prevote(vote)); + return err; + } + } else { + self.broadcast(Data::Prevote(vote)); + return err; + } + + return Ok(None); + } + + if 
self.block.valid.as_ref().map_or(true, |(round, _)| round != &self.block.round().number) { + // 36-43 + + // The run once condition is implemented above. Since valid will always be set by this, it + // not being set, or only being set historically, means this has yet to be run + + if self.block.log.has_consensus(self.block.round().number, &Data::Prevote(Some(block.id()))) { + match self.network.validate(block).await { + // BlockError::Temporal is due to a temporal error we have, yet a supermajority of the + // network does not, Because we do not believe this block to be fatally invalid, and + // because a supermajority deems it valid, accept it. + Ok(()) | Err(BlockError::Temporal) => (), + Err(BlockError::Fatal) => { + log::warn!(target: "tendermint", "Validator proposed a fatally invalid block"); + // TODO: Produce evidence of this for the higher level code to decide what to do with + Err(TendermintError::Malicious(proposer, None))? + } + }; + + self.block.valid = Some((self.block.round().number, block.clone())); + if self.block.round().step == Step::Prevote { + self.block.locked = Some((self.block.round().number, block.id())); + self.broadcast(Data::Precommit(Some(( + block.id(), + self + .signer + .sign(&commit_msg( + self.block.end_time[&self.block.round().number].canonical(), + block.id().as_ref(), + )) + .await, + )))); + } + } + } + + Ok(None) + } } diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index e65568ca..3959852d 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -2,7 +2,7 @@ use std::{sync::Arc, collections::HashMap}; use parity_scale_codec::Encode; -use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence}; +use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; type RoundLog = HashMap<::ValidatorId, HashMap>>; pub(crate) struct MessageLog { @@ 
-16,7 +16,7 @@ impl MessageLog { } // Returns true if it's a new message - pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result { + pub(crate) fn log(&mut self, signed: SignedMessageFor) -> Result> { let msg = &signed.msg; // Clarity, and safety around default != new edge cases let round = self.log.entry(msg.round).or_insert_with(HashMap::new); @@ -30,7 +30,10 @@ impl MessageLog { target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); - Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?; + Err(TendermintError::Malicious( + msg.sender, + Some(Evidence::ConflictingMessages(existing.encode(), signed.encode())), + ))?; } return Ok(false); } @@ -44,8 +47,7 @@ impl MessageLog { pub(crate) fn message_instances(&self, round: RoundNumber, data: &DataFor) -> (u64, u64) { let mut participating = 0; let mut weight = 0; - let Some(log) = self.log.get(&round) else { return (0, 0) }; - for (participant, msgs) in log { + for (participant, msgs) in &self.log[&round] { if let Some(msg) = msgs.get(&data.step()) { let validator_weight = self.weights.weight(*participant); participating += validator_weight; @@ -71,8 +73,7 @@ impl MessageLog { // Check if a supermajority of nodes have participated on a specific step pub(crate) fn has_participation(&self, round: RoundNumber, step: Step) -> bool { let mut participating = 0; - let Some(log) = self.log.get(&round) else { return false }; - for (participant, msgs) in log { + for (participant, msgs) in &self.log[&round] { if msgs.get(&step).is_some() { participating += self.weights.weight(*participant); } diff --git a/coordinator/tributary/tendermint/src/round.rs b/coordinator/tributary/tendermint/src/round.rs index a97e3ed1..445c2784 100644 --- a/coordinator/tributary/tendermint/src/round.rs +++ b/coordinator/tributary/tendermint/src/round.rs @@ -57,7 +57,6 @@ impl RoundData { // Poll all set timeouts, returning the Step whose timeout has just expired pub(crate) async fn 
timeout_future(&self) -> Step { - /* let now = Instant::now(); log::trace!( target: "tendermint", @@ -65,7 +64,6 @@ impl RoundData { self.step, self.timeouts.iter().map(|(k, v)| (k, v.duration_since(now))).collect::>() ); - */ let timeout_future = |step| { let timeout = self.timeouts.get(&step).copied(); diff --git a/coordinator/tributary/tendermint/tests/ext.rs b/coordinator/tributary/tendermint/tests/ext.rs index bec95ddc..3b3cf7c3 100644 --- a/coordinator/tributary/tendermint/tests/ext.rs +++ b/coordinator/tributary/tendermint/tests/ext.rs @@ -145,7 +145,7 @@ impl Network for TestNetwork { println!("Slash for {id} due to {event:?}"); } - async fn validate(&self, block: &TestBlock) -> Result<(), BlockError> { + async fn validate(&mut self, block: &TestBlock) -> Result<(), BlockError> { block.valid } From c73acb3d6244340ebe9aed321102abac3109959e Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 19:28:21 -0400 Subject: [PATCH 089/126] Log on new tendermint message debug -> trace --- coordinator/tributary/tendermint/src/lib.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index da80a41c..adc6fef7 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -514,7 +514,7 @@ impl TendermintMachine { match step { Step::Propose => { // Slash the validator for not proposing when they should've - log::debug!(target: "tendermint", "Validator didn't propose when they should have"); + log::debug!(target: "tendermint", "validator didn't propose when they should have"); // this slash will be voted on. self.slash( self.weights.proposer(self.block.number, self.block.round().number), @@ -724,7 +724,7 @@ impl TendermintMachine { if !self.block.log.log(signed.clone())? 
{ return Err(TendermintError::AlreadyHandled); } - log::debug!( + log::trace!( target: "tendermint", "received new tendermint message (block: {}, round: {}, step: {:?})", msg.block.0, From 558a2bfa460eb5d40cd0b0ef8bde6de997094fc5 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 21:51:44 -0400 Subject: [PATCH 090/126] Slight tweaks to BP+ --- coins/monero/src/ringct/bulletproofs/mod.rs | 2 +- .../plus/aggregate_range_proof.rs | 37 ++++++++----------- .../plus/weighted_inner_product.rs | 2 +- .../plus/aggregate_range_proof.rs | 2 +- 4 files changed, 19 insertions(+), 24 deletions(-) diff --git a/coins/monero/src/ringct/bulletproofs/mod.rs b/coins/monero/src/ringct/bulletproofs/mod.rs index df0c6ff8..ce9f7492 100644 --- a/coins/monero/src/ringct/bulletproofs/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/mod.rs @@ -91,7 +91,7 @@ impl Bulletproofs { Bulletproofs::Plus( AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect()) .unwrap() - .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap())) + .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap())) .unwrap(), ) }) diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs index af5c0275..cba95014 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs @@ -24,7 +24,7 @@ use crate::{ }, }; -// Figure 3 +// Figure 3 of the Bulletproofs+ Paper #[derive(Clone, Debug)] pub(crate) struct AggregateRangeStatement { generators: Generators, @@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement { } #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] -pub(crate) struct AggregateRangeWitness { - values: Vec, - gammas: Vec, -} +pub(crate) struct AggregateRangeWitness(Vec); impl AggregateRangeWitness { - pub(crate) fn new(commitments: 
&[Commitment]) -> Option { + pub(crate) fn new(commitments: Vec) -> Option { if commitments.is_empty() || (commitments.len() > MAX_M) { return None; } - let mut values = Vec::with_capacity(commitments.len()); - let mut gammas = Vec::with_capacity(commitments.len()); - for commitment in commitments { - values.push(commitment.amount); - gammas.push(Scalar(commitment.mask)); - } - Some(AggregateRangeWitness { values, gammas }) + Some(AggregateRangeWitness(commitments)) } } @@ -162,13 +153,11 @@ impl AggregateRangeStatement { witness: &AggregateRangeWitness, ) -> Option { // Check for consistency with the witness - if self.V.len() != witness.values.len() { + if self.V.len() != witness.0.len() { return None; } - for (commitment, (value, gamma)) in - self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter())) - { - if Commitment::new(**gamma, *value).calculate() != **commitment { + for (commitment, witness) in self.V.iter().zip(witness.0.iter()) { + if witness.calculate() != **commitment { return None; } } @@ -196,7 +185,13 @@ impl AggregateRangeStatement { let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N)); for j in 1 ..= V.len() { d_js.push(Self::d_j(j, V.len())); - a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0); + #[allow(clippy::map_unwrap_or)] + a_l.0.append( + &mut u64_decompose( + *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0), + ) + .0, + ); } let a_r = a_l.clone() - Scalar::ONE; @@ -223,8 +218,8 @@ impl AggregateRangeStatement { let a_l = a_l - z; let a_r = a_r + &d_descending_y_plus_z; let mut alpha = alpha; - for j in 1 ..= witness.gammas.len() { - alpha += z_pow[j - 1] * witness.gammas[j - 1] * y_mn_plus_one; + for j in 1 ..= witness.0.len() { + alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one; } Some(AggregateRangeProof { diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs 
b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs index 09bb6748..7cb9a4df 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs @@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{ ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*, }; -// Figure 1 +// Figure 1 of the Bulletproofs+ paper #[derive(Clone, Debug)] pub(crate) struct WipStatement { generators: Generators, diff --git a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs index a50b9d40..658da250 100644 --- a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs @@ -21,7 +21,7 @@ fn test_aggregate_range_proof() { } let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect(); let statement = AggregateRangeStatement::new(commitment_points).unwrap(); - let witness = AggregateRangeWitness::new(&commitments).unwrap(); + let witness = AggregateRangeWitness::new(commitments).unwrap(); let proof = statement.clone().prove(&mut OsRng, &witness).unwrap(); statement.verify(&mut OsRng, &mut verifier, (), proof); From a25e6330bdd6d2f237d731c9229f971130899ad3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 21 Apr 2024 22:50:07 -0400 Subject: [PATCH 091/126] Remove DLEq proofs from CLSAG multisig 1) Removes the key image DLEq on the Monero side of things, as the produced signature share serves as a DLEq for it. 2) Removes the nonce DLEqs from modular-frost as they're unnecessary for monero-serai. Updates documentation accordingly. 
Without the proof the nonces are internally consistent, the produced signatures from modular-frost can be argued as a batch-verifiable CP93 DLEq (R0, R1, s), or as a GSP for the CP93 DLEq statement (which naturally produces (R0, R1, s)). The lack of proving the nonces consistent does make the process weaker, yet it's also unnecessary for the class of protocols this is intended to service. To provide DLEqs for the nonces would be to provide PoKs for the nonce commitments (in the traditional Schnorr case). --- Cargo.lock | 2 - coins/monero/Cargo.toml | 4 +- coins/monero/src/ringct/clsag/mod.rs | 8 +- coins/monero/src/ringct/clsag/multisig.rs | 163 ++++++++++++---------- coins/monero/src/wallet/send/multisig.rs | 19 ++- crypto/frost/Cargo.toml | 1 - crypto/frost/src/algorithm.rs | 7 + crypto/frost/src/nonce.rs | 111 +-------------- crypto/frost/src/sign.rs | 31 +--- crypto/frost/src/tests/mod.rs | 4 +- crypto/frost/src/tests/nonces.rs | 76 +--------- crypto/frost/src/tests/vectors.rs | 11 +- 12 files changed, 131 insertions(+), 306 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index edc46693..57e438de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4642,7 +4642,6 @@ dependencies = [ "dalek-ff-group", "digest 0.10.7", "dkg", - "dleq", "flexible-transcript", "hex", "minimal-ed448", @@ -4679,7 +4678,6 @@ dependencies = [ "curve25519-dalek", "dalek-ff-group", "digest_auth", - "dleq", "flexible-transcript", "group", "hex", diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 9c78e431..357803c9 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = # Needed for multisig transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true } -dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], 
optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true } monero-generators = { path = "generators", version = "0.4", default-features = false } @@ -91,7 +90,6 @@ std = [ "multiexp/std", "transcript/std", - "dleq/std", "monero-generators/std", @@ -106,7 +104,7 @@ std = [ cache-distribution = ["async-lock"] http-rpc = ["digest_auth", "simple-request", "tokio"] -multisig = ["transcript", "frost", "dleq", "std"] +multisig = ["transcript", "frost", "std"] binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"] experimental = [] diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 3fe65254..042d964a 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -27,8 +27,6 @@ use crate::{ mod multisig; #[cfg(feature = "multisig")] pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig}; -#[cfg(feature = "multisig")] -pub(crate) use multisig::add_key_image_share; /// Errors returned when CLSAG signing fails. 
#[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -279,8 +277,10 @@ impl Clsag { nonce.deref() * hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]), ); - clsag.s[usize::from(inputs[i].2.decoys.i)] = - (-((p * inputs[i].0.deref()) + c)) + nonce.deref(); + // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring + // member's commitment and our input commitment (which will only have a known discrete log + // over G if the amounts cancel out) + clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c); inputs[i].0.zeroize(); nonce.zeroize(); diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 85748b78..e9234979 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -1,5 +1,8 @@ use core::{ops::Deref, fmt::Debug}; -use std_shims::io::{self, Read, Write}; +use std_shims::{ + io::{self, Read, Write}, + collections::HashMap, +}; use std::sync::{Arc, RwLock}; use rand_core::{RngCore, CryptoRng, SeedableRng}; @@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; -use group::{ff::Field, Group, GroupEncoding}; +use group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, +}; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; -use dleq::DLEqProof; use frost::{ dkg::lagrange, curve::Ed25519, @@ -26,10 +31,6 @@ use crate::ringct::{ clsag::{ClsagInput, Clsag}, }; -fn dleq_transcript() -> RecommendedTranscript { - RecommendedTranscript::new(b"monero_key_image_dleq") -} - impl ClsagInput { fn transcript(&self, transcript: &mut T) { // Doesn't domain separate as this is considered part of the larger CLSAG proof @@ -43,6 +44,7 @@ impl ClsagInput { // They're just a unreliable reference to this data which will be included in the message // if in use 
transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]); + // This also transcripts the key image generator since it's derived from this key transcript.append_message(b"key", pair[0].compress().to_bytes()); transcript.append_message(b"commitment", pair[1].compress().to_bytes()) } @@ -70,13 +72,11 @@ impl ClsagDetails { #[derive(Clone, PartialEq, Eq, Zeroize, Debug)] pub struct ClsagAddendum { pub(crate) key_image: dfg::EdwardsPoint, - dleq: DLEqProof, } impl WriteAddendum for ClsagAddendum { fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key_image.compress().to_bytes().as_ref())?; - self.dleq.write(writer) + writer.write_all(self.key_image.compress().to_bytes().as_ref()) } } @@ -97,9 +97,8 @@ pub struct ClsagMultisig { transcript: RecommendedTranscript, pub(crate) H: EdwardsPoint, - // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires - // an extra round - image: EdwardsPoint, + key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>, + image: Option, details: Arc>>, @@ -117,7 +116,8 @@ impl ClsagMultisig { transcript, H: hash_to_point(&output_key), - image: EdwardsPoint::identity(), + key_image_shares: HashMap::new(), + image: None, details, @@ -135,20 +135,6 @@ impl ClsagMultisig { } } -pub(crate) fn add_key_image_share( - image: &mut EdwardsPoint, - generator: EdwardsPoint, - offset: Scalar, - included: &[Participant], - participant: Participant, - share: EdwardsPoint, -) { - if image.is_identity().into() { - *image = generator * offset; - } - *image += share * lagrange::(participant, included).0; -} - impl Algorithm for ClsagMultisig { type Transcript = RecommendedTranscript; type Addendum = ClsagAddendum; @@ -160,23 +146,10 @@ impl Algorithm for ClsagMultisig { fn preprocess_addendum( &mut self, - rng: &mut R, + _rng: &mut R, keys: &ThresholdKeys, ) -> ClsagAddendum { - ClsagAddendum { - key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(), - 
dleq: DLEqProof::prove( - rng, - // Doesn't take in a larger transcript object due to the usage of this - // Every prover would immediately write their own DLEq proof, when they can only do so in - // the proper order if they want to reach consensus - // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to - // try to merge later in some form, when it should instead just merge xH (as it does) - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - keys.secret_share(), - ), - } + ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() } } fn read_addendum(&self, reader: &mut R) -> io::Result { @@ -190,7 +163,7 @@ impl Algorithm for ClsagMultisig { Err(io::Error::other("non-canonical key image"))?; } - Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::read(reader)? }) + Ok(ClsagAddendum { key_image: xH }) } fn process_addendum( @@ -199,33 +172,29 @@ impl Algorithm for ClsagMultisig { l: Participant, addendum: ClsagAddendum, ) -> Result<(), FrostError> { - // TODO: This check is faulty if two shares are additive inverses of each other - if self.image.is_identity().into() { + if self.image.is_none() { self.transcript.domain_separate(b"CLSAG"); + // Transcript the ring self.input().transcript(&mut self.transcript); + // Transcript the mask self.transcript.append_message(b"mask", self.mask().to_bytes()); + + // Init the image to the offset + self.image = Some(dfg::EdwardsPoint(self.H) * view.offset()); } + // Transcript this participant's contribution self.transcript.append_message(b"participant", l.to_bytes()); - - addendum - .dleq - .verify( - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - &[view.original_verification_share(l), addendum.key_image], - ) - .map_err(|_| FrostError::InvalidPreprocess(l))?; - self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes()); - add_key_image_share( - &mut 
self.image, - self.H, - view.offset().0, - view.included(), - l, - addendum.key_image.0, - ); + + // Accumulate the interpolated share + let interpolated_key_image_share = + addendum.key_image * lagrange::(l, view.included()); + *self.image.as_mut().unwrap() += interpolated_key_image_share; + + self + .key_image_shares + .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share); Ok(()) } @@ -253,7 +222,7 @@ impl Algorithm for ClsagMultisig { #[allow(non_snake_case)] let (clsag, pseudo_out, p, c) = Clsag::sign_core( &mut rng, - &self.image, + &self.image.expect("verifying a share despite never processing any addendums").0, &self.input(), self.mask(), self.msg.as_ref().unwrap(), @@ -262,7 +231,8 @@ impl Algorithm for ClsagMultisig { ); self.interim = Some(Interim { p, c, clsag, pseudo_out }); - (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref() + // r - p x, where p is the challenge for the keys + *nonces[0] - dfg::Scalar(p) * view.secret_share().deref() } #[must_use] @@ -274,11 +244,13 @@ impl Algorithm for ClsagMultisig { ) -> Option { let interim = self.interim.as_ref().unwrap(); let mut clsag = interim.clsag.clone(); + // We produced shares as `r - p x`, yet the signature is `r - p x - c x` + // Substract `c x` (saved as `c`) now clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c; if clsag .verify( &self.input().decoys.ring, - &self.image, + &self.image.expect("verifying a signature despite never processing any addendums").0, &interim.pseudo_out, self.msg.as_ref().unwrap(), ) @@ -296,10 +268,61 @@ impl Algorithm for ClsagMultisig { share: dfg::Scalar, ) -> Result, ()> { let interim = self.interim.as_ref().unwrap(); - Ok(vec![ + + // For a share `r - p x`, the following two equalities should hold: + // - `(r - p x)G == R.0 - pV`, where `V = xG` + // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share) + // + // This is effectively a discrete log equality proof for: + // V, K over G, H + // with nonces 
+ // R.0, R.1 + // and solution + // s + // + // Which is a batch-verifiable rewrite of the traditional CP93 proof + // (and also writable as Generalized Schnorr Protocol) + // + // That means that given a proper challenge, this alone can be certainly argued to prove the + // key image share is well-formed and the provided signature so proves for that. + + // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of + // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically + // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be + // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript). + + let key_image_share = self.key_image_shares[&verification_share.to_bytes()]; + + // Hash every variable relevant here, using the hahs output as the random weight + let mut weight_transcript = + RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share"); + weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes()); + weight_transcript.append_message(b"H", self.H.to_bytes()); + weight_transcript.append_message(b"xG", verification_share.to_bytes()); + weight_transcript.append_message(b"xH", key_image_share.to_bytes()); + weight_transcript.append_message(b"rG", nonces[0][0].to_bytes()); + weight_transcript.append_message(b"rH", nonces[0][1].to_bytes()); + weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr()); + weight_transcript.append_message(b"s", share.to_repr()); + let weight = weight_transcript.challenge(b"weight"); + let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into())); + + let part_one = vec![ (share, dfg::EdwardsPoint::generator()), - (dfg::Scalar(interim.p), verification_share), + // -(R.0 - pV) == -R.0 + pV (-dfg::Scalar::ONE, nonces[0][0]), - ]) + (dfg::Scalar(interim.p), verification_share), + ]; + + let mut part_two = vec![ + (weight * share, 
dfg::EdwardsPoint(self.H)), + // -(R.1 - pK) == -R.1 + pK + (-weight, nonces[0][1]), + (weight * dfg::Scalar(interim.p), key_image_share), + ]; + + let mut all = part_one; + all.append(&mut part_two); + Ok(all) } } diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 02626e6a..a5be404a 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ curve::Ed25519, Participant, FrostError, ThresholdKeys, + dkg::lagrange, sign::{ Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -27,7 +28,7 @@ use frost::{ use crate::{ random_scalar, ringct::{ - clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share}, + clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig}, RctPrunable, }, transaction::{Input, Transaction}, @@ -261,8 +262,13 @@ impl SignMachine for TransactionSignMachine { included.push(self.i); included.sort_unstable(); - // Convert the unified commitments to a Vec of the individual commitments + // Start calculating the key images, as needed on the TX level let mut images = vec![EdwardsPoint::identity(); self.clsags.len()]; + for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) { + *image = generator * offset; + } + + // Convert the serialized nonces commitments to a parallelized Vec let mut commitments = (0 .. 
self.clsags.len()) .map(|c| { included @@ -291,14 +297,7 @@ impl SignMachine for TransactionSignMachine { // provides the easiest API overall, as this is where the TX is (which needs the key // images in its message), along with where the outputs are determined (where our // outputs may need these in order to guarantee uniqueness) - add_key_image_share( - &mut images[c], - self.key_images[c].0, - self.key_images[c].1, - &included, - *l, - preprocess.addendum.key_image.0, - ); + images[c] += preprocess.addendum.key_image.0 * lagrange::(*l, &included).0; Ok((*l, preprocess)) }) diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 128a3667..b89d5290 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } -dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] } diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index f2da59ea..0b0abd6c 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -39,6 +39,13 @@ pub trait Algorithm: Send + Sync + Clone { /// Obtain the list of nonces to generate, as specified by the generators to create commitments /// against per-nonce. + /// + /// The Algorithm is responsible for all transcripting of these nonce specifications/generators. + /// + /// The prover will be passed the commitments, and the commitments will be sent to all other + /// participants. No guarantees the commitments are internally consistent (have the same discrete + /// logarithm across generators) are made. 
Any Algorithm which specifies multiple generators for + /// a single nonce must handle that itself. fn nonces(&self) -> Vec>; /// Generate an addendum to FROST"s preprocessing stage. diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 8638baff..f76f9bc4 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -1,13 +1,9 @@ // FROST defines its nonce as sum(Di, Ei * bi) -// Monero needs not just the nonce over G however, yet also over H -// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once // -// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount -// of nonces, each against an arbitrary list of generators +// In order for this library to be robust, it supports generating an arbitrary amount of nonces, +// each against an arbitrary list of generators // // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) -// When representations across multiple generators are provided, a DLEq proof is also provided to -// confirm their integrity use core::ops::Deref; use std::{ @@ -24,32 +20,8 @@ use transcript::Transcript; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding}; use multiexp::multiexp_vartime; -use dleq::MultiDLEqProof; - use crate::{curve::Curve, Participant}; -// Transcript used to aggregate binomial nonces for usage within a single DLEq proof. 
-fn aggregation_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST DLEq Aggregation v0.5"); - transcript.append_message(b"context", context); - transcript -} - -// Every participant proves for their commitments at the start of the protocol -// These proofs are verified sequentially, requiring independent transcripts -// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is -// challenged in order to create a commitment to it, carried in each independent transcript -// (effectively forking the original transcript) -// -// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be -// constructed). For higher level protocols, the transcript may have contextual info these proofs -// will then be bound to -fn dleq_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST Commitments DLEq v0.5"); - transcript.append_message(b"context", context); - transcript -} - // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper // This is considered a single nonce as r = d + be #[derive(Clone, Zeroize)] @@ -69,7 +41,7 @@ impl GeneratorCommitments { } } -// A single nonce's commitments and relevant proofs +// A single nonce's commitments #[derive(Clone, PartialEq, Eq)] pub(crate) struct NonceCommitments { // Called generators as these commitments are indexed by generator later on @@ -121,12 +93,6 @@ impl NonceCommitments { t.append_message(b"commitment_E", commitments.0[1].to_bytes()); } } - - fn aggregation_factor(&self, context: &[u8]) -> C::F { - let mut transcript = aggregation_transcript::(context); - self.transcript(&mut transcript); - ::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref()) - } } /// Commitments for all the nonces across all their generators. 
@@ -135,51 +101,26 @@ pub(crate) struct Commitments { // Called nonces as these commitments are indexed by nonce // So to get the commitments for the first nonce, it'd be commitments.nonces[0] pub(crate) nonces: Vec>, - // DLEq Proof proving that each set of commitments were generated using a single pair of discrete - // logarithms - pub(crate) dleq: Option>, } impl Commitments { - pub(crate) fn new( + pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, planned_nonces: &[Vec], - context: &[u8], ) -> (Vec>, Commitments) { let mut nonces = vec![]; let mut commitments = vec![]; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; for generators in planned_nonces { let (nonce, these_commitments): (Nonce, _) = NonceCommitments::new(&mut *rng, secret_share, generators); - if generators.len() > 1 { - dleq_generators.push(generators.clone()); - dleq_nonces.push(Zeroizing::new( - (these_commitments.aggregation_factor::(context) * nonce.0[1].deref()) + - nonce.0[0].deref(), - )); - } - nonces.push(nonce); commitments.push(these_commitments); } - let dleq = if !dleq_generators.is_empty() { - Some(MultiDLEqProof::prove( - rng, - &mut dleq_transcript::(context), - &dleq_generators, - &dleq_nonces, - )) - } else { - None - }; - - (nonces, Commitments { nonces: commitments, dleq }) + (nonces, Commitments { nonces: commitments }) } pub(crate) fn transcript(&self, t: &mut T) { @@ -187,58 +128,20 @@ impl Commitments { for nonce in &self.nonces { nonce.transcript(t); } - - // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in - // an exact order - // This means it shouldn't be possible for variadic generators to cause conflicts - if let Some(dleq) = &self.dleq { - t.append_message(b"dleq", dleq.serialize()); - } } - pub(crate) fn read( - reader: &mut R, - generators: &[Vec], - context: &[u8], - ) -> io::Result { + pub(crate) fn read(reader: &mut R, generators: &[Vec]) -> io::Result { let nonces = (0 .. 
generators.len()) .map(|i| NonceCommitments::read(reader, &generators[i])) .collect::>, _>>()?; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; - for (generators, nonce) in generators.iter().cloned().zip(&nonces) { - if generators.len() > 1 { - let binding = nonce.aggregation_factor::(context); - let mut aggregated = vec![]; - for commitments in &nonce.generators { - aggregated.push(commitments.0[0] + (commitments.0[1] * binding)); - } - dleq_generators.push(generators); - dleq_nonces.push(aggregated); - } - } - - let dleq = if !dleq_generators.is_empty() { - let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?; - dleq - .verify(&mut dleq_transcript::(context), &dleq_generators, &dleq_nonces) - .map_err(|_| io::Error::other("invalid DLEq proof"))?; - Some(dleq) - } else { - None - }; - - Ok(Commitments { nonces, dleq }) + Ok(Commitments { nonces }) } pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { for nonce in &self.nonces { nonce.write(writer)?; } - if let Some(dleq) = &self.dleq { - dleq.write(writer)?; - } Ok(()) } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index a716dc58..73ea0a7d 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -125,14 +125,8 @@ impl> AlgorithmMachine { let mut params = self.params; let mut rng = ChaCha20Rng::from_seed(*seed.0); - // Get a challenge to the existing transcript for use when proving for the commitments - let commitments_challenge = params.algorithm.transcript().challenge(b"commitments"); - let (nonces, commitments) = Commitments::new::<_, A::Transcript>( - &mut rng, - params.keys.secret_share(), - ¶ms.algorithm.nonces(), - commitments_challenge.as_ref(), - ); + let (nonces, commitments) = + Commitments::new::<_>(&mut rng, params.keys.secret_share(), ¶ms.algorithm.nonces()); let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys); let preprocess = Preprocess { commitments, addendum }; @@ -141,27 +135,18 @@ impl> 
AlgorithmMachine { let mut blame_entropy = [0; 32]; rng.fill_bytes(&mut blame_entropy); ( - AlgorithmSignMachine { - params, - seed, - commitments_challenge, - nonces, - preprocess: preprocess.clone(), - blame_entropy, - }, + AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy }, preprocess, ) } #[cfg(any(test, feature = "tests"))] pub(crate) fn unsafe_override_preprocess( - mut self, + self, nonces: Vec>, preprocess: Preprocess, ) -> AlgorithmSignMachine { AlgorithmSignMachine { - commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"), - params: self.params, seed: CachedPreprocess(Zeroizing::new([0; 32])), @@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine> { params: Params, seed: CachedPreprocess, - #[zeroize(skip)] - commitments_challenge: ::Challenge, pub(crate) nonces: Vec>, // Skips the preprocess due to being too large a bound to feasibly enforce on users #[zeroize(skip)] @@ -285,11 +268,7 @@ impl> SignMachine for AlgorithmSignMachi fn read_preprocess(&self, reader: &mut R) -> io::Result { Ok(Preprocess { - commitments: Commitments::read::<_, A::Transcript>( - reader, - &self.params.algorithm.nonces(), - self.commitments_challenge.as_ref(), - )?, + commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?, addendum: self.params.algorithm.read_addendum(reader)?, }) } diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index e457c703..f93a5fbf 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -12,7 +12,7 @@ use crate::{ /// Tests for the nonce handling code. pub mod nonces; -use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof}; +use nonces::test_multi_nonce; /// Vectorized test suite to ensure consistency. 
pub mod vectors; @@ -267,6 +267,4 @@ pub fn test_ciphersuite>(rng: &mut test_schnorr_blame::(rng); test_multi_nonce::(rng); - test_invalid_commitment::(rng); - test_invalid_dleq_proof::(rng); } diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index ee060bef..7b1480e9 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group, GroupEncoding}; -use dleq::MultiDLEqProof; pub use dkg::tests::{key_gen, recover_key}; use crate::{ Curve, Participant, ThresholdView, ThresholdKeys, FrostError, algorithm::Algorithm, - sign::{Writable, SignMachine}, - tests::{algorithm_machines, preprocess, sign}, + tests::{algorithm_machines, sign}, }; #[derive(Clone)] @@ -157,75 +155,3 @@ pub fn test_multi_nonce(rng: &mut R) { let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } - -/// Test malleating a commitment for a nonce across generators causes the preprocess to error. 
-pub fn test_invalid_commitment(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid commitment - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Grab their preprocess - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - - // Mutate one of the commitments - let nonce = - preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap(); - let generators_len = nonce.generators.len(); - nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0 - [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng); - - // The commitments are validated at time of deserialization (read_preprocess) - // Accordingly, serialize it and read it again to make sure that errors - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} - -/// Test malleating the DLEq proof for a preprocess causes it to error. 
-pub fn test_invalid_dleq_proof(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid DLEq proof - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Invalidate it by replacing it with a completely different proof - let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))]; - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - preprocess.commitments.dleq = Some(MultiDLEqProof::prove( - &mut *rng, - &mut RecommendedTranscript::new(b"Invalid DLEq Proof"), - &nonces::(), - &dlogs, - )); - - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); - - // Also test None for a proof will cause an error - preprocess.commitments.dleq = None; - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 3356a6cd..7be6478a 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding}; use crate::{ curve::Curve, Participant, ThresholdCore, ThresholdKeys, - algorithm::{IetfTranscript, Hram, IetfSchnorr}, + algorithm::{Hram, IetfSchnorr}, sign::{ Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, @@ -191,7 +191,6 @@ pub fn test_with_vectors>( nonces: vec![NonceCommitments { generators: vec![GeneratorCommitments(these_commitments)], }], - dleq: None, }, addendum: (), }; 
@@ -301,12 +300,8 @@ pub fn test_with_vectors>( } // Also test it at the Commitments level - let (generated_nonces, commitments) = Commitments::::new::<_, IetfTranscript>( - &mut TransparentRng(randomness), - &share, - &[vec![C::generator()]], - &[], - ); + let (generated_nonces, commitments) = + Commitments::::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]); assert_eq!(generated_nonces.len(), 1); assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]); From a41329c027b41dd4836518cc33d6493f41ab7274 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 04:31:27 -0400 Subject: [PATCH 092/126] Update clippy now that redundant imports has been reverted --- .github/nightly-version | 2 +- coins/monero/src/wallet/address.rs | 15 ++++++------ common/db/src/parity_db.rs | 2 +- coordinator/tributary/src/provided.rs | 4 ++-- crypto/dalek-ff-group/Cargo.toml | 2 +- crypto/dkg/Cargo.toml | 2 +- crypto/dleq/Cargo.toml | 2 +- crypto/ed448/Cargo.toml | 2 +- processor/src/networks/bitcoin.rs | 6 ++--- processor/src/networks/ethereum.rs | 23 ++++++++++--------- substrate/abi/Cargo.toml | 2 +- substrate/client/src/networks/bitcoin.rs | 8 +++---- substrate/client/src/networks/monero.rs | 8 +++---- substrate/coins/pallet/Cargo.toml | 2 +- substrate/coins/primitives/Cargo.toml | 2 +- substrate/dex/pallet/Cargo.toml | 2 +- substrate/dex/pallet/src/benchmarking.rs | 2 +- substrate/in-instructions/pallet/src/lib.rs | 2 +- .../in-instructions/primitives/Cargo.toml | 2 +- substrate/primitives/Cargo.toml | 2 +- 20 files changed, 46 insertions(+), 46 deletions(-) diff --git a/.github/nightly-version b/.github/nightly-version index 4a8f1e33..e2e82b48 100644 --- a/.github/nightly-version +++ b/.github/nightly-version @@ -1 +1 @@ -nightly-2024-02-07 +nightly-2024-04-23 diff --git a/coins/monero/src/wallet/address.rs b/coins/monero/src/wallet/address.rs index 9c79942b..5eec8091 100644 --- a/coins/monero/src/wallet/address.rs +++ 
b/coins/monero/src/wallet/address.rs @@ -1,5 +1,4 @@ -use core::{marker::PhantomData, fmt::Debug}; -use std_shims::string::{String, ToString}; +use core::{marker::PhantomData, fmt}; use zeroize::Zeroize; @@ -81,7 +80,7 @@ impl AddressType { } /// A type which returns the byte for a given address. -pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug { +pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug { fn network_bytes(network: Network) -> (u8, u8, u8, u8); } @@ -191,8 +190,8 @@ pub struct Address { pub view: EdwardsPoint, } -impl core::fmt::Debug for Address { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { +impl fmt::Debug for Address { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fmt .debug_struct("Address") .field("meta", &self.meta) @@ -212,8 +211,8 @@ impl Zeroize for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut data = vec![self.meta.to_byte()]; data.extend(self.spend.compress().to_bytes()); data.extend(self.view.compress().to_bytes()); @@ -226,7 +225,7 @@ impl ToString for Address { if let Some(id) = self.meta.kind.payment_id() { data.extend(id); } - encode_check(&data).unwrap() + write!(f, "{}", encode_check(&data).unwrap()) } } diff --git a/common/db/src/parity_db.rs b/common/db/src/parity_db.rs index 06fd0c7c..8c913468 100644 --- a/common/db/src/parity_db.rs +++ b/common/db/src/parity_db.rs @@ -11,7 +11,7 @@ impl Get for Transaction<'_> { let mut res = self.0.get(&key); for change in &self.1 { if change.1 == key.as_ref() { - res = change.2.clone(); + res.clone_from(&change.2); } } res diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 103286af..27c5f3cd 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -74,7 +74,7 @@ impl 
ProvidedTransactions { panic!("provided transaction saved to disk wasn't provided"); }; - if res.transactions.get(order).is_none() { + if !res.transactions.contains_key(order) { res.transactions.insert(order, VecDeque::new()); } res.transactions.get_mut(order).unwrap().push_back(tx); @@ -135,7 +135,7 @@ impl ProvidedTransactions { txn.put(current_provided_key, currently_provided); txn.commit(); - if self.transactions.get(order).is_none() { + if !self.transactions.contains_key(order) { self.transactions.insert(order, VecDeque::new()); } self.transactions.get_mut(order).unwrap().push_back(tx); diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 0fe4bce0..d8a92194 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr authors = ["Luke Parker "] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index a8d3f0a8..bf308705 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 7d8c87e9..c9d525e1 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.73" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ed448/Cargo.toml b/crypto/ed448/Cargo.toml index 
2302d7b3..b0d0026e 100644 --- a/crypto/ed448/Cargo.toml +++ b/crypto/ed448/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448" authors = ["Luke Parker "] keywords = ["ed448", "ff", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 96f76949..3f8174e4 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -517,7 +517,7 @@ impl Bitcoin { if witness.len() >= 2 { let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data = witness[witness.len() - 2].clone(); // len() - 1 is the redeem_script + data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script break; } } @@ -731,9 +731,9 @@ impl Network for Bitcoin { let data = Self::extract_serai_data(tx); for output in &mut outputs { if output.kind == OutputType::External { - output.data = data.clone(); + output.data.clone_from(&data); } - output.presumed_origin = presumed_origin.clone(); + output.presumed_origin.clone_from(&presumed_origin); } } diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 36051980..35979f30 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/src/networks/ethereum.rs @@ -1,4 +1,4 @@ -use core::{fmt::Debug, time::Duration}; +use core::{fmt, time::Duration}; use std::{ sync::Arc, collections::{HashSet, HashMap}, @@ -108,9 +108,10 @@ impl TryInto> for Address { Ok(self.0.to_vec()) } } -impl ToString for Address { - fn to_string(&self) -> String { - ethereum_serai::alloy_core::primitives::Address::from(self.0).to_string() + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ethereum_serai::alloy_core::primitives::Address::from(self.0).fmt(f) } } @@ -122,7 +123,7 @@ 
impl SignableTransaction for RouterCommand { } #[async_trait] -impl TransactionTrait> for Transaction { +impl TransactionTrait> for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { self.hash.0 @@ -155,7 +156,7 @@ impl Epoch { } #[async_trait] -impl Block> for Epoch { +impl Block> for Epoch { type Id = [u8; 32]; fn id(&self) -> [u8; 32] { self.end_hash @@ -168,7 +169,7 @@ impl Block> for Epoch { } } -impl Output> for EthereumInInstruction { +impl Output> for EthereumInInstruction { type Id = [u8; 32]; fn kind(&self) -> OutputType { @@ -281,7 +282,7 @@ impl EventualityTrait for Eventuality { } #[derive(Clone, Debug)] -pub struct Ethereum { +pub struct Ethereum { // This DB is solely used to access the first key generated, as needed to determine the Router's // address. Accordingly, all methods present are consistent to a Serai chain with a finalized // first key (regardless of local state), and this is safe. @@ -290,12 +291,12 @@ pub struct Ethereum { deployer: Deployer, router: Arc>>, } -impl PartialEq for Ethereum { +impl PartialEq for Ethereum { fn eq(&self, _other: &Ethereum) -> bool { true } } -impl Ethereum { +impl Ethereum { pub async fn new(db: D, url: String) -> Self { let provider = Arc::new(RootProvider::new( ClientBuilder::default().transport(SimpleRequest::new(url), true), @@ -360,7 +361,7 @@ impl Ethereum { } #[async_trait] -impl Network for Ethereum { +impl Network for Ethereum { type Curve = Secp256k1; type Transaction = Transaction; diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 04350486..3aac979a 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 
42cf41bf..5ea37898 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -35,9 +35,9 @@ impl FromStr for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) } } diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index e2268ec2..5b43860e 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -24,9 +24,9 @@ impl FromStr for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) } } diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index 75011add..da9a27f6 100644 --- a/substrate/coins/pallet/Cargo.toml +++ b/substrate/coins/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/coins/primitives/Cargo.toml b/substrate/coins/primitives/Cargo.toml index 322016da..ec906929 100644 --- a/substrate/coins/primitives/Cargo.toml +++ b/substrate/coins/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai coins primitives" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/Cargo.toml b/substrate/dex/pallet/Cargo.toml index 
6a2eadb8..e2ed3928 100644 --- a/substrate/dex/pallet/Cargo.toml +++ b/substrate/dex/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet" authors = ["Parity Technologies , Akil Demir "] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/src/benchmarking.rs b/substrate/dex/pallet/src/benchmarking.rs index a00b6edc..fb23b12a 100644 --- a/substrate/dex/pallet/src/benchmarking.rs +++ b/substrate/dex/pallet/src/benchmarking.rs @@ -43,7 +43,7 @@ fn create_coin(coin: &Coin) -> (T::AccountId, AccountIdLookupOf) { let caller_lookup = T::Lookup::unlookup(caller); assert_ok!(Coins::::mint( caller, - Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::max_value().div(1000u64)) } + Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) } )); assert_ok!(Coins::::mint( caller, diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 3ec63ae5..955a54df 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -10,7 +10,7 @@ pub use in_instructions_primitives as primitives; use primitives::*; // TODO: Investigate why Substrate generates these -#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs)] #[frame_support::pallet] pub mod pallet { use sp_std::vec; diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml index f579f59d..54551134 100644 --- a/substrate/in-instructions/primitives/Cargo.toml +++ b/substrate/in-instructions/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai instructions library, enabling encoding and decoding" license = "MIT" authors = ["Luke Parker "] 
edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 22fc4709..0e1e8f38 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true From b5e22dca8f7ca939786f25d39de28d52dd3081d7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 05:25:08 -0400 Subject: [PATCH 093/126] Correct no-std Monero after moving from ToString to Display --- coins/monero/src/wallet/address.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/coins/monero/src/wallet/address.rs b/coins/monero/src/wallet/address.rs index 5eec8091..d080488d 100644 --- a/coins/monero/src/wallet/address.rs +++ b/coins/monero/src/wallet/address.rs @@ -1,4 +1,5 @@ use core::{marker::PhantomData, fmt}; +use std_shims::string::ToString; use zeroize::Zeroize; From 8cef9eff6f2bf77f1be07dacb82767fdcb3ba301 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 05:44:58 -0400 Subject: [PATCH 094/126] Move keep alive, heartbeat, block to request/response --- Cargo.lock | 11 ++ coordinator/Cargo.toml | 2 +- coordinator/src/main.rs | 1 - coordinator/src/p2p.rs | 151 ++++++++---------- coordinator/src/tests/mod.rs | 4 +- coordinator/src/tests/tributary/handle_p2p.rs | 11 +- coordinator/src/tests/tributary/sync.rs | 11 +- 7 files changed, 87 insertions(+), 104 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57e438de..08edc496 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1314,6 +1314,15 @@ dependencies = [ "thiserror", ] +[[package]] +name = "cbor4ii" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" +dependencies = [ + "serde", +] + [[package]] name = "cc" version = "1.0.88" @@ -4120,6 +4129,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" dependencies = [ "async-trait", + "cbor4ii", "futures", "instant", "libp2p-core", @@ -4127,6 +4137,7 @@ dependencies = [ "libp2p-swarm", "log", "rand", + "serde", "smallvec", "void", ] diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 12f8e763..d12c3933 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] } +libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "cbor", "request-response", "gossipsub", "macros"] } [dev-dependencies] tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index e6e49c3e..4de23ae0 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -1292,7 +1292,6 @@ pub async fn run( p2p.clone(), cosign_channel.clone(), tributary_event_listener_4, - ::generator() * key.deref(), )); // Handle all messages from processors diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 6c845f4e..4e476c36 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -9,8 +9,6 @@ use std::{ use async_trait::async_trait; use rand_core::{RngCore, OsRng}; -use ciphersuite::{Ciphersuite, Ristretto}; - use scale::Encode; use borsh::{BorshSerialize, 
BorshDeserialize}; use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai}; @@ -23,12 +21,16 @@ use tokio::{ time::sleep, }; +// TODO: Remove cbor use libp2p::{ core::multiaddr::{Protocol, Multiaddr}, identity::Keypair, PeerId, tcp::Config as TcpConfig, noise, yamux, + request_response::{ + Config as RrConfig, Message as RrMessage, Event as RrEvent, cbor::Behaviour as RrBehavior, + }, gossipsub::{ IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError, @@ -135,14 +137,14 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]); async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]); - async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec); - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec); + async fn send_raw(&self, to: Self::Id, msg: Vec); + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); async fn receive_raw(&self) -> (Self::Id, Vec); async fn send(&self, to: Self::Id, kind: P2pMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); actual_msg.extend(msg); - self.send_raw(to, kind.genesis(), actual_msg).await; + self.send_raw(to, actual_msg).await; } async fn broadcast(&self, kind: P2pMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); @@ -159,7 +161,7 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { } ); */ - self.broadcast_raw(kind.genesis(), actual_msg).await; + self.broadcast_raw(kind, actual_msg).await; } async fn receive(&self) -> Message { let (sender, kind, msg) = loop { @@ -194,6 +196,7 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { #[derive(NetworkBehaviour)] struct Behavior { + reqres: RrBehavior, Vec>, gossipsub: GsBehavior, } @@ -201,7 +204,8 @@ struct Behavior { #[derive(Clone)] pub struct LibP2p { 
subscribe: Arc>>, - broadcast: Arc, Vec)>>>, + send: Arc)>>>, + broadcast: Arc)>>>, receive: Arc)>>>, } impl fmt::Debug for LibP2p { @@ -221,6 +225,7 @@ impl LibP2p { let throwaway_key_pair = Keypair::generate_ed25519(); let behavior = Behavior { + reqres: { RrBehavior::new([], RrConfig::default()) }, gossipsub: { let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2; let heartbeats_per_block = @@ -284,6 +289,7 @@ impl LibP2p { const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); + let (send_send, mut send_recv) = mpsc::unbounded_channel(); let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel(); let (receive_send, receive_recv) = mpsc::unbounded_channel(); let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel(); @@ -486,17 +492,32 @@ impl LibP2p { } } + msg = send_recv.recv() => { + let (peer, msg): (PeerId, Vec) = + msg.expect("send_recv closed. are we shutting down?"); + swarm.behaviour_mut().reqres.send_request(&peer, msg); + }, + // Handle any queued outbound messages msg = broadcast_recv.recv() => { - let (genesis, msg): (Option<[u8; 32]>, Vec) = + let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. 
are we shutting down?"); - let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied()); - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - set, - msg, - ); + if matches!(kind, P2pMessageKind::KeepAlive) || + matches!(kind, P2pMessageKind::Heartbeat(_)) { + // Use request/response + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); + } + } else { + // Use gossipsub + let set = kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); + broadcast_raw( + &mut swarm, + &mut time_of_last_p2p_message, + set, + msg, + ); + } } // Handle new incoming messages @@ -572,9 +593,21 @@ impl LibP2p { connected_peers.len(), ); } + Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres( + RrEvent::Message { peer, message }, + ))) => { + let message = match message { + RrMessage::Request { request, .. } => request, + RrMessage::Response { response, .. } => response, + }; + receive_send + .send((peer, message)) + .expect("receive_send closed. are we shutting down?"); + } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { + // TODO: Ban Heartbeat/Blocks received over gossipsub receive_send .send((propagation_source, message.data)) .expect("receive_send closed. are we shutting down?"); @@ -623,6 +656,7 @@ impl LibP2p { LibP2p { subscribe: Arc::new(Mutex::new(subscribe_send)), + send: Arc::new(Mutex::new(send_send)), broadcast: Arc::new(Mutex::new(broadcast_send)), receive: Arc::new(Mutex::new(receive_recv)), } @@ -651,16 +685,16 @@ impl P2p for LibP2p { .expect("subscribe_send closed. are we shutting down?"); } - async fn send_raw(&self, _: Self::Id, genesis: Option<[u8; 32]>, msg: Vec) { - self.broadcast_raw(genesis, msg).await; + async fn send_raw(&self, peer: Self::Id, msg: Vec) { + self.send.lock().await.send((peer, msg)).expect("send_send closed. 
are we shutting down?"); } - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { self .broadcast .lock() .await - .send((genesis, msg)) + .send((kind, msg)) .expect("broadcast_send closed. are we shutting down?"); } @@ -678,17 +712,6 @@ impl TributaryP2p for LibP2p { } } -fn heartbeat_time_unit() -> u64 { - // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating - let timestamp = SystemTime::now() - .duration_since(SystemTime::UNIX_EPOCH) - .expect("system clock is wrong") - .as_secs(); - // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to - // overlap - timestamp / u64::from(Tributary::::block_time()) -} - pub async fn heartbeat_tributaries_task( p2p: P, mut tributary_event: broadcast::Receiver>, @@ -723,8 +746,11 @@ pub async fn heartbeat_tributaries_task( if SystemTime::now() > (block_time + Duration::from_secs(60)) { log::warn!("last known tributary block was over a minute ago"); let mut msg = tip.to_vec(); - let time_unit = heartbeat_time_unit::(); - msg.extend(time_unit.to_le_bytes()); + let time: u64 = SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs(); + msg.extend(time.to_le_bytes()); P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; } } @@ -738,7 +764,6 @@ pub async fn handle_p2p_task( p2p: P, cosign_channel: mpsc::UnboundedSender, mut tributary_event: broadcast::Receiver>, - our_key: ::G, ) { let channels = Arc::new(RwLock::new(HashMap::<_, mpsc::UnboundedSender>>::new())); tokio::spawn({ @@ -764,7 +789,6 @@ pub async fn handle_p2p_task( tokio::spawn({ let p2p = p2p.clone(); async move { - let mut last_replied_to_heartbeat = 0; loop { let Some(mut msg) = recv.recv().await else { // Channel closure happens when the tributary retires @@ -781,76 +805,37 @@ pub async fn handle_p2p_task( } } - // TODO2: Rate limit 
this per timestamp - // And/or slash on Heartbeat which justifies a response, since the node + // TODO: Slash on Heartbeat which justifies a response, since the node // obviously was offline and we must now use our bandwidth to compensate for // them? P2pMessageKind::Heartbeat(msg_genesis) => { assert_eq!(msg_genesis, genesis); - - let current_time_unit = heartbeat_time_unit::(); - if current_time_unit.saturating_sub(last_replied_to_heartbeat) < 10 { - continue; - } - if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); continue; } // Only respond to recent heartbeats - let msg_time_unit = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect( + let msg_time = u64::from_le_bytes(msg.msg[32 .. 40].try_into().expect( "length-checked heartbeat message didn't have 8 bytes for the u64", )); - if current_time_unit.saturating_sub(msg_time_unit) > 1 { + if SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs() + .saturating_sub(msg_time) > + 10 + { continue; } - // This is the network's last replied to, not ours specifically - last_replied_to_heartbeat = current_time_unit; + log::debug!("received heartbeat with a recent timestamp"); let reader = tributary.tributary.reader(); - // Have sqrt(n) nodes reply with the blocks - #[allow(clippy::cast_possible_truncation, clippy::cast_sign_loss)] - let mut responders = f32::from(tributary.spec.n(&[])).sqrt().floor() as u64; - // Try to have at least 3 responders - if responders < 3 { - responders = tributary.spec.n(&[]).min(3).into(); - } - - // Decide which nodes will respond by using the latest block's hash as a - // mutually agreed upon entropy source - // This isn't a secure source of entropy, yet it's fine for this - let entropy = u64::from_le_bytes(reader.tip()[.. 
8].try_into().unwrap()); - // If n = 10, responders = 3, we want `start` to be 0 ..= 7 - // (so the highest is 7, 8, 9) - // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 - let start = usize::try_from( - entropy % (u64::from(tributary.spec.n(&[]) + 1) - responders), - ) - .unwrap(); - let mut selected = false; - for validator in &tributary.spec.validators() - [start .. (start + usize::try_from(responders).unwrap())] - { - if our_key == validator.0 { - selected = true; - continue; - } - } - if !selected { - log::debug!("received heartbeat and not selected to respond"); - continue; - } - - log::debug!("received heartbeat and selected to respond"); - let p2p = p2p.clone(); // Spawn a dedicated task as this may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { - // Have the selected nodes respond - // TODO: Spawn a dedicated topic for this heartbeat response? let mut latest = msg.msg[.. 32].try_into().unwrap(); let mut to_send = vec![]; while let Some(next) = reader.block_after(&latest) { diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs index 45a62297..55b6c99f 100644 --- a/coordinator/src/tests/mod.rs +++ b/coordinator/src/tests/mod.rs @@ -65,11 +65,11 @@ impl P2p for LocalP2p { async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} - async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec) { + async fn send_raw(&self, to: Self::Id, msg: Vec) { self.1.write().await.1[to].push_back((self.0, msg)); } - async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, _kind: P2pMessageKind, msg: Vec) { // Content-based deduplication let mut lock = self.1.write().await; { diff --git a/coordinator/src/tests/tributary/handle_p2p.rs b/coordinator/src/tests/tributary/handle_p2p.rs index daa0cf02..756f4561 100644 --- 
a/coordinator/src/tests/tributary/handle_p2p.rs +++ b/coordinator/src/tests/tributary/handle_p2p.rs @@ -3,8 +3,6 @@ use std::sync::Arc; use rand_core::OsRng; -use ciphersuite::{Ciphersuite, Ristretto}; - use tokio::{ sync::{mpsc, broadcast}, time::sleep, @@ -37,17 +35,12 @@ async fn handle_p2p_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; - for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { + for (p2p, tributary) in tributaries.drain(..) { let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task( - p2p, - cosign_send, - new_tributary_recv, - ::generator() * *keys[i], - )); + tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") diff --git a/coordinator/src/tests/tributary/sync.rs b/coordinator/src/tests/tributary/sync.rs index 9f01ca4d..18f60864 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -45,17 +45,12 @@ async fn sync_test() { let mut tributary_senders = vec![]; let mut tributary_arcs = vec![]; let mut p2p_threads = vec![]; - for (i, (p2p, tributary)) in tributaries.drain(..).enumerate() { + for (p2p, tributary) in tributaries.drain(..) 
{ let tributary = Arc::new(tributary); tributary_arcs.push(tributary.clone()); let (new_tributary_send, new_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - let thread = tokio::spawn(handle_p2p_task( - p2p, - cosign_send, - new_tributary_recv, - ::generator() * *keys[i], - )); + let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv)); new_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary })) .map_err(|_| "failed to send ActiveTributary") @@ -91,7 +86,7 @@ async fn sync_test() { let syncer_tributary = Arc::new(syncer_tributary); let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5); let (cosign_send, _) = mpsc::unbounded_channel(); - tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv, syncer_key)); + tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv)); syncer_tributary_send .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), From 023275bcb65599a2b061be37875afa14af72261a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 06:37:41 -0400 Subject: [PATCH 095/126] Properly diversify ReqResMessageKind/GossipMessageKind --- coordinator/src/cosign_evaluator.rs | 4 +- coordinator/src/main.rs | 2 +- coordinator/src/p2p.rs | 269 +++++++++++++---------- coordinator/src/tests/mod.rs | 35 ++- coordinator/src/tests/tributary/chain.rs | 8 +- 5 files changed, 181 insertions(+), 137 deletions(-) diff --git a/coordinator/src/cosign_evaluator.rs b/coordinator/src/cosign_evaluator.rs index 4ce7faf7..29d9cc4b 100644 --- a/coordinator/src/cosign_evaluator.rs +++ b/coordinator/src/cosign_evaluator.rs @@ -22,7 +22,7 @@ use serai_db::{Get, DbTxn, Db, create_db}; use processor_messages::coordinator::cosign_block_msg; use crate::{ - p2p::{CosignedBlock, P2pMessageKind, P2p}, + p2p::{CosignedBlock, GossipMessageKind, P2p}, substrate::LatestCosignedBlock, }; 
@@ -323,7 +323,7 @@ impl CosignEvaluator { for cosign in cosigns { let mut buf = vec![]; cosign.serialize(&mut buf).unwrap(); - P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; } sleep(Duration::from_secs(60)).await; } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4de23ae0..58de348d 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -260,7 +260,7 @@ async fn handle_processor_message( cosign_channel.send(cosigned_block).unwrap(); let mut buf = vec![]; cosigned_block.serialize(&mut buf).unwrap(); - P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; None } // This causes an action on Substrate yet not on any Tributary diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 4e476c36..ba84a8b8 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -55,71 +55,112 @@ pub struct CosignedBlock { } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum P2pMessageKind { +pub enum ReqResMessageKind { KeepAlive, - Tributary([u8; 32]), Heartbeat([u8; 32]), Block([u8; 32]), +} + +impl ReqResMessageKind { + pub fn read(reader: &mut R) -> Option { + let mut kind = [0; 1]; + reader.read_exact(&mut kind).ok()?; + match kind[0] { + 0 => Some(ReqResMessageKind::KeepAlive), + 1 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Heartbeat(genesis) + }), + 2 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Block(genesis) + }), + _ => None, + } + } + + pub fn serialize(&self) -> Vec { + match self { + ReqResMessageKind::KeepAlive => vec![0], + ReqResMessageKind::Heartbeat(genesis) => { + let mut res = vec![1]; + res.extend(genesis); + res + } + ReqResMessageKind::Block(genesis) => { + let mut res = vec![2]; + res.extend(genesis); + res + } + } + } +} + 
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum GossipMessageKind { + Tributary([u8; 32]), CosignedBlock, } +impl GossipMessageKind { + pub fn read(reader: &mut R) -> Option { + let mut kind = [0; 1]; + reader.read_exact(&mut kind).ok()?; + match kind[0] { + 0 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + GossipMessageKind::Tributary(genesis) + }), + 1 => Some(GossipMessageKind::CosignedBlock), + _ => None, + } + } + + pub fn serialize(&self) -> Vec { + match self { + GossipMessageKind::Tributary(genesis) => { + let mut res = vec![0]; + res.extend(genesis); + res + } + GossipMessageKind::CosignedBlock => { + vec![1] + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum P2pMessageKind { + ReqRes(ReqResMessageKind), + Gossip(GossipMessageKind), +} + impl P2pMessageKind { fn genesis(&self) -> Option<[u8; 32]> { match self { - P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None, - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => Some(*genesis), + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) | + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None, + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) | + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis), } } +} - fn serialize(&self) -> Vec { - match self { - P2pMessageKind::KeepAlive => vec![0], - P2pMessageKind::Tributary(genesis) => { - let mut res = vec![1]; - res.extend(genesis); - res - } - P2pMessageKind::Heartbeat(genesis) => { - let mut res = vec![2]; - res.extend(genesis); - res - } - P2pMessageKind::Block(genesis) => { - let mut res = vec![3]; - res.extend(genesis); - res - } - P2pMessageKind::CosignedBlock => { - vec![4] - } - } +impl From for P2pMessageKind { + fn from(kind: ReqResMessageKind) -> P2pMessageKind { + P2pMessageKind::ReqRes(kind) } +} - fn 
read(reader: &mut R) -> Option { - let mut kind = [0; 1]; - reader.read_exact(&mut kind).ok()?; - match kind[0] { - 0 => Some(P2pMessageKind::KeepAlive), - 1 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Tributary(genesis) - }), - 2 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Heartbeat(genesis) - }), - 3 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Block(genesis) - }), - 4 => Some(P2pMessageKind::CosignedBlock), - _ => None, - } +impl From for P2pMessageKind { + fn from(kind: GossipMessageKind) -> P2pMessageKind { + P2pMessageKind::Gossip(kind) } } @@ -139,15 +180,19 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { async fn send_raw(&self, to: Self::Id, msg: Vec); async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); - async fn receive_raw(&self) -> (Self::Id, Vec); + async fn receive(&self) -> Message; - async fn send(&self, to: Self::Id, kind: P2pMessageKind, msg: Vec) { + async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); actual_msg.extend(msg); self.send_raw(to, actual_msg).await; } - async fn broadcast(&self, kind: P2pMessageKind, msg: Vec) { - let mut actual_msg = kind.serialize(); + async fn broadcast(&self, kind: impl Send + Into, msg: Vec) { + let kind = kind.into(); + let mut actual_msg = match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }; actual_msg.extend(msg); /* log::trace!( @@ -163,35 +208,6 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { */ self.broadcast_raw(kind, actual_msg).await; } - async fn receive(&self) -> Message { - let (sender, kind, msg) = loop { - let (sender, msg) = self.receive_raw().await; - if msg.is_empty() { - log::error!("empty p2p message from {sender:?}"); - continue; - } - - let mut msg_ref = 
msg.as_ref(); - let Some(kind) = P2pMessageKind::read::<&[u8]>(&mut msg_ref) else { - log::error!("invalid p2p message kind from {sender:?}"); - continue; - }; - break (sender, kind, msg_ref.to_vec()); - }; - /* - log::trace!( - "received p2p message (kind {})", - match kind { - P2pMessageKind::KeepAlive => "KeepAlive".to_string(), - P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)), - P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)), - P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)), - P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(), - } - ); - */ - Message { sender, kind, msg } - } } #[derive(NetworkBehaviour)] @@ -206,7 +222,7 @@ pub struct LibP2p { subscribe: Arc>>, send: Arc)>>>, broadcast: Arc)>>>, - receive: Arc)>>>, + receive: Arc>>>, } impl fmt::Debug for LibP2p { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -502,8 +518,7 @@ impl LibP2p { msg = broadcast_recv.recv() => { let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. are we shutting down?"); - if matches!(kind, P2pMessageKind::KeepAlive) || - matches!(kind, P2pMessageKind::Heartbeat(_)) { + if matches!(kind, P2pMessageKind::ReqRes(_)) { // Use request/response for peer_id in swarm.connected_peers().copied().collect::>() { swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); @@ -600,17 +615,27 @@ impl LibP2p { RrMessage::Request { request, .. } => request, RrMessage::Response { response, .. } => response, }; - receive_send - .send((peer, message)) - .expect("receive_send closed. are we shutting down?"); + + let mut msg_ref = message.as_slice(); + let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: peer, + kind: P2pMessageKind::ReqRes(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. 
are we shutting down?"); } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { - // TODO: Ban Heartbeat/Blocks received over gossipsub - receive_send - .send((propagation_source, message.data)) - .expect("receive_send closed. are we shutting down?"); + let mut msg_ref = message.data.as_slice(); + let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: propagation_source, + kind: P2pMessageKind::Gossip(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. are we shutting down?"); } _ => {} } @@ -642,12 +667,13 @@ impl LibP2p { // (where a finalized block only occurs due to network activity), meaning this won't be // run () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - None, - P2pMessageKind::KeepAlive.serialize() - ); + time_of_last_p2p_message = Instant::now(); + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm + .behaviour_mut() + .reqres + .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize()); + } } } } @@ -700,7 +726,7 @@ impl P2p for LibP2p { // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant // lock acquisition? - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn receive(&self) -> Message { self.receive.lock().await.recv().await.expect("receive_recv closed. 
are we shutting down?") } } @@ -708,7 +734,7 @@ impl P2p for LibP2p { #[async_trait] impl TributaryP2p for LibP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await } } @@ -751,7 +777,7 @@ pub async fn heartbeat_tributaries_task( .expect("system clock is wrong") .as_secs(); msg.extend(time.to_le_bytes()); - P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; + P2p::broadcast(&p2p, ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await; } } @@ -795,20 +821,12 @@ pub async fn handle_p2p_task( break; }; match msg.kind { - P2pMessageKind::KeepAlive => {} - - P2pMessageKind::Tributary(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", spec_set); - if tributary.tributary.handle_message(&msg.msg).await { - P2p::broadcast(&p2p, msg.kind, msg.msg).await; - } - } + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} // TODO: Slash on Heartbeat which justifies a response, since the node // obviously was offline and we must now use our bandwidth to compensate for // them? - P2pMessageKind::Heartbeat(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => { assert_eq!(msg_genesis, genesis); if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); @@ -848,13 +866,13 @@ pub async fn handle_p2p_task( res.extend(reader.commit(&next).unwrap()); // Also include the timestamp used within the Heartbeat res.extend(&msg.msg[32 .. 
40]); - p2p.send(msg.sender, P2pMessageKind::Block(genesis), res).await; + p2p.send(msg.sender, ReqResMessageKind::Block(genesis), res).await; } } }); } - P2pMessageKind::Block(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => { assert_eq!(msg_genesis, genesis); let mut msg_ref: &[u8] = msg.msg.as_ref(); let Ok(block) = Block::::read(&mut msg_ref) else { @@ -873,7 +891,15 @@ pub async fn handle_p2p_task( ); } - P2pMessageKind::CosignedBlock => unreachable!(), + P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => { + assert_eq!(msg_genesis, genesis); + log::trace!("handling message for tributary {:?}", spec_set); + if tributary.tributary.handle_message(&msg.msg).await { + P2p::broadcast(&p2p, msg.kind, msg.msg).await; + } + } + + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(), } } } @@ -893,15 +919,16 @@ pub async fn handle_p2p_task( loop { let msg = p2p.receive().await; match msg.kind { - P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) | + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); } } - P2pMessageKind::CosignedBlock => { + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => { let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { log::error!("received CosignedBlock message with invalidly serialized contents"); continue; diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs index 55b6c99f..db4c158f 100644 --- a/coordinator/src/tests/mod.rs +++ b/coordinator/src/tests/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::RwLock; use crate::{ processors::{Message, 
Processors}, - TributaryP2p, P2pMessageKind, P2p, + TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p, }; pub mod tributary; @@ -45,7 +45,10 @@ impl Processors for MemProcessors { #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] -pub struct LocalP2p(usize, pub Arc>, Vec)>>)>>); +pub struct LocalP2p( + usize, + pub Arc>, Vec)>>)>>, +); impl LocalP2p { pub fn new(validators: usize) -> Vec { @@ -66,10 +69,12 @@ impl P2p for LocalP2p { async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn send_raw(&self, to: Self::Id, msg: Vec) { - self.1.write().await.1[to].push_back((self.0, msg)); + let mut msg_ref = msg.as_slice(); + let kind = ReqResMessageKind::read(&mut msg_ref).unwrap(); + self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec())); } - async fn broadcast_raw(&self, _kind: P2pMessageKind, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { // Content-based deduplication let mut lock = self.1.write().await; { @@ -81,19 +86,26 @@ impl P2p for LocalP2p { } let queues = &mut lock.1; + let kind_len = (match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }) + .len(); + let msg = msg[kind_len ..].to_vec(); + for (i, msg_queue) in queues.iter_mut().enumerate() { if i == self.0 { continue; } - msg_queue.push_back((self.0, msg.clone())); + msg_queue.push_back((self.0, kind, msg.clone())); } } - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn receive(&self) -> P2pMessage { // This is a cursed way to implement an async read from a Vec loop { - if let Some(res) = self.1.write().await.1[self.0].pop_front() { - return res; + if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() { + return P2pMessage { sender, kind, msg }; } tokio::time::sleep(std::time::Duration::from_millis(100)).await; } @@ -103,6 +115,11 @@ impl P2p for LocalP2p { 
#[async_trait] impl TributaryP2p for LocalP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast( + self, + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)), + msg, + ) + .await } } diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 360af7ec..7fc6a064 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -26,7 +26,7 @@ use serai_db::MemDb; use tributary::Tributary; use crate::{ - P2pMessageKind, P2p, + GossipMessageKind, P2pMessageKind, P2p, tributary::{Transaction, TributarySpec}, tests::LocalP2p, }; @@ -98,7 +98,7 @@ pub async fn run_tributaries( for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); if tributary.handle_message(&msg.msg).await { p2p.broadcast(msg.kind, msg.msg).await; @@ -173,7 +173,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } @@ -199,7 +199,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } From 7dd587a8647fb83c305a1988da58f1be431edfe6 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 06:44:21 -0400 Subject: [PATCH 096/126] Inline broadcast_raw 
now that it doesn't have multiple callers --- coordinator/src/p2p.rs | 63 +++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 35 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index ba84a8b8..5e56073a 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -36,7 +36,7 @@ use libp2p::{ IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError, Behaviour as GsBehavior, }, - swarm::{NetworkBehaviour, SwarmEvent, Swarm}, + swarm::{NetworkBehaviour, SwarmEvent}, SwarmBuilder, }; @@ -456,32 +456,6 @@ impl LibP2p { tokio::spawn({ let mut time_of_last_p2p_message = Instant::now(); - #[allow(clippy::needless_pass_by_ref_mut)] // False positive - fn broadcast_raw( - p2p: &mut Swarm, - time_of_last_p2p_message: &mut Instant, - set: Option, - msg: Vec, - ) { - // Update the time of last message - *time_of_last_p2p_message = Instant::now(); - - let topic = - if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) }; - - match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) { - Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"), - Err(PublishError::InsufficientPeers) => { - log::warn!("failed to send p2p message due to insufficient peers") - } - Err(PublishError::MessageTooLarge) => { - panic!("tried to send a too large message: {}", hex::encode(msg)) - } - Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), - Err(PublishError::Duplicate) | Ok(_) => {} - } - } - async move { let connected_peers = connected_peers.clone(); @@ -516,22 +490,41 @@ impl LibP2p { // Handle any queued outbound messages msg = broadcast_recv.recv() => { + // Update the time of last message + time_of_last_p2p_message = Instant::now(); + let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. 
are we shutting down?"); + if matches!(kind, P2pMessageKind::ReqRes(_)) { - // Use request/response + // Use request/response, yet send to all connected peers for peer_id in swarm.connected_peers().copied().collect::>() { swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); } } else { // Use gossipsub - let set = kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - set, - msg, - ); + + let set = + kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); + let topic = if let Some(set) = set { + topic_for_set(set) + } else { + IdentTopic::new(LIBP2P_TOPIC) + }; + + match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) { + Err(PublishError::SigningError(e)) => { + panic!("signing error when broadcasting: {e}") + }, + Err(PublishError::InsufficientPeers) => { + log::warn!("failed to send p2p message due to insufficient peers") + } + Err(PublishError::MessageTooLarge) => { + panic!("tried to send a too large message: {}", hex::encode(msg)) + } + Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), + Err(PublishError::Duplicate) | Ok(_) => {} + } } } From 95591218bb675ea0586b81c47f433f77e3b31744 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 07:01:07 -0400 Subject: [PATCH 097/126] Remove cbor --- Cargo.lock | 11 ------- coordinator/Cargo.toml | 2 +- coordinator/src/p2p.rs | 72 +++++++++++++++++++++++++++++++++++++----- 3 files changed, 65 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 08edc496..57e438de 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1314,15 +1314,6 @@ dependencies = [ "thiserror", ] -[[package]] -name = "cbor4ii" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b4c883b9cc4757b061600d39001d4d0232bece4a3174696cf8f58a14db107d" -dependencies = [ - "serde", -] - [[package]] name = "cc" version = "1.0.88" @@ 
-4129,7 +4120,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8e3b4d67870478db72bac87bfc260ee6641d0734e0e3e275798f089c3fecfd4" dependencies = [ "async-trait", - "cbor4ii", "futures", "instant", "libp2p-core", @@ -4137,7 +4127,6 @@ dependencies = [ "libp2p-swarm", "log", "rand", - "serde", "smallvec", "void", ] diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index d12c3933..ae4e2be7 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "cbor", "request-response", "gossipsub", "macros"] } +libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } [dev-dependencies] tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 5e56073a..d147b588 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -1,7 +1,7 @@ use core::{time::Duration, fmt}; use std::{ sync::Arc, - io::Read, + io::{self, Read}, collections::{HashSet, HashMap}, time::{SystemTime, Instant}, }; @@ -15,13 +15,12 @@ use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorS use serai_db::Db; -use futures_util::StreamExt; +use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt}; use tokio::{ sync::{Mutex, RwLock, mpsc, broadcast}, time::sleep, }; -// TODO: Remove cbor use libp2p::{ core::multiaddr::{Protocol, Multiaddr}, identity::Keypair, @@ -29,7 +28,8 @@ use libp2p::{ tcp::Config as TcpConfig, noise, yamux, 
request_response::{ - Config as RrConfig, Message as RrMessage, Event as RrEvent, cbor::Behaviour as RrBehavior, + Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig, + Behaviour as RrBehavior, }, gossipsub::{ IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, @@ -44,6 +44,8 @@ pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p}; use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; +// Block size limit + 1 KB of space for signatures/metadata +const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; const LIBP2P_TOPIC: &str = "serai-coordinator"; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] @@ -210,9 +212,66 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { } } +#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)] +struct RrCodec; +#[async_trait] +impl RrCodecTrait for RrCodec { + type Protocol = &'static str; + type Request = Vec; + type Response = Vec; + + async fn read_request( + &mut self, + _: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + let mut len = [0; 4]; + io.read_exact(&mut len).await?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("not a 32-bit platform?"); + if len > MAX_LIBP2P_MESSAGE_SIZE { + Err(io::Error::other("request length exceeded MAX_LIBP2P_MESSAGE_SIZE"))?; + } + // This may be a non-trivial allocation easily causable + // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, + // the max message size should be sufficiently sane + let mut buf = vec![0; len]; + io.read_exact(&mut buf).await?; + Ok(buf) + } + async fn read_response( + &mut self, + proto: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + self.read_request(proto, io).await + } + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut W, + req: Vec, + ) -> io::Result<()> { + io.write_all( + &u32::try_from(req.len()) + 
.map_err(|_| io::Error::other("request length exceeded 2**32"))? + .to_le_bytes(), + ) + .await?; + io.write_all(&req).await + } + async fn write_response( + &mut self, + proto: &Self::Protocol, + io: &mut W, + res: Vec, + ) -> io::Result<()> { + self.write_request(proto, io, res).await + } +} + #[derive(NetworkBehaviour)] struct Behavior { - reqres: RrBehavior, Vec>, + reqres: RrBehavior, gossipsub: GsBehavior, } @@ -233,9 +292,6 @@ impl fmt::Debug for LibP2p { impl LibP2p { #[allow(clippy::new_without_default)] pub fn new(serai: Arc) -> Self { - // Block size limit + 1 KB of space for signatures/metadata - const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; - log::info!("creating a libp2p instance"); let throwaway_key_pair = Keypair::generate_ed25519(); From 43dc0366604056cdc81c57270446dc864c9ba462 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 10:55:52 -0400 Subject: [PATCH 098/126] Use a HashSet for which networks to try peer finding for Prevents a flood of retries from individually failed attempts within a batch of peer connection attempts. 
--- coordinator/src/p2p.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index d147b588..387e4455 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -467,10 +467,12 @@ impl LibP2p { // TODO: We should also connect to random peers from random nets as needed for // cosigning - // Define a buffer, `to_retry`, so we can exhaust this channel before sending more down - // it - let mut to_retry = vec![]; + // Drain the chainnel, de-duplicating any networks in it + let mut connect_to_network_networks = HashSet::new(); while let Some(network) = connect_to_network_recv.recv().await { + connect_to_network_networks.insert(network); + } + for network in connect_to_network_networks { if let Ok(mut nodes) = serai.p2p_validators(network).await { // If there's an insufficient amount of nodes known, connect to all yet add it // back and break @@ -480,7 +482,8 @@ impl LibP2p { network, nodes.len() ); - to_retry.push(network); + // Retry this later + connect_to_network_send.send(network).unwrap(); for node in nodes { connect(network, node).await; } @@ -499,9 +502,6 @@ impl LibP2p { } } } - for to_retry in to_retry { - connect_to_network_send.send(to_retry).unwrap(); - } // Sleep 60 seconds before moving to the next iteration tokio::time::sleep(core::time::Duration::from_secs(60)).await; } From d94c9a4a5e171d03df00aac49d98fe0cd54c8231 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 11:59:38 -0400 Subject: [PATCH 099/126] Use a constant for the target amount of peer --- coordinator/src/p2p.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 387e4455..f8a3f952 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -370,6 +370,9 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } + // TODO: If a network has less than TARGET_PEERS, this will 
cause retried ad infinitum + const TARGET_PEERS: usize = 8; + // The addrs we're currently dialing, and the networks associated with them let dialing_peers = Arc::new(RwLock::new(HashMap::new())); // The peers we're currently connected to, and the networks associated with them @@ -448,7 +451,7 @@ impl LibP2p { } } // If we do not, start connecting to this network again - if remaining_peers < 3 { + if remaining_peers < TARGET_PEERS { connect_to_network_send.send(net).expect( "couldn't send net to connect to due to disconnects (receiver dropped?)", ); @@ -476,7 +479,7 @@ impl LibP2p { if let Ok(mut nodes) = serai.p2p_validators(network).await { // If there's an insufficient amount of nodes known, connect to all yet add it // back and break - if nodes.len() < 3 { + if nodes.len() < TARGET_PEERS { log::warn!( "insufficient amount of P2P nodes known for {:?}: {}", network, @@ -643,7 +646,7 @@ impl LibP2p { } } // If we do not, start connecting to this network again - if remaining_peers < 3 { + if remaining_peers < TARGET_PEERS { connect_to_network_send .send(net) .expect( From 19e68f7f7594dc9f3944ae8aefbd51e047d98de0 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 12:04:30 -0400 Subject: [PATCH 100/126] Correct selection of to-try peers to prevent infinite loops when to-try < target --- coordinator/src/p2p.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index f8a3f952..d31ccaf2 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -493,8 +493,8 @@ impl LibP2p { continue; } - // Randomly select up to 5 - for _ in 0 .. 5 { + // Randomly select up to 2 * TARGET_PEERS + for _ in 0 .. 
(2 * TARGET_PEERS) { if !nodes.is_empty() { let to_connect = nodes.swap_remove( usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) From cc7202e0bf74f9039763bb1d73d686998001709f Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 12:38:59 -0400 Subject: [PATCH 101/126] Correct recv to try_recv when exhausting channel --- coordinator/src/p2p.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index d31ccaf2..92355295 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -370,7 +370,7 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } - // TODO: If a network has less than TARGET_PEERS, this will cause retried ad infinitum + // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum const TARGET_PEERS: usize = 8; // The addrs we're currently dialing, and the networks associated with them @@ -472,7 +472,7 @@ impl LibP2p { // Drain the chainnel, de-duplicating any networks in it let mut connect_to_network_networks = HashSet::new(); - while let Some(network) = connect_to_network_recv.recv().await { + while let Ok(network) = connect_to_network_recv.try_recv() { connect_to_network_networks.insert(network); } for network in connect_to_network_networks { From b39c7514030a78dee0f9cf858ca5449e6d5743cc Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 23 Apr 2024 12:59:34 -0400 Subject: [PATCH 102/126] Reduce target peers a bit --- coordinator/src/p2p.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 92355295..ef876f9a 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -371,7 +371,7 @@ impl LibP2p { } // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum - const TARGET_PEERS: usize = 8; + const TARGET_PEERS: usize = 5; // The addrs we're currently dialing, and the 
networks associated with them let dialing_peers = Arc::new(RwLock::new(HashMap::new())); @@ -493,8 +493,8 @@ impl LibP2p { continue; } - // Randomly select up to 2 * TARGET_PEERS - for _ in 0 .. (2 * TARGET_PEERS) { + // Randomly select up to 150% of the TARGET_PEERS + for _ in 0 .. ((3 * TARGET_PEERS) / 2) { if !nodes.is_empty() { let to_connect = nodes.swap_remove( usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) From d1474e9188be181f45642fbbb5352d2579787f16 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 24 Apr 2024 03:38:31 -0400 Subject: [PATCH 103/126] Route top-level transfers through to the processor --- coins/ethereum/src/erc20.rs | 4 +- coins/ethereum/src/router.rs | 40 +++++++++--------- processor/src/networks/ethereum.rs | 65 ++++++++++++++++++++++++++++-- 3 files changed, 85 insertions(+), 24 deletions(-) diff --git a/coins/ethereum/src/erc20.rs b/coins/ethereum/src/erc20.rs index 3b5bbee2..86bd1b2d 100644 --- a/coins/ethereum/src/erc20.rs +++ b/coins/ethereum/src/erc20.rs @@ -22,8 +22,8 @@ pub struct TopLevelErc20Transfer { /// A view for an ERC20 contract. #[derive(Clone, Debug)] -pub struct ERC20(Arc>, Address); -impl ERC20 { +pub struct Erc20(Arc>, Address); +impl Erc20 { /// Construct a new view of the specified ERC20 contract. 
/// /// This checks a contract is deployed at that address yet does not check the contract is diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs index c4399ae3..d2750a02 100644 --- a/coins/ethereum/src/router.rs +++ b/coins/ethereum/src/router.rs @@ -229,30 +229,32 @@ impl Router { } } + pub async fn key_at_end_of_block(&self, block: u64) -> Result { + let filter = Filter::new().from_block(0).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; + let last_key_x_coordinate = last_key_x_coordinate_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .key; + + let mut compressed_point = ::Repr::default(); + compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); + compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); + + Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError) + } + pub async fn in_instructions( &self, block: u64, allowed_tokens: &HashSet<[u8; 20]>, ) -> Result, Error> { - let key_at_end_of_block = { - let filter = Filter::new().from_block(0).to_block(block).address(self.1); - let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); - let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; - - let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; - let last_key_x_coordinate = last_key_x_coordinate_log - .log_decode::() - .map_err(|_| Error::ConnectionError)? 
- .inner - .data - .key; - - let mut compressed_point = ::Repr::default(); - compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); - compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); - - ProjectivePoint::from_bytes(&compressed_point).expect("router's last key wasn't a valid key") - }; + let key_at_end_of_block = self.key_at_end_of_block(block).await?; let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 35979f30..4de08837 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/src/networks/ethereum.rs @@ -17,6 +17,7 @@ use ethereum_serai::{ alloy_rpc_client::ClientBuilder, alloy_provider::{Provider, RootProvider}, crypto::{PublicKey, Signature}, + erc20::Erc20, deployer::Deployer, router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, machine::*, @@ -475,10 +476,59 @@ impl Network for Ethereum { ) -> Vec { let router = self.router().await; let router = router.as_ref().unwrap(); - - // TODO: Top-level transfers + // Grab the key at the end of the epoch + let key_at_end_of_block = loop { + match router.key_at_end_of_block(block.start + 31).await { + Ok(key) => break key, + Err(e) => { + log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; let mut all_events = vec![]; + let mut top_level_txids = HashSet::new(); + for erc20_addr in [DAI] { + let erc20 = loop { + let Ok(Some(erc20)) = Erc20::new(self.provider.clone(), erc20_addr).await else { + log::error!( + "couldn't connect to Ethereum node for an ERC20: {}", + hex::encode(erc20_addr) + ); + sleep(Duration::from_secs(5)).await; + continue; + }; + break erc20; + }; + + for block in block.start .. 
(block.start + 32) { + let transfers = loop { + match erc20.top_level_transfers(block, router.address()).await { + Ok(transfers) => break transfers, + Err(e) => { + log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; + + for transfer in transfers { + top_level_txids.insert(transfer.id); + all_events.push(EthereumInInstruction { + id: (transfer.id, 0), + from: transfer.from, + coin: EthereumCoin::Erc20(erc20_addr), + amount: transfer.amount, + data: transfer.data, + key_at_end_of_block, + }); + } + } + } + for block in block.start .. (block.start + 32) { let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; while let Err(e) = events { @@ -486,7 +536,16 @@ impl Network for Ethereum { sleep(Duration::from_secs(5)).await; events = router.in_instructions(block, &HashSet::from([DAI])).await; } - all_events.extend(events.unwrap()); + let mut events = events.unwrap(); + for event in &mut events { + // A transaction should either be a top-level transfer or a Router InInstruction + if top_level_txids.contains(&event.id.0) { + panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); + } + // Overwrite the key at end of block to key at end of epoch + event.key_at_end_of_block = key_at_end_of_block; + } + all_events.extend(events); } for event in &all_events { From d57fef899912bb4aabeee53877a65cd25766150d Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 24 Apr 2024 03:55:23 -0400 Subject: [PATCH 104/126] Slight documentation tweaks --- coins/ethereum/src/deployer.rs | 1 + processor/src/multisigs/scanner.rs | 1 + processor/src/multisigs/scheduler/mod.rs | 1 + spec/integrations/Ethereum.md | 12 +++++++----- 4 files changed, 10 insertions(+), 5 deletions(-) diff --git a/coins/ethereum/src/deployer.rs b/coins/ethereum/src/deployer.rs index d6cfeee9..1a16664c 100644 --- a/coins/ethereum/src/deployer.rs +++ 
b/coins/ethereum/src/deployer.rs @@ -100,6 +100,7 @@ impl Deployer { let to_block = BlockNumberOrTag::Latest; // Find the first log using this init code (where the init code is binding to the key) + // TODO: Make an abstraction for event filtering (de-duplicating common code) let filter = Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index 20c61192..3d28f3e8 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -29,6 +29,7 @@ pub enum ScannerEvent { outputs: Vec, }, // Eventuality completion found on-chain + // TODO: Move this from a tuple Completed( Vec, usize, diff --git a/processor/src/multisigs/scheduler/mod.rs b/processor/src/multisigs/scheduler/mod.rs index 6ec95fc4..26c940fe 100644 --- a/processor/src/multisigs/scheduler/mod.rs +++ b/processor/src/multisigs/scheduler/mod.rs @@ -53,6 +53,7 @@ pub trait Scheduler: Sized + Clone + PartialEq + Debug { txn: &mut D::Transaction<'_>, utxos: Vec, payments: Vec>, + // TODO: Tighten this to multisig_for_any_change key_for_any_change: ::G, force_spend: bool, ) -> Vec>; diff --git a/spec/integrations/Ethereum.md b/spec/integrations/Ethereum.md index bf32f101..1e1f3ba1 100644 --- a/spec/integrations/Ethereum.md +++ b/spec/integrations/Ethereum.md @@ -2,14 +2,16 @@ ### Addresses -Ethereum addresses are 20-byte hashes. +Ethereum addresses are 20-byte hashes, identical to Ethereum proper. ### In Instructions -Ethereum In Instructions are present via being appended to the calldata -transferring funds to Serai. `origin` is automatically set to the party from -which funds are being transferred. For an ERC20, this is `from`. For ETH, this -is the caller. +In Instructions may be created in one of two ways. 
+ +1) Have an EOA call `transfer` or `transferFrom` on an ERC20, appending the + encoded InInstruction directly after the calldata. `origin` defaults to the + party transferred from. +2) Call `inInstruction` on the Router. `origin` defaults to `msg.sender`. ### Out Instructions From cef63a631adbe696af730596cd29d71f3f320352 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 24 Apr 2024 09:25:11 -0400 Subject: [PATCH 105/126] Add a dev ethereum Docker setup Also adds untested Dockerfiles for reth, lighthouse, and nimbus. --- orchestration/dev/coins/ethereum/run.sh | 5 +- orchestration/src/coins/bitcoin.rs | 2 +- orchestration/src/coins/ethereum.rs | 5 -- .../coins/ethereum/consensus/lighthouse.rs | 36 ++++++++++++++ .../src/coins/ethereum/consensus/mod.rs | 6 +++ .../src/coins/ethereum/consensus/nimbus.rs | 49 +++++++++++++++++++ .../src/coins/ethereum/execution/anvil.rs | 14 ++++++ .../src/coins/ethereum/execution/mod.rs | 5 ++ .../src/coins/ethereum/execution/reth.rs | 38 ++++++++++++++ orchestration/src/coins/ethereum/mod.rs | 43 ++++++++++++++++ orchestration/src/coins/monero.rs | 2 +- orchestration/src/coordinator.rs | 2 +- orchestration/src/main.rs | 2 +- orchestration/src/message_queue.rs | 2 +- orchestration/src/processor.rs | 2 +- orchestration/src/serai.rs | 2 +- .../ethereum/consensus/lighthouse/run.sh | 3 ++ .../coins/ethereum/consensus/nimbus/run.sh | 3 ++ .../coins/ethereum/execution/geth/run.sh | 8 +++ .../coins/ethereum/execution/reth/run.sh | 3 ++ orchestration/testnet/coins/ethereum/run.sh | 4 +- 21 files changed, 217 insertions(+), 19 deletions(-) delete mode 100644 orchestration/src/coins/ethereum.rs create mode 100644 orchestration/src/coins/ethereum/consensus/lighthouse.rs create mode 100644 orchestration/src/coins/ethereum/consensus/mod.rs create mode 100644 orchestration/src/coins/ethereum/consensus/nimbus.rs create mode 100644 orchestration/src/coins/ethereum/execution/anvil.rs create mode 100644 
orchestration/src/coins/ethereum/execution/mod.rs create mode 100644 orchestration/src/coins/ethereum/execution/reth.rs create mode 100644 orchestration/src/coins/ethereum/mod.rs create mode 100755 orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh create mode 100755 orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh create mode 100755 orchestration/testnet/coins/ethereum/execution/geth/run.sh create mode 100755 orchestration/testnet/coins/ethereum/execution/reth/run.sh diff --git a/orchestration/dev/coins/ethereum/run.sh b/orchestration/dev/coins/ethereum/run.sh index 0b86ff69..4fee3e46 100755 --- a/orchestration/dev/coins/ethereum/run.sh +++ b/orchestration/dev/coins/ethereum/run.sh @@ -1,6 +1,3 @@ #!/bin/sh -geth --dev --networkid 5208 --datadir "eth-devnet" \ - --http --http.api "web3,net,eth,miner" \ - --http.addr 0.0.0.0 --http.port 8545 \ - --http.vhosts="*" --http.corsdomain "*" +~/.foundry/bin/anvil --no-mining --slots-in-an-epoch 32 diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index 527b1062..94686244 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, os, write_dockerfile}; diff --git a/orchestration/src/coins/ethereum.rs b/orchestration/src/coins/ethereum.rs deleted file mode 100644 index 2e15d370..00000000 --- a/orchestration/src/coins/ethereum.rs +++ /dev/null @@ -1,5 +0,0 @@ -use std::path::Path; - -pub fn ethereum(_orchestration_path: &Path) { - // TODO -} diff --git a/orchestration/src/coins/ethereum/consensus/lighthouse.rs b/orchestration/src/coins/ethereum/consensus/lighthouse.rs new file mode 100644 index 00000000..add9728b --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/lighthouse.rs @@ -0,0 +1,36 @@ +use crate::Network; + +pub fn lighthouse(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + 
#[rustfmt::skip] + const DOWNLOAD_LIGHTHOUSE: &str = r#" +FROM alpine:latest as lighthouse + +ENV LIGHTHOUSE_VERSION=5.1.3 + +RUN apk --no-cache add git gnupg + +# Download lighthouse +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0 +gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract lighthouse +RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_lighthouse = format!( + r#" +COPY --from=lighthouse --chown=ethereum lighthouse /bin + +ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse) +} diff --git a/orchestration/src/coins/ethereum/consensus/mod.rs b/orchestration/src/coins/ethereum/consensus/mod.rs new file mode 100644 index 00000000..4f64c0d8 --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/mod.rs @@ -0,0 +1,6 @@ +mod lighthouse; +#[allow(unused)] +pub use lighthouse::lighthouse; + +mod nimbus; +pub use nimbus::nimbus; diff --git a/orchestration/src/coins/ethereum/consensus/nimbus.rs b/orchestration/src/coins/ethereum/consensus/nimbus.rs new file mode 100644 index 00000000..07006aa9 --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/nimbus.rs @@ -0,0 +1,49 @@ +use crate::Network; + +pub fn nimbus(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + let platform = match std::env::consts::ARCH { + "x86_64" => "amd64", + "arm" => 
"arm32v7", + "aarch64" => "arm64v8", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let checksum = match platform { + "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba", + "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45", + "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let download_nimbus = format!(r#" +FROM alpine:latest as nimbus + +ENV NIMBUS_VERSION=24.3.0 +ENV NIMBUS_COMMIT=dc19b082 + +# Download nimbus +RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz + +# Extract nimbus +RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz +RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus + +# Verify the checksum +RUN sha512sum nimbus | grep {checksum} +"#); + + let run_nimbus = format!( + r#" +COPY --from=nimbus --chown=ethereum nimbus /bin + +ADD /orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (download_nimbus, String::new(), run_nimbus) +} diff --git a/orchestration/src/coins/ethereum/execution/anvil.rs b/orchestration/src/coins/ethereum/execution/anvil.rs new file mode 100644 index 00000000..53d894ec --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/anvil.rs @@ -0,0 +1,14 @@ +use crate::Network; + +pub fn anvil(network: Network) -> (String, String, String) { + assert_eq!(network, Network::Dev); + + const ANVIL_SETUP: &str = r#" +RUN curl -L https://foundry.paradigm.xyz | bash || exit 0 +RUN ~/.foundry/bin/foundryup + +EXPOSE 
8545 +"#; + + (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string()) +} diff --git a/orchestration/src/coins/ethereum/execution/mod.rs b/orchestration/src/coins/ethereum/execution/mod.rs new file mode 100644 index 00000000..3db59c84 --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/mod.rs @@ -0,0 +1,5 @@ +mod reth; +pub use reth::reth; + +mod anvil; +pub use anvil::anvil; diff --git a/orchestration/src/coins/ethereum/execution/reth.rs b/orchestration/src/coins/ethereum/execution/reth.rs new file mode 100644 index 00000000..8c80a9fa --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/reth.rs @@ -0,0 +1,38 @@ +use crate::Network; + +pub fn reth(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + #[rustfmt::skip] + const DOWNLOAD_RETH: &str = r#" +FROM alpine:latest as reth + +ENV RETH_VERSION=0.2.0-beta.6 + +RUN apk --no-cache add git gnupg + +# Download reth +RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys A3AE097C89093A124049DF1F5391A3C4100530B4 +gpg --verify reth-v${RETH_VERSION}-$(uname -m).tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract reth +RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_reth = format!( + r#" +COPY --from=reth --chown=ethereum reth /bin + +EXPOSE 30303 9001 8545 + +ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_RETH.to_string(), String::new(), run_reth) +} diff --git a/orchestration/src/coins/ethereum/mod.rs b/orchestration/src/coins/ethereum/mod.rs new file mode 100644 index 00000000..a06318c0 --- 
/dev/null +++ b/orchestration/src/coins/ethereum/mod.rs @@ -0,0 +1,43 @@ +use std::path::Path; + +use crate::{Network, Os, mimalloc, os, write_dockerfile}; + +mod execution; +use execution::*; + +mod consensus; +use consensus::*; + +pub fn ethereum(orchestration_path: &Path, network: Network) { + let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) = + if network == Network::Dev { + (anvil(network), (String::new(), String::new(), String::new())) + } else { + // TODO: Select an EL/CL based off a RNG seeded from the public key + (reth(network), nimbus(network)) + }; + + let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download; + + let run = format!( + r#" +ADD /orchestration/{}/coins/ethereum/run.sh /run.sh +CMD ["/run.sh"] +"#, + network.label() + ); + let run = mimalloc(Os::Debian).to_string() + + &os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") + + &el_run + + &cl_run + + &run; + + let res = download + &run; + + let mut ethereum_path = orchestration_path.to_path_buf(); + ethereum_path.push("coins"); + ethereum_path.push("ethereum"); + ethereum_path.push("Dockerfile"); + + write_dockerfile(ethereum_path, &res); +} diff --git a/orchestration/src/coins/monero.rs b/orchestration/src/coins/monero.rs index 873c6458..c21bc610 100644 --- a/orchestration/src/coins/monero.rs +++ b/orchestration/src/coins/monero.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, write_dockerfile}; diff --git a/orchestration/src/coordinator.rs b/orchestration/src/coordinator.rs index 67a24527..13fdff59 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 4be84cd4..0e6c7cb0 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -266,7 +266,7 @@ fn 
dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); bitcoin(&orchestration_path, network); - ethereum(&orchestration_path); + ethereum(&orchestration_path, network); monero(&orchestration_path, network); if network == Network::Dev { monero_wallet_rpc(&orchestration_path); diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index f16c6cbe..eb662b67 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 7ee69d11..8a2c8c77 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index 77d098b6..2e1e915c 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; diff --git a/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh new file mode 100755 index 00000000..1b3857bf --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh new file mode 100755 index 00000000..2bb8d868 --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +exit 1 diff --git 
a/orchestration/testnet/coins/ethereum/execution/geth/run.sh b/orchestration/testnet/coins/ethereum/execution/geth/run.sh new file mode 100755 index 00000000..fee4a57c --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/geth/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +#geth --dev --networkid 5208 \ +# --http --http.api "web3,net,eth,miner" \ +# --http.addr 0.0.0.0 --http.port 8545 \ +# --http.vhosts="*" --http.corsdomain "*" + +exit 1 diff --git a/orchestration/testnet/coins/ethereum/execution/reth/run.sh b/orchestration/testnet/coins/ethereum/execution/reth/run.sh new file mode 100755 index 00000000..5be8924a --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/reth/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info reth node --authrpc.jwtsecret /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/run.sh b/orchestration/testnet/coins/ethereum/run.sh index 2bb8d868..82b8ff58 100755 --- a/orchestration/testnet/coins/ethereum/run.sh +++ b/orchestration/testnet/coins/ethereum/run.sh @@ -1,3 +1 @@ -#!/bin/sh - -exit 1 +/execution_layer.sh & /consensus_layer.sh From bc1dec79917d37d326ac3d9bc571a64131b0424a Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sun, 28 Apr 2024 04:04:53 -0400 Subject: [PATCH 106/126] Move TRANSACTION_MESSAGE to 1 --- coordinator/tributary/src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index dcf38c68..a4c6bfe5 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -59,7 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; -pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1 +pub(crate) const TRANSACTION_MESSAGE: u8 = 1; #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] From 21123590bb600323aa424f64ffaa5d321b1b22ed Mon Sep 17 00:00:00 2001 From: 
GitHub Actions <> Date: Wed, 1 May 2024 01:18:23 +0000 Subject: [PATCH 107/126] Update nightly --- .github/nightly-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/nightly-version b/.github/nightly-version index e2e82b48..514aef61 100644 --- a/.github/nightly-version +++ b/.github/nightly-version @@ -1 +1 @@ -nightly-2024-04-23 +nightly-2024-05-01 From 5501de1f3af12589e6193bb881a1de49cccc7dab Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 May 2024 13:33:56 -0400 Subject: [PATCH 108/126] Update to the latest alloy Also makes various tweaks as necessary. --- Cargo.lock | 63 ++++++++++--------- coins/ethereum/Cargo.toml | 15 +++-- .../alloy-simple-request-transport/Cargo.toml | 4 +- coins/ethereum/src/crypto.rs | 5 +- coins/ethereum/src/erc20.rs | 23 ++----- coins/ethereum/src/lib.rs | 8 ++- coins/ethereum/src/router.rs | 31 ++++++--- coins/ethereum/src/tests/mod.rs | 11 +++- coins/ethereum/src/tests/schnorr.rs | 4 +- 9 files changed, 91 insertions(+), 73 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 57e438de..66212c4a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,7 +102,7 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-eips", "alloy-primitives", @@ -110,7 +110,6 @@ dependencies = [ "alloy-serde", "c-kzg", "serde", - "sha2", ] [[package]] @@ -125,7 +124,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = 
"git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -133,23 +132,25 @@ dependencies = [ "c-kzg", "once_cell", "serde", + "sha2", ] [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "alloy-serde", "serde", + "serde_json", ] [[package]] name = "alloy-json-abi" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -159,7 +160,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "serde", @@ -171,7 +172,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-consensus", "alloy-eips", @@ -179,6 +180,7 @@ dependencies = [ "alloy-primitives", "alloy-rpc-types", "alloy-signer", + "alloy-sol-types", "async-trait", "futures-utils-wasm", 
"thiserror", @@ -187,7 +189,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -201,9 +203,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" dependencies = [ "alloy-rlp", "bytes", @@ -224,7 +226,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -271,7 +273,7 @@ dependencies = [ [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -289,7 +291,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = 
"git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-consensus", "alloy-eips", @@ -307,7 +309,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -319,7 +321,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "serde", @@ -329,7 +331,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-primitives", "async-trait", @@ -352,9 +354,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -371,9 +373,9 @@ dependencies = [ [[package]] name = "alloy-sol-macro-input" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" dependencies = [ "alloy-json-abi", "const-hex", @@ -388,18 +390,18 @@ dependencies = [ [[package]] name = "alloy-sol-type-parser" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" dependencies = [ "winnow 0.6.6", ] [[package]] name = "alloy-sol-types" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -410,7 +412,7 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-json-rpc", "base64 0.22.0", @@ -428,7 +430,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" dependencies = [ "alloy-transport", "url", @@ -2331,6 +2333,7 @@ version = "0.1.0" dependencies = [ "alloy-consensus", "alloy-core", + "alloy-network", "alloy-node-bindings", "alloy-provider", "alloy-rpc-client", @@ -3790,7 +3793,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.4", ] [[package]] @@ -9341,9 +9344,9 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.0" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" dependencies = [ "paste", "proc-macro2", diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index 4bb92fe4..dc30764e 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -29,18 +29,21 @@ frost = { package = "modular-frost", path = "../../crypto/frost", default-featur alloy-core = { version = "0.7", default-features = false } alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] } -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } -alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false, features = ["k256"] } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = 
"b79db21734cffddc11753fe62ba571565c896f42", default-features = false } alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } + +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false, optional = true } [dev-dependencies] frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } [features] -tests = [] +tests = ["alloy-node-bindings"] diff --git a/coins/ethereum/alloy-simple-request-transport/Cargo.toml b/coins/ethereum/alloy-simple-request-transport/Cargo.toml index 115998e4..0d9ea6b8 100644 --- a/coins/ethereum/alloy-simple-request-transport/Cargo.toml +++ b/coins/ethereum/alloy-simple-request-transport/Cargo.toml @@ -21,8 +21,8 @@ tower = "0.4" serde_json = { version = "1", default-features = false } simple-request = { path = "../../../common/request", default-features = false } -alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } -alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", 
default-features = false } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } [features] default = ["tls"] diff --git a/coins/ethereum/src/crypto.rs b/coins/ethereum/src/crypto.rs index ca228eb5..6ea6a0b0 100644 --- a/coins/ethereum/src/crypto.rs +++ b/coins/ethereum/src/crypto.rs @@ -31,7 +31,10 @@ pub fn address(point: &ProjectivePoint) -> [u8; 20] { keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap() } -pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed { +/// Deterministically sign a transaction. +/// +/// This function panics if passed a transaction with a non-None chain ID. +pub fn deterministically_sign(tx: &TxLegacy) -> Signed { assert!( tx.chain_id.is_none(), "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" diff --git a/coins/ethereum/src/erc20.rs b/coins/ethereum/src/erc20.rs index 86bd1b2d..1d874403 100644 --- a/coins/ethereum/src/erc20.rs +++ b/coins/ethereum/src/erc20.rs @@ -4,7 +4,7 @@ use alloy_core::primitives::{Address, B256, U256}; use alloy_sol_types::{SolInterface, SolEvent}; -use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_rpc_types::Filter; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; @@ -25,22 +25,8 @@ pub struct TopLevelErc20Transfer { pub struct Erc20(Arc>, Address); impl Erc20 { /// Construct a new view of the specified ERC20 contract. - /// - /// This checks a contract is deployed at that address yet does not check the contract is - /// actually an ERC20. 
- pub async fn new( - provider: Arc>, - address: [u8; 20], - ) -> Result, Error> { - let code = provider - .get_code_at(address.into(), BlockNumberOrTag::Finalized.into()) - .await - .map_err(|_| Error::ConnectionError)?; - // Contract has yet to be deployed - if code.is_empty() { - return Ok(None); - } - Ok(Some(Self(provider.clone(), Address::from(&address)))) + pub fn new(provider: Arc>, address: [u8; 20]) -> Self { + Self(provider, Address::from(&address)) } pub async fn top_level_transfers( @@ -65,7 +51,8 @@ impl Erc20 { } let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?; + let tx = + self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?; // If this is a top-level call... if tx.to == Some(self.1) { diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index 8d4a5312..eda54c72 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,8 +1,10 @@ use thiserror::Error; pub use alloy_core; -pub use alloy_consensus; +pub use alloy_sol_types; +pub use alloy_consensus; +pub use alloy_network; pub use alloy_rpc_types; pub use alloy_simple_request_transport; pub use alloy_rpc_client; @@ -18,8 +20,8 @@ pub mod router; pub mod machine; -#[cfg(test)] -mod tests; +#[cfg(any(test, feature = "tests"))] +pub mod tests; #[derive(Clone, Copy, PartialEq, Eq, Debug, Error)] pub enum Error { diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs index d2750a02..8d46b24f 100644 --- a/coins/ethereum/src/router.rs +++ b/coins/ethereum/src/router.rs @@ -159,11 +159,12 @@ impl Router { #[cfg(test)] pub async fn serai_key(&self, at: [u8; 32]) -> Result { let call = TransactionRequest::default() - .to(Some(self.1)) + .to(self.1) .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); let bytes = self .0 - .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .call(&call) 
+ .block(BlockId::Hash(B256::from(at).into())) .await .map_err(|_| Error::ConnectionError)?; let res = @@ -197,11 +198,12 @@ impl Router { #[cfg(test)] pub async fn nonce(&self, at: [u8; 32]) -> Result { let call = TransactionRequest::default() - .to(Some(self.1)) + .to(self.1) .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); let bytes = self .0 - .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .call(&call) + .block(BlockId::Hash(B256::from(at).into())) .await .map_err(|_| Error::ConnectionError)?; let res = @@ -229,10 +231,13 @@ impl Router { } } - pub async fn key_at_end_of_block(&self, block: u64) -> Result { + pub async fn key_at_end_of_block(&self, block: u64) -> Result, Error> { let filter = Filter::new().from_block(0).to_block(block).address(self.1); let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + if all_keys.is_empty() { + return Ok(None); + }; let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; let last_key_x_coordinate = last_key_x_coordinate_log @@ -246,7 +251,9 @@ impl Router { compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); - Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError) + let key = + Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?; + Ok(Some(key)) } pub async fn in_instructions( @@ -254,7 +261,9 @@ impl Router { block: u64, allowed_tokens: &HashSet<[u8; 20]>, ) -> Result, Error> { - let key_at_end_of_block = self.key_at_end_of_block(block).await?; + let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? 
else { + return Ok(vec![]); + }; let filter = Filter::new().from_block(block).to_block(block).address(self.1); let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); @@ -274,7 +283,13 @@ impl Router { ); let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; - let tx = self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?; + let tx = self + .0 + .get_transaction_by_hash(tx_hash) + .await + .ok() + .flatten() + .ok_or(Error::ConnectionError)?; let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs index 3a381d42..085ef3a2 100644 --- a/coins/ethereum/src/tests/mod.rs +++ b/coins/ethereum/src/tests/mod.rs @@ -11,16 +11,20 @@ use alloy_core::{ }; use alloy_consensus::{SignableTransaction, TxLegacy}; -use alloy_rpc_types::TransactionReceipt; +use alloy_rpc_types::{BlockNumberOrTag, TransactionReceipt}; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; use crate::crypto::{address, deterministically_sign, PublicKey}; +#[cfg(test)] mod crypto; +#[cfg(test)] mod abi; +#[cfg(test)] mod schnorr; +#[cfg(test)] mod router; pub fn key_gen() -> (HashMap>, PublicKey) { @@ -53,14 +57,15 @@ pub async fn send( // let chain_id = provider.get_chain_id().await.unwrap(); // tx.chain_id = Some(chain_id); tx.chain_id = None; - tx.nonce = provider.get_transaction_count(address, None).await.unwrap(); + tx.nonce = + provider.get_transaction_count(address, BlockNumberOrTag::Latest.into()).await.unwrap(); // 100 gwei tx.gas_price = 100_000_000_000u128; let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); assert!( - provider.get_balance(address, None).await.unwrap() > + provider.get_balance(address, BlockNumberOrTag::Latest.into()).await.unwrap() > ((U256::from(tx.gas_price) * 
U256::from(tx.gas_limit)) + tx.value) ); diff --git a/coins/ethereum/src/tests/schnorr.rs b/coins/ethereum/src/tests/schnorr.rs index 9311c292..21d8b45a 100644 --- a/coins/ethereum/src/tests/schnorr.rs +++ b/coins/ethereum/src/tests/schnorr.rs @@ -56,12 +56,12 @@ pub async fn call_verify( let px: [u8; 32] = public_key.px.to_repr().into(); let c_bytes: [u8; 32] = signature.c.to_repr().into(); let s_bytes: [u8; 32] = signature.s.to_repr().into(); - let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new( + let call = TransactionRequest::default().to(contract).input(TransactionInput::new( abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) .abi_encode() .into(), )); - let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?; + let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?; let res = abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; From 0c9dd5048e01bf6d7405e5acea9efd2a82cac2ab Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 May 2024 14:04:58 -0400 Subject: [PATCH 109/126] Processor scanner tests for Ethereum --- common/request/src/lib.rs | 6 +- orchestration/dev/coins/ethereum/run.sh | 2 +- processor/Cargo.toml | 2 +- processor/src/networks/ethereum.rs | 69 +++++---- processor/src/tests/addresses.rs | 11 +- processor/src/tests/literal/mod.rs | 186 ++++++++++++++++++++++-- processor/src/tests/mod.rs | 101 +++++++------ processor/src/tests/scanner.rs | 31 +++- processor/src/tests/signer.rs | 9 +- processor/src/tests/wallet.rs | 19 ++- 10 files changed, 329 insertions(+), 107 deletions(-) diff --git a/common/request/src/lib.rs b/common/request/src/lib.rs index ad452a0c..60e51019 100644 --- a/common/request/src/lib.rs +++ b/common/request/src/lib.rs @@ -55,6 +55,8 @@ impl Client { fn connector() -> Connector { let mut res = HttpConnector::new(); 
res.set_keepalive(Some(core::time::Duration::from_secs(60))); + res.set_nodelay(true); + res.set_reuse_address(true); #[cfg(feature = "tls")] let res = HttpsConnectorBuilder::new() .with_native_roots() @@ -68,7 +70,9 @@ impl Client { pub fn with_connection_pool() -> Client { Client { connection: Connection::ConnectionPool( - HyperClient::builder(TokioExecutor::new()).build(Self::connector()), + HyperClient::builder(TokioExecutor::new()) + .pool_idle_timeout(core::time::Duration::from_secs(60)) + .build(Self::connector()), ), } } diff --git a/orchestration/dev/coins/ethereum/run.sh b/orchestration/dev/coins/ethereum/run.sh index 4fee3e46..464f4c6e 100755 --- a/orchestration/dev/coins/ethereum/run.sh +++ b/orchestration/dev/coins/ethereum/run.sh @@ -1,3 +1,3 @@ #!/bin/sh -~/.foundry/bin/anvil --no-mining --slots-in-an-epoch 32 +~/.foundry/bin/anvil --host 0.0.0.0 --no-cors --no-mining --slots-in-an-epoch 32 --silent diff --git a/processor/Cargo.toml b/processor/Cargo.toml index cbc022a1..f90f6117 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -84,7 +84,7 @@ serai-docker-tests = { path = "../tests/docker" } secp256k1 = ["k256", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] -ethereum = ["secp256k1", "ethereum-serai"] +ethereum = ["secp256k1", "ethereum-serai/tests"] ed25519 = ["dalek-ff-group", "frost/ed25519"] monero = ["ed25519", "monero-serai", "serai-client/monero"] diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 4de08837..3bb012ca 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/src/networks/ethereum.rs @@ -124,7 +124,7 @@ impl SignableTransaction for RouterCommand { } #[async_trait] -impl TransactionTrait> for Transaction { +impl TransactionTrait> for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { self.hash.0 @@ -157,7 +157,7 @@ impl Epoch { } #[async_trait] -impl Block> for Epoch { +impl Block> for Epoch { type Id = [u8; 
32]; fn id(&self) -> [u8; 32] { self.end_hash @@ -170,7 +170,7 @@ impl Block> for Epoch { } } -impl Output> for EthereumInInstruction { +impl Output> for EthereumInInstruction { type Id = [u8; 32]; fn kind(&self) -> OutputType { @@ -282,8 +282,8 @@ impl EventualityTrait for Eventuality { } } -#[derive(Clone, Debug)] -pub struct Ethereum { +#[derive(Clone)] +pub struct Ethereum { // This DB is solely used to access the first key generated, as needed to determine the Router's // address. Accordingly, all methods present are consistent to a Serai chain with a finalized // first key (regardless of local state), and this is safe. @@ -292,20 +292,26 @@ pub struct Ethereum { deployer: Deployer, router: Arc>>, } -impl PartialEq for Ethereum { +impl PartialEq for Ethereum { fn eq(&self, _other: &Ethereum) -> bool { true } } -impl Ethereum { +impl fmt::Debug for Ethereum { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt + .debug_struct("Ethereum") + .field("deployer", &self.deployer) + .field("router", &self.router) + .finish_non_exhaustive() + } +} +impl Ethereum { pub async fn new(db: D, url: String) -> Self { let provider = Arc::new(RootProvider::new( ClientBuilder::default().transport(SimpleRequest::new(url), true), )); - #[cfg(test)] // TODO: Move to test code - provider.raw_request::<_, ()>("evm_setAutomine".into(), false).await.unwrap(); - let mut deployer = Deployer::new(provider.clone()).await; while !matches!(deployer, Ok(Some(_))) { log::error!("Deployer wasn't deployed yet or networking error"); @@ -362,7 +368,7 @@ impl Ethereum { } #[async_trait] -impl Network for Ethereum { +impl Network for Ethereum { type Curve = Secp256k1; type Transaction = Transaction; @@ -479,7 +485,8 @@ impl Network for Ethereum { // Grab the key at the end of the epoch let key_at_end_of_block = loop { match router.key_at_end_of_block(block.start + 31).await { - Ok(key) => break key, + Ok(Some(key)) => break key, + Ok(None) => return vec![], Err(e) => { 
log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); sleep(Duration::from_secs(5)).await; @@ -491,17 +498,7 @@ impl Network for Ethereum { let mut all_events = vec![]; let mut top_level_txids = HashSet::new(); for erc20_addr in [DAI] { - let erc20 = loop { - let Ok(Some(erc20)) = Erc20::new(self.provider.clone(), erc20_addr).await else { - log::error!( - "couldn't connect to Ethereum node for an ERC20: {}", - hex::encode(erc20_addr) - ); - sleep(Duration::from_secs(5)).await; - continue; - }; - break erc20; - }; + let erc20 = Erc20::new(self.provider.clone(), erc20_addr); for block in block.start .. (block.start + 32) { let transfers = loop { @@ -821,6 +818,7 @@ impl Network for Ethereum { .provider .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) .await + .unwrap() .unwrap(); }; @@ -830,20 +828,26 @@ impl Network for Ethereum { .to_block(((block + 1) * 32) - 1) .topic1(nonce); let logs = self.provider.get_logs(&filter).await.unwrap(); - self.provider.get_transaction_by_hash(logs[0].transaction_hash.unwrap()).await.unwrap() + self + .provider + .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) + .await + .unwrap() + .unwrap() } } } #[cfg(test)] async fn mine_block(&self) { - self.provider.raw_request::<_, ()>("anvil_mine".into(), [32]).await.unwrap(); + self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); } #[cfg(test)] async fn test_send(&self, send_to: Self::Address) -> Self::Block { use rand_core::OsRng; use ciphersuite::group::ff::Field; + use ethereum_serai::alloy_sol_types::SolCall; let key = ::F::random(&mut OsRng); let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); @@ -858,15 +862,22 @@ impl Network for Ethereum { .await .unwrap(); + let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); let tx = ethereum_serai::alloy_consensus::TxLegacy { chain_id: None, nonce: 0, - gas_price: 100_000_000_000u128, - gas_limit: 21_0000u128, + 
gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, to: ethereum_serai::alloy_core::primitives::TxKind::Call(send_to.0.into()), // 1 ETH - value: U256::from_str_radix("1000000000000000000", 10).unwrap(), - input: vec![].into(), + value, + input: ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + value, + vec![].into(), + )) + .abi_encode() + .into(), }; use ethereum_serai::alloy_consensus::SignableTransaction; diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index 8f730dbd..3d4d6d4c 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -1,4 +1,4 @@ -use core::time::Duration; +use core::{time::Duration, pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::OsRng; @@ -82,8 +82,9 @@ async fn spend( } } -pub async fn test_addresses(network: N) -where +pub async fn test_addresses( + new_network: impl Fn(MemDb) -> Pin>>, +) where >::Addendum: From<()>, { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); @@ -92,12 +93,14 @@ where } let key = keys[&Participant::new(1).unwrap()].group_key(); + let mut db = MemDb::new(); + let network = new_network(db.clone()).await; + // Mine blocks so there's a confirmed block for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; } - let mut db = MemDb::new(); let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let mut txn = db.txn(); diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index e2bfdc8a..20aa1083 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -3,6 +3,8 @@ use dockertest::{ TestBodySpecification, DockerOperations, DockerTest, }; +use serai_db::MemDb; + #[cfg(feature = "bitcoin")] mod bitcoin { use std::sync::Arc; @@ -33,8 +35,6 @@ mod bitcoin { sync::Mutex, }; - use serai_db::MemDb; - use super::*; use crate::{ networks::{Network, Bitcoin, Output, OutputType, Block}, @@ -57,7 +57,7 @@ mod bitcoin { fn test_receive_data_from_input() { let docker = spawn_bitcoin(); docker.run(|ops| async move { - let btc = bitcoin(&ops).await; + let btc = bitcoin(&ops).await(MemDb::new()).await; // generate a multisig address to receive the coins let mut keys = frost::tests::key_gen::<_, ::Curve>(&mut OsRng) @@ -208,23 +208,26 @@ mod bitcoin { test } - async fn bitcoin(ops: &DockerOperations) -> Bitcoin { + async fn bitcoin( + ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-bitcoin").host_port(8332).unwrap(); - let bitcoin = Bitcoin::new(format!("http://serai:seraidex@{}:{}", handle.0, handle.1)).await; + let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); + let bitcoin = Bitcoin::new(url.clone()).await; bitcoin.fresh_chain().await; - bitcoin + move |_db| Box::pin(Bitcoin::new(url.clone())) } - test_network!( + test_utxo_network!( Bitcoin, spawn_bitcoin, bitcoin, bitcoin_key_gen, bitcoin_scanner, + bitcoin_no_deadlock_in_multisig_completed, bitcoin_signer, bitcoin_wallet, bitcoin_addresses, - bitcoin_no_deadlock_in_multisig_completed, ); } @@ -252,24 +255,181 @@ mod monero { test } - async fn monero(ops: &DockerOperations) -> Monero { + async fn monero( + 
ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>> { let handle = ops.handle("serai-dev-monero").host_port(18081).unwrap(); - let monero = Monero::new(format!("http://serai:seraidex@{}:{}", handle.0, handle.1)).await; + let url = format!("http://serai:seraidex@{}:{}", handle.0, handle.1); + let monero = Monero::new(url.clone()).await; while monero.get_latest_block_number().await.unwrap() < 150 { monero.mine_block().await; } - monero + move |_db| Box::pin(Monero::new(url.clone())) } - test_network!( + test_utxo_network!( Monero, spawn_monero, monero, monero_key_gen, monero_scanner, + monero_no_deadlock_in_multisig_completed, monero_signer, monero_wallet, monero_addresses, - monero_no_deadlock_in_multisig_completed, + ); +} + +#[cfg(feature = "ethereum")] +mod ethereum { + use super::*; + + use ciphersuite::{Ciphersuite, Secp256k1}; + + use serai_client::validator_sets::primitives::Session; + + use crate::networks::Ethereum; + + fn spawn_ethereum() -> DockerTest { + serai_docker_tests::build("ethereum".to_string()); + + let composition = TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), + ) + .set_start_policy(StartPolicy::Strict) + .set_log_options(Some(LogOptions { + action: LogAction::Forward, + policy: LogPolicy::OnError, + source: LogSource::Both, + })) + .set_publish_all_ports(true); + + let mut test = DockerTest::new(); + test.provide_container(composition); + test + } + + async fn ethereum( + ops: &DockerOperations, + ) -> impl Fn(MemDb) -> Pin>>> { + use std::sync::Arc; + use ethereum_serai::{ + alloy_core::primitives::U256, + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + deployer::Deployer, + }; + + let handle = ops.handle("serai-dev-ethereum").host_port(8545).unwrap(); + let url = format!("http://{}:{}", handle.0, handle.1); + tokio::time::sleep(core::time::Duration::from_secs(15)).await; + + { + let provider 
= Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), + )); + provider.raw_request::<_, ()>("evm_setAutomine".into(), [false]).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + // Perform deployment + { + // Make sure the Deployer constructor returns None, as it doesn't exist yet + assert!(Deployer::new(provider.clone()).await.unwrap().is_none()); + + // Deploy the Deployer + let tx = Deployer::deployment_tx(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + //tokio::time::sleep(core::time::Duration::from_secs(15)).await; + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + + let _ = Deployer::new(provider.clone()) + .await + .expect("network error") + .expect("deployer wasn't deployed"); + } + } + + move |db| { + let url = url.clone(); + Box::pin(async move { + { + let db = db.clone(); + let url = url.clone(); + // Spawn a task to deploy the proper Router when the time comes + tokio::spawn(async move { + let key = loop { + let Some(key) = crate::key_gen::NetworkKeyDb::get(&db, Session(0)) else { + tokio::time::sleep(core::time::Duration::from_secs(1)).await; + continue; + }; + break ethereum_serai::crypto::PublicKey::new( + Secp256k1::read_G(&mut key.as_slice()).unwrap(), + ) + .unwrap(); + }; + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url.clone()), true), + )); + let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); + 
+ let mut tx = deployer.deploy_router(&key); + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.unwrap(); + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + + let _router = deployer.find_router(provider.clone(), &key).await.unwrap().unwrap(); + }); + } + + Ethereum::new(db, url.clone()).await + }) + } + } + + test_network!( + Ethereum, + spawn_ethereum, + ethereum, + ethereum_key_gen, + ethereum_scanner, + ethereum_no_deadlock_in_multisig_completed, ); } diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 974be10b..26b49635 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -1,22 +1,18 @@ use std::sync::OnceLock; mod key_gen; -pub(crate) use key_gen::test_key_gen; mod scanner; -pub(crate) use scanner::{test_scanner, test_no_deadlock_in_multisig_completed}; mod signer; -pub(crate) use signer::{sign, test_signer}; +pub(crate) use signer::sign; mod cosigner; mod batch_signer; mod wallet; -pub(crate) use wallet::test_wallet; mod addresses; -pub(crate) use addresses::test_addresses; // Effective Once static INIT_LOGGER_CELL: OnceLock<()> = OnceLock::new(); @@ -27,22 +23,21 @@ fn init_logger() { #[macro_export] macro_rules! 
test_network { ( - $N: ident, + $N: ty, $docker: ident, $network: ident, $key_gen: ident, $scanner: ident, - $signer: ident, - $wallet: ident, - $addresses: ident, $no_deadlock_in_multisig_completed: ident, ) => { + use core::{pin::Pin, future::Future}; use $crate::tests::{ - init_logger, test_key_gen, test_scanner, test_no_deadlock_in_multisig_completed, test_signer, - test_wallet, test_addresses, + init_logger, + key_gen::test_key_gen, + scanner::{test_scanner, test_no_deadlock_in_multisig_completed}, }; - // This doesn't interact with a node and accordingly doesn't need to be run + // This doesn't interact with a node and accordingly doesn't need to be spawn one #[tokio::test] async fn $key_gen() { init_logger(); @@ -54,34 +49,8 @@ macro_rules! test_network { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_scanner($network(&ops).await).await; - }); - } - - #[test] - fn $signer() { - init_logger(); - let docker = $docker(); - docker.run(|ops| async move { - test_signer($network(&ops).await).await; - }); - } - - #[test] - fn $wallet() { - init_logger(); - let docker = $docker(); - docker.run(|ops| async move { - test_wallet($network(&ops).await).await; - }); - } - - #[test] - fn $addresses() { - init_logger(); - let docker = $docker(); - docker.run(|ops| async move { - test_addresses($network(&ops).await).await; + let new_network = $network(&ops).await; + test_scanner(new_network).await; }); } @@ -90,7 +59,57 @@ macro_rules! test_network { init_logger(); let docker = $docker(); docker.run(|ops| async move { - test_no_deadlock_in_multisig_completed($network(&ops).await).await; + let new_network = $network(&ops).await; + test_no_deadlock_in_multisig_completed(new_network).await; + }); + } + }; +} + +#[macro_export] +macro_rules! 
test_utxo_network { + ( + $N: ty, + $docker: ident, + $network: ident, + $key_gen: ident, + $scanner: ident, + $no_deadlock_in_multisig_completed: ident, + $signer: ident, + $wallet: ident, + $addresses: ident, + ) => { + use $crate::tests::{signer::test_signer, wallet::test_wallet, addresses::test_addresses}; + + test_network!($N, $docker, $network, $key_gen, $scanner, $no_deadlock_in_multisig_completed,); + + #[test] + fn $signer() { + init_logger(); + let docker = $docker(); + docker.run(|ops| async move { + let new_network = $network(&ops).await; + test_signer(new_network).await; + }); + } + + #[test] + fn $wallet() { + init_logger(); + let docker = $docker(); + docker.run(|ops| async move { + let new_network = $network(&ops).await; + test_wallet(new_network).await; + }); + } + + #[test] + fn $addresses() { + init_logger(); + let docker = $docker(); + docker.run(|ops| async move { + let new_network = $network(&ops).await; + test_addresses(new_network).await; }); } }; diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 42756d8b..16885dab 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -1,21 +1,23 @@ -use core::time::Duration; +use core::{pin::Pin, time::Duration, future::Future}; use std::sync::Arc; -use ciphersuite::Ciphersuite; use rand_core::OsRng; +use ciphersuite::{group::GroupEncoding, Ciphersuite}; use frost::{Participant, tests::key_gen}; use tokio::{sync::Mutex, time::timeout}; use serai_db::{DbTxn, Db, MemDb}; +use serai_client::validator_sets::primitives::Session; use crate::{ - networks::{OutputType, Output, Block, UtxoNetwork}, + networks::{OutputType, Output, Block, Network}, + key_gen::NetworkKeyDb, multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, }; -pub async fn new_scanner( +pub async fn new_scanner( network: &N, db: &D, group_key: ::G, @@ -40,18 +42,27 @@ pub async fn new_scanner( scanner } -pub async fn test_scanner(network: N) { +pub async fn test_scanner( + 
new_network: impl Fn(MemDb) -> Pin>>, +) { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); N::tweak_keys(&mut keys); let group_key = keys.group_key(); + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &group_key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + let network = new_network(db.clone()).await; + // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; } - let db = MemDb::new(); let first = Arc::new(Mutex::new(true)); let scanner = new_scanner(&network, &db, group_key, &first).await; @@ -101,13 +112,17 @@ pub async fn test_scanner(network: N) { .is_err()); } -pub async fn test_no_deadlock_in_multisig_completed(network: N) { +pub async fn test_no_deadlock_in_multisig_completed( + new_network: impl Fn(MemDb) -> Pin>>, +) { + let mut db = MemDb::new(); + let network = new_network(db.clone()).await; + // Mine blocks so there's a confirmed block for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; } - let mut db = MemDb::new(); let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 524c5d29..85444d63 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -1,3 +1,4 @@ +use core::{pin::Pin, future::Future}; use std::collections::HashMap; use rand_core::{RngCore, OsRng}; @@ -153,8 +154,9 @@ pub async fn sign( typed_claim } -pub async fn test_signer(network: N) -where +pub async fn test_signer( + new_network: impl Fn(MemDb) -> Pin>>, +) where >::Addendum: From<()>, { let mut keys = key_gen(&mut OsRng); @@ -163,6 +165,9 @@ where } let key = keys[&Participant::new(1).unwrap()].group_key(); + let db = MemDb::new(); + let network = new_network(db).await; + let outputs = network .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) .await; diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index 4600fcbe..acd3cb65 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -1,4 +1,5 @@ -use std::{time::Duration, collections::HashMap}; +use core::{time::Duration, pin::Pin, future::Future}; +use std::collections::HashMap; use rand_core::OsRng; @@ -24,12 +25,9 @@ use crate::{ }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet(network: N) { - // Mine blocks so there's a confirmed block - for _ in 0 .. 
N::CONFIRMATIONS { - network.mine_block().await; - } - +pub async fn test_wallet( + new_network: impl Fn(MemDb) -> Pin>>, +) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); @@ -37,6 +35,13 @@ pub async fn test_wallet(network: N) { let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); + let network = new_network(db.clone()).await; + + // Mine blocks so there's a confirmed block + for _ in 0 .. N::CONFIRMATIONS { + network.mine_block().await; + } + let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); let (block_id, outputs) = { From 79a79db399b7cb96b710952f2f9faed8e5621d97 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 May 2024 15:50:07 -0400 Subject: [PATCH 110/126] Update dockertest specification --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66212c4a..719ed497 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2125,7 +2125,7 @@ dependencies = [ [[package]] name = "dockertest" version = "0.4.0" -source = "git+https://github.com/kayabaNerve/dockertest-rs?branch=arc#c0ea77997048f9edc9987984bbe20e43fac74e06" +source = "git+https://github.com/orcalabs/dockertest-rs?rev=4dd6ae24738aa6dc5c89444cc822ea4745517493#4dd6ae24738aa6dc5c89444cc822ea4745517493" dependencies = [ "anyhow", "async-trait", diff --git a/Cargo.toml b/Cargo.toml index 8a19d159..94b52ffb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -110,7 +110,7 @@ panic = "unwind" lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" } # Needed due to dockertest's usage of `Rc`s when we need `Arc`s -dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" } +dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" } # wasmtime pulls in an old 
version for this zstd = { path = "patches/zstd" } From 02c4417a464ef162636036942987e40d01b54bab Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 10 May 2024 15:57:05 -0400 Subject: [PATCH 111/126] Update no_deadlock_in_multisig test to set the initial key in the DB --- processor/src/tests/scanner.rs | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 16885dab..078a07d5 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -126,25 +126,33 @@ pub async fn test_no_deadlock_in_multisig_completed( let (mut scanner, current_keys) = Scanner::new(network.clone(), db.clone()); assert!(current_keys.is_empty()); - let mut txn = db.txn(); // Register keys to cause Block events at CONFIRMATIONS (dropped since first keys), // CONFIRMATIONS + 1, and CONFIRMATIONS + 2 for i in 0 .. 3 { + let key = { + let mut keys = key_gen(&mut OsRng); + for keys in keys.values_mut() { + N::tweak_keys(keys); + } + let key = keys[&Participant::new(1).unwrap()].group_key(); + if i == 0 { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + key + }; + + let mut txn = db.txn(); scanner .register_key( &mut txn, network.get_latest_block_number().await.unwrap() + N::CONFIRMATIONS + i, - { - let mut keys = key_gen(&mut OsRng); - for keys in keys.values_mut() { - N::tweak_keys(keys); - } - keys[&Participant::new(1).unwrap()].group_key() - }, + key, ) .await; + txn.commit(); } - txn.commit(); for _ in 0 .. 
(3 * N::CONFIRMATIONS) { network.mine_block().await; From d27d93480aa8a849d84214ad4c71d83ce6fea0c1 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 11 May 2024 00:11:14 -0400 Subject: [PATCH 112/126] Get processor signer/wallet tests working for Ethereum They are handicapped by the fact Ethereum self-sends don't show up as outputs, yet that's fundamental (unless we add a *harmful* fallback function). --- deny.toml | 2 +- .../src/multisigs/scheduler/smart_contract.rs | 4 +- processor/src/networks/ethereum.rs | 108 +++++++++--------- processor/src/networks/mod.rs | 7 +- processor/src/tests/literal/mod.rs | 2 + processor/src/tests/mod.rs | 51 ++++++--- processor/src/tests/signer.rs | 96 +++++++++------- processor/src/tests/wallet.rs | 74 ++++++++---- 8 files changed, 203 insertions(+), 141 deletions(-) diff --git a/deny.toml b/deny.toml index 60331289..d6972d5e 100644 --- a/deny.toml +++ b/deny.toml @@ -101,5 +101,5 @@ allow-git = [ "https://github.com/serai-dex/substrate", "https://github.com/alloy-rs/alloy", "https://github.com/monero-rs/base58-monero", - "https://github.com/kayabaNerve/dockertest-rs", + "https://github.com/orcalabs/dockertest-rs", ] diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs index 27268b82..4f48e391 100644 --- a/processor/src/multisigs/scheduler/smart_contract.rs +++ b/processor/src/multisigs/scheduler/smart_contract.rs @@ -116,7 +116,7 @@ impl> SchedulerTrait for Scheduler { assert!(self.coins.contains(&utxo.balance().coin)); } - let mut nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + let mut nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); let mut plans = vec![]; for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { // Once we rotate, all further payments should be scheduled via the new multisig @@ -179,7 +179,7 @@ impl> SchedulerTrait for Scheduler { .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) .unwrap_or(self.key); - let 
nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + let nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); LastNonce::set(txn, &(nonce + 1)); Plan { key: current_key, diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 3bb012ca..f3d562d7 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/src/networks/ethereum.rs @@ -719,22 +719,6 @@ impl Network for Ethereum { // Publish this using a dummy account we fund with magic RPC commands #[cfg(test)] { - use rand_core::OsRng; - use ciphersuite::group::ff::Field; - - let key = ::F::random(&mut OsRng); - let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); - - // Set a 1.1 ETH balance - self - .provider - .raw_request::<_, ()>( - "anvil_setBalance".into(), - [Address(address).to_string(), "1100000000000000000".into()], - ) - .await - .unwrap(); - let router = self.router().await; let router = router.as_ref().unwrap(); @@ -747,17 +731,30 @@ impl Network for Ethereum { completion.signature(), ), }; - tx.gas_price = 100_000_000_000u128; + tx.gas_limit = 1_000_000u64.into(); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); - use ethereum_serai::alloy_consensus::SignableTransaction; - let sig = - k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) - .sign_prehash_recoverable(tx.signature_hash().as_ref()) + if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() { + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [ + tx.recover_signer().unwrap().to_string(), + (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(), + ], + ) + .await .unwrap(); - let mut bytes = vec![]; - tx.encode_with_signature_fields(&sig.into(), &mut bytes); - let _ = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + 
tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap(); + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + } Ok(()) } @@ -801,41 +798,50 @@ impl Network for Ethereum { block: usize, eventuality: &Self::Eventuality, ) -> Self::Transaction { - match eventuality.1 { - RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { - let router = self.router().await; - let router = router.as_ref().unwrap(); + // We mine 96 blocks to ensure the 32 blocks relevant are finalized + // Back-check the prior two epochs in response to this + // TODO: Review why this is sub(3) and not sub(2) + for block in block.saturating_sub(3) ..= block { + match eventuality.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + let router = self.router().await; + let router = router.as_ref().unwrap(); - let block = u64::try_from(block).unwrap(); - let filter = router - .key_updated_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - if let Some(log) = logs.first() { + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap() + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if logs.is_empty() { + continue; + } return self .provider - .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + 
.get_transaction_by_hash(logs[0].transaction_hash.unwrap()) .await .unwrap() .unwrap(); - }; - - let filter = router - .executed_filter() - .from_block(block * 32) - .to_block(((block + 1) * 32) - 1) - .topic1(nonce); - let logs = self.provider.get_logs(&filter).await.unwrap(); - self - .provider - .get_transaction_by_hash(logs[0].transaction_hash.unwrap()) - .await - .unwrap() - .unwrap() + } } } + panic!("couldn't find completion in any three of checked blocks"); } #[cfg(test)] diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs index 803ed40a..ee3cd24a 100644 --- a/processor/src/networks/mod.rs +++ b/processor/src/networks/mod.rs @@ -432,9 +432,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { let plan_id = plan.id(); let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; - let theoretical_change_amount = + let theoretical_change_amount = if change.is_some() { inputs.iter().map(|input| input.balance().amount.0).sum::() - - payments.iter().map(|payment| payment.balance.amount.0).sum::(); + payments.iter().map(|payment| payment.balance.amount.0).sum::() + } else { + 0 + }; let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { // This Plan is not fulfillable diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 20aa1083..cecd5a3b 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -431,5 +431,7 @@ mod ethereum { ethereum_key_gen, ethereum_scanner, ethereum_no_deadlock_in_multisig_completed, + ethereum_signer, + ethereum_wallet, ); } diff --git a/processor/src/tests/mod.rs b/processor/src/tests/mod.rs index 26b49635..7ab57bde 100644 --- a/processor/src/tests/mod.rs +++ b/processor/src/tests/mod.rs @@ -29,12 +29,16 @@ macro_rules! 
test_network { $key_gen: ident, $scanner: ident, $no_deadlock_in_multisig_completed: ident, + $signer: ident, + $wallet: ident, ) => { use core::{pin::Pin, future::Future}; use $crate::tests::{ init_logger, key_gen::test_key_gen, scanner::{test_scanner, test_no_deadlock_in_multisig_completed}, + signer::test_signer, + wallet::test_wallet, }; // This doesn't interact with a node and accordingly doesn't need to be spawn one @@ -63,25 +67,6 @@ macro_rules! test_network { test_no_deadlock_in_multisig_completed(new_network).await; }); } - }; -} - -#[macro_export] -macro_rules! test_utxo_network { - ( - $N: ty, - $docker: ident, - $network: ident, - $key_gen: ident, - $scanner: ident, - $no_deadlock_in_multisig_completed: ident, - $signer: ident, - $wallet: ident, - $addresses: ident, - ) => { - use $crate::tests::{signer::test_signer, wallet::test_wallet, addresses::test_addresses}; - - test_network!($N, $docker, $network, $key_gen, $scanner, $no_deadlock_in_multisig_completed,); #[test] fn $signer() { @@ -102,6 +87,34 @@ macro_rules! test_utxo_network { test_wallet(new_network).await; }); } + }; +} + +#[macro_export] +macro_rules! 
test_utxo_network { + ( + $N: ty, + $docker: ident, + $network: ident, + $key_gen: ident, + $scanner: ident, + $no_deadlock_in_multisig_completed: ident, + $signer: ident, + $wallet: ident, + $addresses: ident, + ) => { + use $crate::tests::addresses::test_addresses; + + test_network!( + $N, + $docker, + $network, + $key_gen, + $scanner, + $no_deadlock_in_multisig_completed, + $signer, + $wallet, + ); #[test] fn $addresses() { diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 85444d63..77307ef2 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use rand_core::{RngCore, OsRng}; +use ciphersuite::group::GroupEncoding; use frost::{ Participant, ThresholdKeys, dkg::tests::{key_gen, clone_without}, @@ -17,14 +18,15 @@ use serai_client::{ use messages::sign::*; use crate::{ - Payment, Plan, - networks::{Output, Transaction, Eventuality, UtxoNetwork}, + Payment, + networks::{Output, Transaction, Eventuality, Network}, + key_gen::NetworkKeyDb, multisigs::scheduler::Scheduler, signer::Signer, }; #[allow(clippy::type_complexity)] -pub async fn sign( +pub async fn sign( network: N, session: Session, mut keys_txs: HashMap< @@ -154,57 +156,55 @@ pub async fn sign( typed_claim } -pub async fn test_signer( +pub async fn test_signer( new_network: impl Fn(MemDb) -> Pin>>, -) where - >::Addendum: From<()>, -{ +) { let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); - let db = MemDb::new(); - let network = new_network(db).await; + let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + } + let network = new_network(db.clone()).await; let outputs = network .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) .await; let sync_block = 
network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; - let amount = 2 * N::DUST; + let amount = (2 * N::DUST) + 1000; + let plan = { + let mut txn = db.txn(); + let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); + let payments = vec![Payment { + address: N::external_address(&network, key).await, + data: None, + balance: Balance { + coin: match N::NETWORK { + NetworkId::Serai => panic!("test_signer called with Serai"), + NetworkId::Bitcoin => Coin::Bitcoin, + NetworkId::Ethereum => Coin::Ether, + NetworkId::Monero => Coin::Monero, + }, + amount: Amount(amount), + }, + }]; + let mut plans = scheduler.schedule::(&mut txn, outputs.clone(), payments, key, false); + assert_eq!(plans.len(), 1); + plans.swap_remove(0) + }; + let mut keys_txs = HashMap::new(); let mut eventualities = vec![]; for (i, keys) in keys.drain() { - let (signable, eventuality) = network - .prepare_send( - sync_block, - Plan { - key, - inputs: outputs.clone(), - payments: vec![Payment { - address: N::external_address(&network, key).await, - data: None, - balance: Balance { - coin: match N::NETWORK { - NetworkId::Serai => panic!("test_signer called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - amount: Amount(amount), - }, - }], - change: Some(N::change_address(key).unwrap()), - scheduler_addendum: ().into(), - }, - 0, - ) - .await - .unwrap() - .tx - .unwrap(); + let (signable, eventuality) = + network.prepare_send(sync_block, plan.clone(), 0).await.unwrap().tx.unwrap(); eventualities.push(eventuality.clone()); keys_txs.insert(i, (keys, (signable, eventuality))); @@ -222,11 +222,21 @@ pub async fn test_signer( key, ) .await; - assert_eq!(outputs.len(), 2); - // Adjust the amount for the fees - let amount = amount - tx.fee(&network).await; - // Check either output since Monero will randomize its output order - assert!((outputs[0].balance().amount.0 == amount) || 
(outputs[1].balance().amount.0 == amount)); + // Don't run if Ethereum as the received output will revert by the contract + // (and therefore not actually exist) + if N::NETWORK != NetworkId::Ethereum { + assert_eq!(outputs.len(), 1 + usize::from(u8::from(plan.change.is_some()))); + // Adjust the amount for the fees + let amount = amount - tx.fee(&network).await; + if plan.change.is_some() { + // Check either output since Monero will randomize its output order + assert!( + (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) + ); + } else { + assert!(outputs[0].balance().amount.0 == amount); + } + } // Check the eventualities pass for eventuality in eventualities { diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index acd3cb65..86a27349 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -3,6 +3,7 @@ use std::collections::HashMap; use rand_core::OsRng; +use ciphersuite::group::GroupEncoding; use frost::{Participant, dkg::tests::key_gen}; use tokio::time::timeout; @@ -16,16 +17,17 @@ use serai_client::{ use crate::{ Payment, Plan, - networks::{Output, Transaction, Eventuality, Block, UtxoNetwork}, + networks::{Output, Transaction, Eventuality, Block, Network}, + key_gen::NetworkKeyDb, multisigs::{ scanner::{ScannerEvent, Scanner}, - scheduler::Scheduler, + scheduler::{self, Scheduler}, }, tests::sign, }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet( +pub async fn test_wallet( new_network: impl Fn(MemDb) -> Pin>>, ) { let mut keys = key_gen(&mut OsRng); @@ -35,6 +37,11 @@ pub async fn test_wallet( let key = keys[&Participant::new(1).unwrap()].group_key(); let mut db = MemDb::new(); + { + let mut txn = db.txn(); + NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); + txn.commit(); + } let network = new_network(db.clone()).await; // Mine blocks so there's a confirmed block @@ -98,7 +105,13 @@ pub async fn test_wallet( 
txn.commit(); assert_eq!(plans.len(), 1); assert_eq!(plans[0].key, key); - assert_eq!(plans[0].inputs, outputs); + if std::any::TypeId::of::() == + std::any::TypeId::of::>() + { + assert_eq!(plans[0].inputs, vec![]); + } else { + assert_eq!(plans[0].inputs, outputs); + } assert_eq!( plans[0].payments, vec![Payment { @@ -115,7 +128,7 @@ pub async fn test_wallet( } }] ); - assert_eq!(plans[0].change, Some(N::change_address(key).unwrap())); + assert_eq!(plans[0].change, N::change_address(key)); { let mut buf = vec![]; @@ -144,9 +157,22 @@ pub async fn test_wallet( let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let block = network.get_block(block_number).await.unwrap(); let outputs = network.get_outputs(&block, key).await; - assert_eq!(outputs.len(), 2); - let amount = amount - tx.fee(&network).await; - assert!((outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)); + + // Don't run if Ethereum as the received output will revert by the contract + // (and therefore not actually exist) + if N::NETWORK != NetworkId::Ethereum { + assert_eq!(outputs.len(), 1 + usize::from(u8::from(plans[0].change.is_some()))); + // Adjust the amount for the fees + let amount = amount - tx.fee(&network).await; + if plans[0].change.is_some() { + // Check either output since Monero will randomize its output order + assert!( + (outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount) + ); + } else { + assert!(outputs[0].balance().amount.0 == amount); + } + } for eventuality in eventualities { let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); @@ -157,21 +183,23 @@ pub async fn test_wallet( network.mine_block().await; } - match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { - ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => { - scanner.multisig_completed.send(false).unwrap(); - 
assert!(!is_retirement_block); - assert_eq!(block_id, block.id()); - assert_eq!(these_outputs, outputs); + if N::NETWORK != NetworkId::Ethereum { + match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { + ScannerEvent::Block { is_retirement_block, block: block_id, outputs: these_outputs } => { + scanner.multisig_completed.send(false).unwrap(); + assert!(!is_retirement_block); + assert_eq!(block_id, block.id()); + assert_eq!(these_outputs, outputs); + } + ScannerEvent::Completed(_, _, _, _, _) => { + panic!("unexpectedly got eventuality completion"); + } } - ScannerEvent::Completed(_, _, _, _, _) => { - panic!("unexpectedly got eventuality completion"); - } - } - // Check the Scanner DB can reload the outputs - let mut txn = db.txn(); - assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs); - scanner.release_lock().await; - txn.commit(); + // Check the Scanner DB can reload the outputs + let mut txn = db.txn(); + assert_eq!(scanner.ack_block(&mut txn, block.id()).await.1, outputs); + scanner.release_lock().await; + txn.commit(); + } } From af795864880a966e768cbdcb35f2d9870e69e514 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 14 May 2024 01:33:55 -0400 Subject: [PATCH 113/126] Fill out Ethereum functions in the processor Docker tests --- tests/docker/src/lib.rs | 2 +- tests/processor/Cargo.toml | 2 + tests/processor/src/lib.rs | 194 +++++++++++++++++++++++--------- tests/processor/src/networks.rs | 79 ++++++++++++- 4 files changed, 220 insertions(+), 57 deletions(-) diff --git a/tests/docker/src/lib.rs b/tests/docker/src/lib.rs index ee68b979..3493d502 100644 --- a/tests/docker/src/lib.rs +++ b/tests/docker/src/lib.rs @@ -124,7 +124,7 @@ pub fn build(name: String) { // Check any additionally specified paths let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { - "bitcoin" | "monero" => vec![], + "bitcoin" | "ethereum" | "monero" => vec![], "message-queue" => 
vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index 686dbcea..be27e7de 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -27,12 +27,14 @@ ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, fea dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } bitcoin-serai = { path = "../../coins/bitcoin" } +ethereum-serai = { path = "../../coins/ethereum" } monero-serai = { path = "../../coins/monero" } messages = { package = "serai-processor-messages", path = "../../processor/messages" } scale = { package = "parity-scale-codec", version = "3" } serai-client = { path = "../../substrate/client" } +serai-db = { path = "../../common/db", default-features = false } serai-message-queue = { path = "../../message-queue" } borsh = { version = "1", features = ["de_strict_order"] } diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index e400057a..66aa28c4 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -181,7 +181,28 @@ impl Coordinator { break; } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::{ + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + + loop { + if handle + .block_on(provider.raw_request::<_, ()>("evm_setAutomine".into(), [false])) + .is_ok() + { + break; + } + handle.block_on(tokio::time::sleep(core::time::Duration::from_secs(1))); + } + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; @@ -271,7 +292,45 @@ impl Coordinator { block.consensus_encode(&mut block_buf).unwrap(); (hash, block_buf) } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use 
ethereum_serai::{ + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_types::BlockNumberOrTag, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let start = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + .await + .unwrap() + .unwrap() + .header + .number + .unwrap(); + // We mine 96 blocks to mine one epoch, then cause its finalization + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + let end_of_epoch = start + 31; + let hash = provider + .get_block(BlockNumberOrTag::Number(end_of_epoch).into(), false) + .await + .unwrap() + .unwrap() + .header + .hash + .unwrap(); + + let state = provider + .raw_request::<_, String>("anvil_dumpState".into(), ()) + .await + .unwrap() + .into_bytes(); + (hash.into(), state) + } NetworkId::Monero => { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; use monero_serai::{ @@ -303,39 +362,6 @@ impl Coordinator { } } - pub async fn broadcast_block(&self, ops: &DockerOperations, block: &[u8]) { - let rpc_url = network_rpc(self.network, ops, &self.network_handle); - match self.network { - NetworkId::Bitcoin => { - use bitcoin_serai::rpc::Rpc; - - let rpc = - Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); - let res: Option = - rpc.rpc_call("submitblock", serde_json::json!([hex::encode(block)])).await.unwrap(); - if let Some(err) = res { - panic!("submitblock failed: {err}"); - } - } - NetworkId::Ethereum => todo!(), - NetworkId::Monero => { - use monero_serai::rpc::HttpRpc; - - let rpc = - HttpRpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Monero RPC"); - let res: serde_json::Value = rpc - .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) - .await - .unwrap(); - let err = 
res.get("error"); - if err.is_some() && (err.unwrap() != &serde_json::Value::Null) { - panic!("failed to submit Monero block: {res}"); - } - } - NetworkId::Serai => panic!("processor tests broadcasting block to Serai"), - } - } - pub async fn sync(&self, ops: &DockerOperations, others: &[Coordinator]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { @@ -345,13 +371,8 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); let to = rpc.get_latest_block_number().await.unwrap(); for coordinator in others { - let from = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) - .await - .expect("couldn't connect to the Bitcoin RPC") - .get_latest_block_number() - .await - .unwrap() + - 1; + let from = rpc.get_latest_block_number().await.unwrap() + 1; + for b in from ..= to { let mut buf = vec![]; rpc @@ -360,11 +381,40 @@ impl Coordinator { .unwrap() .consensus_encode(&mut buf) .unwrap(); - coordinator.broadcast_block(ops, &buf).await; + + let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); + let rpc = + Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); + + let res: Option = + rpc.rpc_call("submitblock", serde_json::json!([hex::encode(buf)])).await.unwrap(); + if let Some(err) = res { + panic!("submitblock failed: {err}"); + } } } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::{ + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let state = provider.raw_request::<_, String>("anvil_dumpState".into(), ()).await.unwrap(); + + for coordinator in others { + let rpc_url = network_rpc(coordinator.network, ops, 
&coordinator.network_handle); + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + provider.raw_request::<_, ()>("anvil_loadState".into(), &state).await.unwrap(); + } + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; @@ -378,12 +428,21 @@ impl Coordinator { .await .unwrap(); for b in from .. to { - coordinator - .broadcast_block( - ops, - &rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(), - ) - .await; + let block = + rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(); + + let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); + let rpc = HttpRpc::new(rpc_url) + .await + .expect("couldn't connect to the coordinator's Monero RPC"); + let res: serde_json::Value = rpc + .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) + .await + .unwrap(); + let err = res.get("error"); + if err.is_some() && (err.unwrap() != &serde_json::Value::Null) { + panic!("failed to submit Monero block: {res}"); + } } } } @@ -404,7 +463,19 @@ impl Coordinator { Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); rpc.send_raw_transaction(&Transaction::consensus_decode(&mut &*tx).unwrap()).await.unwrap(); } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::{ + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let _ = provider.send_raw_transaction(tx).await.unwrap(); + } NetworkId::Monero => { use monero_serai::{transaction::Transaction, rpc::HttpRpc}; @@ -445,7 +516,26 @@ impl Coordinator { None } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ethereum_serai::{ + 
alloy_simple_request_transport::SimpleRequest, + alloy_consensus::{TxLegacy, Signed}, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + let mut hash = [0; 32]; + hash.copy_from_slice(tx); + let tx = provider.get_transaction_by_hash(hash.into()).await.unwrap()?; + let (tx, sig, _) = Signed::::try_from(tx).unwrap().into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + Some(bytes) + } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 882b9e89..7a81062a 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -19,6 +19,7 @@ pub const RPC_USER: &str = "serai"; pub const RPC_PASS: &str = "seraidex"; pub const BTC_PORT: u32 = 8332; +pub const ETH_PORT: u32 = 8545; pub const XMR_PORT: u32 = 18081; pub fn bitcoin_instance() -> (TestBodySpecification, u32) { @@ -31,6 +32,17 @@ pub fn bitcoin_instance() -> (TestBodySpecification, u32) { (composition, BTC_PORT) } +pub fn ethereum_instance() -> (TestBodySpecification, u32) { + serai_docker_tests::build("ethereum".to_string()); + + let composition = TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum").pull_policy(PullPolicy::Never), + ) + .set_start_policy(StartPolicy::Strict) + .set_publish_all_ports(true); + (composition, ETH_PORT) +} + pub fn monero_instance() -> (TestBodySpecification, u32) { serai_docker_tests::build("monero".to_string()); @@ -45,7 +57,7 @@ pub fn monero_instance() -> (TestBodySpecification, u32) { pub fn network_instance(network: NetworkId) -> (TestBodySpecification, u32) { match network { NetworkId::Bitcoin => bitcoin_instance(), - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => ethereum_instance(), 
NetworkId::Monero => monero_instance(), NetworkId::Serai => { panic!("Serai is not a valid network to spawn an instance of for a processor") @@ -58,7 +70,7 @@ pub fn network_rpc(network: NetworkId, ops: &DockerOperations, handle: &str) -> .handle(handle) .host_port(match network { NetworkId::Bitcoin => BTC_PORT, - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => ETH_PORT, NetworkId::Monero => XMR_PORT, NetworkId::Serai => panic!("getting port for external network yet it was Serai"), }) @@ -70,7 +82,7 @@ pub fn confirmations(network: NetworkId) -> usize { use processor::networks::*; match network { NetworkId::Bitcoin => Bitcoin::CONFIRMATIONS, - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => Ethereum::::CONFIRMATIONS, NetworkId::Monero => Monero::CONFIRMATIONS, NetworkId::Serai => panic!("getting confirmations required for Serai"), } @@ -83,6 +95,10 @@ pub enum Wallet { public_key: bitcoin_serai::bitcoin::PublicKey, input_tx: bitcoin_serai::bitcoin::Transaction, }, + Ethereum { + key: ::F, + nonce: u64, + }, Monero { handle: String, spend_key: Zeroizing, @@ -138,7 +154,37 @@ impl Wallet { Wallet::Bitcoin { private_key, public_key, input_tx: funds } } - NetworkId::Ethereum => todo!(), + NetworkId::Ethereum => { + use ciphersuite::{group::ff::Field, Ciphersuite, Secp256k1}; + use ethereum_serai::{ + alloy_core::primitives::{U256, Address}, + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + alloy_network::Ethereum, + }; + + let key = ::F::random(&mut OsRng); + let address = + ethereum_serai::crypto::address(&(::generator() * key)); + + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address.into()).to_string(), { + let nine_decimals = U256::from(1_000_000_000u64); + (U256::from(100u64) * nine_decimals * 
nine_decimals).to_string() + }], + ) + .await + .unwrap(); + + Wallet::Ethereum { key, nonce: 0 } + } NetworkId::Monero => { use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, scalar::Scalar}; @@ -282,6 +328,24 @@ impl Wallet { (buf, Balance { coin: Coin::Bitcoin, amount: Amount(AMOUNT) }) } + Wallet::Ethereum { key, ref mut nonce } => { + /* + use ethereum_serai::alloy_core::primitives::U256; + + let eight_decimals = U256::from(100_000_000u64); + let nine_decimals = eight_decimals * U256::from(10u64); + let eighteen_decimals = nine_decimals * nine_decimals; + + let tx = todo!("send to router"); + + *nonce += 1; + (tx, Balance { coin: Coin::Ether, amount: Amount(u64::try_from(eight_decimals).unwrap()) }) + */ + let _ = key; + let _ = nonce; + todo!() + } + Wallet::Monero { handle, ref spend_key, ref view_pair, ref mut inputs } => { use curve25519_dalek::constants::ED25519_BASEPOINT_POINT; use monero_serai::{ @@ -374,6 +438,13 @@ impl Wallet { ) .unwrap() } + Wallet::Ethereum { key, .. } => { + use ciphersuite::{Ciphersuite, Secp256k1}; + ExternalAddress::new( + ethereum_serai::crypto::address(&(Secp256k1::generator() * key)).into(), + ) + .unwrap() + } Wallet::Monero { view_pair, .. 
} => { use monero_serai::wallet::address::{Network, AddressSpec}; ExternalAddress::new( From ae8a27b8767ac4e5892af854b2e48c78d6851570 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 14 May 2024 01:42:18 -0400 Subject: [PATCH 114/126] Add our own alloy meta module to deduplicate alloy prefixes --- Cargo.lock | 2 ++ coins/ethereum/src/lib.rs | 19 ++++++----- processor/src/networks/ethereum.rs | 26 +++++++------- processor/src/tests/literal/mod.rs | 10 +++--- tests/processor/src/lib.rs | 54 +++++++++++++++--------------- tests/processor/src/networks.rs | 14 ++++---- 6 files changed, 67 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 719ed497..75d6b7cc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8068,11 +8068,13 @@ dependencies = [ "curve25519-dalek", "dkg", "dockertest", + "ethereum-serai", "hex", "monero-serai", "parity-scale-codec", "rand_core", "serai-client", + "serai-db", "serai-docker-tests", "serai-message-queue", "serai-message-queue-tests", diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index eda54c72..322b5f83 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,14 +1,17 @@ use thiserror::Error; -pub use alloy_core; -pub use alloy_sol_types; +pub mod alloy { + pub use alloy_core::primitives; + pub use alloy_core as core; + pub use alloy_sol_types as sol_types; -pub use alloy_consensus; -pub use alloy_network; -pub use alloy_rpc_types; -pub use alloy_simple_request_transport; -pub use alloy_rpc_client; -pub use alloy_provider; + pub use alloy_consensus as consensus; + pub use alloy_network as network; + pub use alloy_rpc_types as rpc_types; + pub use alloy_simple_request_transport as simple_request_transport; + pub use alloy_rpc_client as rpc_client; + pub use alloy_provider as provider; +} pub mod crypto; diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index f3d562d7..7ffe7041 100644 --- a/processor/src/networks/ethereum.rs +++ 
b/processor/src/networks/ethereum.rs @@ -11,11 +11,13 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; use frost::ThresholdKeys; use ethereum_serai::{ - alloy_core::primitives::U256, - alloy_rpc_types::{BlockNumberOrTag, Transaction}, - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, + alloy::{ + primitives::U256, + rpc_types::{BlockNumberOrTag, Transaction}, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + }, crypto::{PublicKey, Signature}, erc20::Erc20, deployer::Deployer, @@ -23,7 +25,7 @@ use ethereum_serai::{ machine::*, }; #[cfg(test)] -use ethereum_serai::alloy_core::primitives::B256; +use ethereum_serai::alloy::primitives::B256; use tokio::{ time::sleep, @@ -112,7 +114,7 @@ impl TryInto> for Address { impl fmt::Display for Address { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - ethereum_serai::alloy_core::primitives::Address::from(self.0).fmt(f) + ethereum_serai::alloy::primitives::Address::from(self.0).fmt(f) } } @@ -181,7 +183,7 @@ impl Output> for EthereumInInstruction { let mut id = [0; 40]; id[.. 
32].copy_from_slice(&self.id.0); id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); - *ethereum_serai::alloy_core::primitives::keccak256(id) + *ethereum_serai::alloy::primitives::keccak256(id) } fn tx_id(&self) -> [u8; 32] { self.id.0 @@ -853,7 +855,7 @@ impl Network for Ethereum { async fn test_send(&self, send_to: Self::Address) -> Self::Block { use rand_core::OsRng; use ciphersuite::group::ff::Field; - use ethereum_serai::alloy_sol_types::SolCall; + use ethereum_serai::alloy::sol_types::SolCall; let key = ::F::random(&mut OsRng); let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); @@ -869,12 +871,12 @@ impl Network for Ethereum { .unwrap(); let value = U256::from_str_radix("1000000000000000000", 10).unwrap(); - let tx = ethereum_serai::alloy_consensus::TxLegacy { + let tx = ethereum_serai::alloy::consensus::TxLegacy { chain_id: None, nonce: 0, gas_price: 1_000_000_000u128, gas_limit: 200_000u128, - to: ethereum_serai::alloy_core::primitives::TxKind::Call(send_to.0.into()), + to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()), // 1 ETH value, input: ethereum_serai::router::abi::inInstructionCall::new(( @@ -886,7 +888,7 @@ impl Network for Ethereum { .into(), }; - use ethereum_serai::alloy_consensus::SignableTransaction; + use ethereum_serai::alloy::consensus::SignableTransaction; let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) .sign_prehash_recoverable(tx.signature_hash().as_ref()) .unwrap(); diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index cecd5a3b..5c5f3203 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -314,10 +314,12 @@ mod ethereum { ) -> impl Fn(MemDb) -> Pin>>> { use std::sync::Arc; use ethereum_serai::{ - alloy_core::primitives::U256, - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, + alloy::{ + 
primitives::U256, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + }, deployer::Deployer, }; diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 66aa28c4..5e854272 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -182,11 +182,11 @@ impl Coordinator { } } NetworkId::Ethereum => { - use ethereum_serai::{ - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( @@ -293,12 +293,12 @@ impl Coordinator { (hash, block_buf) } NetworkId::Ethereum => { - use ethereum_serai::{ - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_types::BlockNumberOrTag, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_types::BlockNumberOrTag, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( @@ -395,11 +395,11 @@ impl Coordinator { } } NetworkId::Ethereum => { - use ethereum_serai::{ - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( @@ -464,11 +464,11 @@ impl Coordinator { rpc.send_raw_transaction(&Transaction::consensus_decode(&mut &*tx).unwrap()).await.unwrap(); } NetworkId::Ethereum => { - use ethereum_serai::{ - 
alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( @@ -517,12 +517,12 @@ impl Coordinator { } } NetworkId::Ethereum => { - use ethereum_serai::{ - alloy_simple_request_transport::SimpleRequest, - alloy_consensus::{TxLegacy, Signed}, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + consensus::{TxLegacy, Signed}, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let provider = RootProvider::<_, Ethereum>::new( diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 7a81062a..5b54cc01 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -156,12 +156,12 @@ impl Wallet { NetworkId::Ethereum => { use ciphersuite::{group::ff::Field, Ciphersuite, Secp256k1}; - use ethereum_serai::{ - alloy_core::primitives::{U256, Address}, - alloy_simple_request_transport::SimpleRequest, - alloy_rpc_client::ClientBuilder, - alloy_provider::{Provider, RootProvider}, - alloy_network::Ethereum, + use ethereum_serai::alloy::{ + primitives::{U256, Address}, + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, }; let key = ::F::random(&mut OsRng); @@ -330,7 +330,7 @@ impl Wallet { Wallet::Ethereum { key, ref mut nonce } => { /* - use ethereum_serai::alloy_core::primitives::U256; + use ethereum_serai::alloy::primitives::U256; let eight_decimals = U256::from(100_000_000u64); let nine_decimals = eight_decimals * U256::from(10u64); From 11ec9e353515950f882561c45f3a94ef06abe6e7 Mon 
Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 00:29:33 -0400 Subject: [PATCH 115/126] Ethereum processor docker tests, barring send We need the TX publication relay thingy for send to work (though that is the point the test fails at). --- Cargo.lock | 2 + coins/ethereum/Cargo.toml | 2 +- orchestration/src/coordinator.rs | 1 + orchestration/src/main.rs | 4 +- orchestration/src/message_queue.rs | 2 +- orchestration/src/processor.rs | 11 +- orchestration/src/serai.rs | 4 +- processor/src/key_gen.rs | 1 + processor/src/multisigs/mod.rs | 19 ++- processor/src/multisigs/scanner.rs | 4 +- .../src/multisigs/scheduler/smart_contract.rs | 2 +- processor/src/multisigs/scheduler/utxo.rs | 12 +- processor/src/networks/ethereum.rs | 2 +- tests/processor/Cargo.toml | 7 +- tests/processor/src/lib.rs | 136 +++++++++++++----- tests/processor/src/networks.rs | 119 ++++++++++++--- tests/processor/src/tests/batch.rs | 20 ++- tests/processor/src/tests/key_gen.rs | 2 +- tests/processor/src/tests/mod.rs | 8 +- tests/processor/src/tests/send.rs | 38 ++++- 20 files changed, 305 insertions(+), 91 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 75d6b7cc..bf98299d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3706,6 +3706,7 @@ dependencies = [ "elliptic-curve", "once_cell", "sha2", + "signature", ] [[package]] @@ -8070,6 +8071,7 @@ dependencies = [ "dockertest", "ethereum-serai", "hex", + "k256", "monero-serai", "parity-scale-codec", "rand_core", diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index dc30764e..f600c21d 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -46,4 +46,4 @@ tokio = { version = "1", features = ["macros"] } alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } [features] -tests = ["alloy-node-bindings"] +tests = ["alloy-node-bindings", "frost/tests"] diff --git a/orchestration/src/coordinator.rs 
b/orchestration/src/coordinator.rs index 13fdff59..26058886 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -17,6 +17,7 @@ pub fn coordinator( let longer_reattempts = if network == Network::Dev { "longer-reattempts" } else { "" }; let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( + "", network.release(), &format!("{db} {longer_reattempts}"), "serai-coordinator", diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 0e6c7cb0..1925b94c 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -137,7 +137,7 @@ WORKDIR /home/{user} } } -fn build_serai_service(release: bool, features: &str, package: &str) -> String { +fn build_serai_service(prelude: &str, release: bool, features: &str, package: &str) -> String { let profile = if release { "release" } else { "debug" }; let profile_flag = if release { "--release" } else { "" }; @@ -159,6 +159,8 @@ RUN apt install -y make protobuf-compiler # Add the wasm toolchain RUN rustup target add wasm32-unknown-unknown +{prelude} + # Add files for build ADD patches /serai/patches ADD common /serai/common diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index eb662b67..ea97a619 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -13,7 +13,7 @@ pub fn message_queue( monero_key: ::G, ) { let setup = mimalloc(Os::Debian).to_string() + - &build_serai_service(network.release(), network.db(), "serai-message-queue"); + &build_serai_service("", network.release(), network.db(), "serai-message-queue"); let env_vars = [ ("COORDINATOR_KEY", hex::encode(coordinator_key.to_bytes())), diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 8a2c8c77..85f7ec5f 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -17,6 +17,15 @@ pub fn processor( ) { let setup = mimalloc(Os::Debian).to_string() + &build_serai_service( + if coin 
== "ethereum" { + r#" +RUN cargo install svm-rs +RUN svm install 0.8.25 +RUN svm use 0.8.25 +"# + } else { + "" + }, network.release(), &format!("binaries {} {coin}", network.db()), "serai-processor", @@ -34,7 +43,7 @@ RUN apt install -y ca-certificates let hostname = format!("serai-{}-{coin}", network.label()); let port = match coin { "bitcoin" => 8332, - "ethereum" => return, // TODO + "ethereum" => 8545, "monero" => 18081, _ => panic!("unrecognized external network"), }; diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index 2e1e915c..e2f96f6a 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -11,9 +11,9 @@ pub fn serai( serai_key: &Zeroizing<::F>, ) { // Always builds in release for performance reasons - let setup = mimalloc(Os::Debian).to_string() + &build_serai_service(true, "", "serai-node"); + let setup = mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "", "serai-node"); let setup_fast_epoch = - mimalloc(Os::Debian).to_string() + &build_serai_service(true, "fast-epoch", "serai-node"); + mimalloc(Os::Debian).to_string() + &build_serai_service("", true, "fast-epoch", "serai-node"); let env_vars = [("KEY", hex::encode(serai_key.to_repr()))]; let mut env_vars_str = String::new(); diff --git a/processor/src/key_gen.rs b/processor/src/key_gen.rs index f1a5b47c..6976e225 100644 --- a/processor/src/key_gen.rs +++ b/processor/src/key_gen.rs @@ -512,6 +512,7 @@ impl KeyGen { ProcessorMessage::GeneratedKeyPair { id, substrate_key: generated_substrate_key.unwrap().to_bytes(), + // TODO: This can be made more efficient since tweaked keys may be a subset of keys network_key: generated_network_key.unwrap().to_bytes().as_ref().to_vec(), } } diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index 75c91675..12f01715 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -63,9 +63,22 @@ fn instruction_from_output( return (presumed_origin, None); } - let 
Ok(shorthand) = Shorthand::decode(&mut data) else { return (presumed_origin, None) }; - let Ok(instruction) = RefundableInInstruction::try_from(shorthand) else { - return (presumed_origin, None); + let shorthand = match Shorthand::decode(&mut data) { + Ok(shorthand) => shorthand, + Err(e) => { + info!("data in output {} wasn't valid shorthand: {e:?}", hex::encode(output.id())); + return (presumed_origin, None); + } + }; + let instruction = match RefundableInInstruction::try_from(shorthand) { + Ok(instruction) => instruction, + Err(e) => { + info!( + "shorthand in output {} wasn't convertible to a RefundableInInstruction: {e:?}", + hex::encode(output.id()) + ); + return (presumed_origin, None); + } }; let mut balance = output.balance(); diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index 3d28f3e8..1b25e108 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -279,6 +279,8 @@ impl ScannerHandle { activation_number: usize, key: ::G, ) { + info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); + let mut scanner_lock = self.scanner.write().await; let scanner = scanner_lock.as_mut().unwrap(); assert!( @@ -286,8 +288,6 @@ impl ScannerHandle { "activation block of new keys was already scanned", ); - info!("Registering key {} in scanner at {activation_number}", hex::encode(key.to_bytes())); - if scanner.keys.is_empty() { assert!(scanner.ram_scanned.is_none()); scanner.ram_scanned = Some(activation_number); diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs index 4f48e391..3da8acf4 100644 --- a/processor/src/multisigs/scheduler/smart_contract.rs +++ b/processor/src/multisigs/scheduler/smart_contract.rs @@ -116,7 +116,7 @@ impl> SchedulerTrait for Scheduler { assert!(self.coins.contains(&utxo.balance().coin)); } - let mut nonce = LastNonce::get(txn).map_or(1, |nonce| nonce + 1); + let mut nonce = 
LastNonce::get(txn).unwrap_or(1); let mut plans = vec![]; for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { // Once we rotate, all further payments should be scheduled via the new multisig diff --git a/processor/src/multisigs/scheduler/utxo.rs b/processor/src/multisigs/scheduler/utxo.rs index e9aa3351..1865cab9 100644 --- a/processor/src/multisigs/scheduler/utxo.rs +++ b/processor/src/multisigs/scheduler/utxo.rs @@ -432,7 +432,7 @@ impl> Scheduler { } // If there's a UTXO to restore, restore it - // This is down now as if there is a to_restore output, and it was inserted into self.utxos + // This is done now as if there is a to_restore output, and it was inserted into self.utxos // earlier, self.utxos.len() may become `N::MAX_INPUTS + 1` // The prior block requires the len to be `<= N::MAX_INPUTS` if let Some(to_restore) = to_restore { @@ -442,9 +442,10 @@ impl> Scheduler { txn.put(scheduler_key::(&self.key), self.serialize()); log::info!( - "created {} plans containing {} payments to sign", + "created {} plans containing {} payments to sign, with {} payments pending scheduling", plans.len(), payments_at_start - self.payments.len(), + self.payments.len(), ); plans } @@ -589,7 +590,8 @@ impl> SchedulerTrait for Scheduler { output: N::Output, refund_to: N::Address, ) -> Plan { - Plan { + let output_id = output.id().as_ref().to_vec(); + let res = Plan { key: output.key(), // Uses a payment as this will still be successfully sent due to fee amortization, // and because change is currently always a Serai key @@ -597,7 +599,9 @@ impl> SchedulerTrait for Scheduler { inputs: vec![output], change: None, scheduler_addendum: (), - } + }; + log::info!("refund plan for {} has ID {}", hex::encode(output_id), hex::encode(res.id())); + res } fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 7ffe7041..802ea68b 100644 --- a/processor/src/networks/ethereum.rs +++ 
b/processor/src/networks/ethereum.rs @@ -426,7 +426,7 @@ impl Network for Ethereum { .get_block(BlockNumberOrTag::Finalized.into(), false) .await .map_err(|_| NetworkError::ConnectionError)? - .expect("no blocks were finalized") + .ok_or(NetworkError::ConnectionError)? .header .number .unwrap(); diff --git a/tests/processor/Cargo.toml b/tests/processor/Cargo.toml index be27e7de..e46312c5 100644 --- a/tests/processor/Cargo.toml +++ b/tests/processor/Cargo.toml @@ -23,11 +23,14 @@ zeroize = { version = "1", default-features = false } rand_core = { version = "0.6", default-features = false, features = ["getrandom"] } curve25519-dalek = "4" -ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["ristretto"] } +ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["secp256k1", "ristretto"] } dkg = { path = "../../crypto/dkg", default-features = false, features = ["tests"] } bitcoin-serai = { path = "../../coins/bitcoin" } + +k256 = "0.13" ethereum-serai = { path = "../../coins/ethereum" } + monero-serai = { path = "../../coins/monero" } messages = { package = "serai-processor-messages", path = "../../processor/messages" } @@ -43,7 +46,7 @@ serde_json = { version = "1", default-features = false } tokio = { version = "1", features = ["time"] } -processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "monero"] } +processor = { package = "serai-processor", path = "../../processor", features = ["bitcoin", "ethereum", "monero"] } dockertest = "0.4" serai-docker-tests = { path = "../docker" } diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 5e854272..6e78e397 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -61,6 +61,7 @@ pub fn processor_instance( pub type Handles = (String, String, String); pub fn processor_stack( network: NetworkId, + network_hostname_override: Option, ) -> (Handles, ::F, Vec) { let 
(network_composition, network_rpc_port) = network_instance(network); @@ -113,7 +114,10 @@ pub fn processor_stack( } let processor_composition = compositions.last_mut().unwrap(); - processor_composition.inject_container_name(handles[0].clone(), "NETWORK_RPC_HOSTNAME"); + processor_composition.inject_container_name( + network_hostname_override.unwrap_or_else(|| handles[0].clone()), + "NETWORK_RPC_HOSTNAME", + ); processor_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC"); ((handles[0].clone(), handles[1].clone(), handles[2].clone()), coord_key, compositions) @@ -182,25 +186,52 @@ impl Coordinator { } } NetworkId::Ethereum => { - use ethereum_serai::alloy::{ - simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, - network::Ethereum, + use std::sync::Arc; + use ethereum_serai::{ + alloy::{ + simple_request_transport::SimpleRequest, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }, + deployer::Deployer, }; - let provider = RootProvider::<_, Ethereum>::new( + let provider = Arc::new(RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), - ); + )); - loop { - if handle - .block_on(provider.raw_request::<_, ()>("evm_setAutomine".into(), [false])) - .is_ok() - { - break; - } - handle.block_on(tokio::time::sleep(core::time::Duration::from_secs(1))); + if handle + .block_on(provider.raw_request::<_, ()>("evm_setAutomine".into(), [false])) + .is_ok() + { + handle.block_on(async { + // Deploy the deployer + let tx = Deployer::deployment_tx(); + let signer = tx.recover_signer().unwrap(); + let (tx, sig, _) = tx.into_parts(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [signer.to_string(), (tx.gas_limit * tx.gas_price).to_string()], + ) + .await + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let _ = 
provider.send_raw_transaction(&bytes).await.unwrap(); + + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + let _ = Deployer::new(provider.clone()).await.unwrap().unwrap(); + + // Sleep until the actual time is ahead of whatever time is in the epoch we just + // mined + tokio::time::sleep(core::time::Duration::from_secs(30)).await; + }); + break; } } NetworkId::Monero => { @@ -371,7 +402,10 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); let to = rpc.get_latest_block_number().await.unwrap(); for coordinator in others { - let from = rpc.get_latest_block_number().await.unwrap() + 1; + let other_rpc = Rpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) + .await + .expect("couldn't connect to the Bitcoin RPC"); + let from = other_rpc.get_latest_block_number().await.unwrap() + 1; for b in from ..= to { let mut buf = vec![]; @@ -382,12 +416,10 @@ impl Coordinator { .consensus_encode(&mut buf) .unwrap(); - let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); - let rpc = - Rpc::new(rpc_url).await.expect("couldn't connect to the coordinator's Bitcoin RPC"); - - let res: Option = - rpc.rpc_call("submitblock", serde_json::json!([hex::encode(buf)])).await.unwrap(); + let res: Option = other_rpc + .rpc_call("submitblock", serde_json::json!([hex::encode(buf)])) + .await + .unwrap(); if let Some(err) = res { panic!("submitblock failed: {err}"); } @@ -397,22 +429,52 @@ impl Coordinator { NetworkId::Ethereum => { use ethereum_serai::alloy::{ simple_request_transport::SimpleRequest, + rpc_types::BlockNumberOrTag, rpc_client::ClientBuilder, provider::{Provider, RootProvider}, network::Ethereum, }; - let provider = RootProvider::<_, Ethereum>::new( - ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), - ); - let state = provider.raw_request::<_, String>("anvil_dumpState".into(), ()).await.unwrap(); + let (expected_number, state) = 
{ + let provider = RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + ); + + let expected_number = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + .await + .unwrap() + .unwrap() + .header + .number; + ( + expected_number, + provider.raw_request::<_, String>("anvil_dumpState".into(), ()).await.unwrap(), + ) + }; for coordinator in others { let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); - provider.raw_request::<_, ()>("anvil_loadState".into(), &state).await.unwrap(); + assert!(provider + .raw_request::<_, bool>("anvil_loadState".into(), &[&state]) + .await + .unwrap()); + + let new_number = provider + .get_block(BlockNumberOrTag::Latest.into(), false) + .await + .unwrap() + .unwrap() + .header + .number; + + // TODO: https://github.com/foundry-rs/foundry/issues/7955 + let _ = expected_number; + let _ = new_number; + //assert_eq!(expected_number, new_number); } } NetworkId::Monero => { @@ -421,21 +483,17 @@ impl Coordinator { let rpc = HttpRpc::new(rpc_url).await.expect("couldn't connect to the Monero RPC"); let to = rpc.get_height().await.unwrap(); for coordinator in others { - let from = HttpRpc::new(network_rpc(self.network, ops, &coordinator.network_handle)) - .await - .expect("couldn't connect to the Monero RPC") - .get_height() - .await - .unwrap(); + let other_rpc = + HttpRpc::new(network_rpc(coordinator.network, ops, &coordinator.network_handle)) + .await + .expect("couldn't connect to the Monero RPC"); + + let from = other_rpc.get_height().await.unwrap(); for b in from .. 
to { let block = rpc.get_block(rpc.get_block_hash(b).await.unwrap()).await.unwrap().serialize(); - let rpc_url = network_rpc(coordinator.network, ops, &coordinator.network_handle); - let rpc = HttpRpc::new(rpc_url) - .await - .expect("couldn't connect to the coordinator's Monero RPC"); - let res: serde_json::Value = rpc + let res: serde_json::Value = other_rpc .json_rpc_call("submit_block", Some(serde_json::json!([hex::encode(block)]))) .await .unwrap(); diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 5b54cc01..61762f71 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -96,6 +96,7 @@ pub enum Wallet { input_tx: bitcoin_serai::bitcoin::Transaction, }, Ethereum { + rpc_url: String, key: ::F, nonce: u64, }, @@ -155,7 +156,7 @@ impl Wallet { } NetworkId::Ethereum => { - use ciphersuite::{group::ff::Field, Ciphersuite, Secp256k1}; + use ciphersuite::{group::ff::Field, Secp256k1}; use ethereum_serai::alloy::{ primitives::{U256, Address}, simple_request_transport::SimpleRequest, @@ -183,7 +184,7 @@ impl Wallet { .await .unwrap(); - Wallet::Ethereum { key, nonce: 0 } + Wallet::Ethereum { rpc_url: rpc_url.clone(), key, nonce: 0 } } NetworkId::Monero => { @@ -328,22 +329,107 @@ impl Wallet { (buf, Balance { coin: Coin::Bitcoin, amount: Amount(AMOUNT) }) } - Wallet::Ethereum { key, ref mut nonce } => { - /* - use ethereum_serai::alloy::primitives::U256; + Wallet::Ethereum { rpc_url, key, ref mut nonce } => { + use std::sync::Arc; + use ethereum_serai::{ + alloy::{ + primitives::{U256, TxKind}, + sol_types::SolCall, + simple_request_transport::SimpleRequest, + consensus::{TxLegacy, SignableTransaction}, + rpc_client::ClientBuilder, + provider::{Provider, RootProvider}, + network::Ethereum, + }, + crypto::PublicKey, + deployer::Deployer, + }; let eight_decimals = U256::from(100_000_000u64); let nine_decimals = eight_decimals * U256::from(10u64); let eighteen_decimals = nine_decimals * nine_decimals; + let 
one_eth = eighteen_decimals; - let tx = todo!("send to router"); + let provider = Arc::new(RootProvider::<_, Ethereum>::new( + ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), + )); + + let to_as_key = PublicKey::new( + ::read_G(&mut to.as_slice()).unwrap(), + ) + .unwrap(); + let router_addr = { + // Find the deployer + let deployer = Deployer::new(provider.clone()).await.unwrap().unwrap(); + + // Find the router, deploying if non-existent + let router = if let Some(router) = + deployer.find_router(provider.clone(), &to_as_key).await.unwrap() + { + router + } else { + let mut tx = deployer.deploy_router(&to_as_key); + tx.gas_price = 1_000_000_000u64.into(); + let tx = ethereum_serai::crypto::deterministically_sign(&tx); + let signer = tx.recover_signer().unwrap(); + let (tx, sig, _) = tx.into_parts(); + + provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [signer.to_string(), (tx.gas_limit * tx.gas_price).to_string()], + ) + .await + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + let _ = provider.send_raw_transaction(&bytes).await.unwrap(); + + provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap(); + + deployer.find_router(provider.clone(), &to_as_key).await.unwrap().unwrap() + }; + + router.address() + }; + + let tx = TxLegacy { + chain_id: None, + nonce: *nonce, + gas_price: 1_000_000_000u128, + gas_limit: 200_000u128, + to: TxKind::Call(router_addr.into()), + // 1 ETH + value: one_eth, + input: ethereum_serai::router::abi::inInstructionCall::new(( + [0; 20].into(), + one_eth, + if let Some(instruction) = instruction { + Shorthand::Raw(RefundableInInstruction { origin: None, instruction }).encode().into() + } else { + vec![].into() + }, + )) + .abi_encode() + .into(), + }; *nonce += 1; - (tx, Balance { coin: Coin::Ether, amount: Amount(u64::try_from(eight_decimals).unwrap()) }) - */ - let _ = key; - let _ = nonce; - todo!() + + let sig = + 
k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(*key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + + // We drop the bottom 10 decimals + ( + bytes, + Balance { coin: Coin::Ether, amount: Amount(u64::try_from(eight_decimals).unwrap()) }, + ) } Wallet::Monero { handle, ref spend_key, ref view_pair, ref mut inputs } => { @@ -438,13 +524,10 @@ impl Wallet { ) .unwrap() } - Wallet::Ethereum { key, .. } => { - use ciphersuite::{Ciphersuite, Secp256k1}; - ExternalAddress::new( - ethereum_serai::crypto::address(&(Secp256k1::generator() * key)).into(), - ) - .unwrap() - } + Wallet::Ethereum { key, .. } => ExternalAddress::new( + ethereum_serai::crypto::address(&(ciphersuite::Secp256k1::generator() * key)).into(), + ) + .unwrap(), Wallet::Monero { view_pair, .. } => { use monero_serai::wallet::address::{Network, AddressSpec}; ExternalAddress::new( diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 5729fd73..5397ad2d 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -17,7 +17,8 @@ use serai_client::{ validator_sets::primitives::Session, }; -use processor::networks::{Network, Bitcoin, Monero}; +use serai_db::MemDb; +use processor::networks::{Network, Bitcoin, Ethereum, Monero}; use crate::{*, tests::*}; @@ -188,7 +189,7 @@ pub(crate) async fn substrate_block( #[test] fn batch_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { @@ -245,6 +246,8 @@ fn batch_test() { // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; + println!("sent in transaction. 
with in instruction: {}", instruction.is_some()); + let expected_batch = Batch { network, id: i, @@ -256,10 +259,11 @@ fn batch_test() { coin: balance_sent.coin, amount: Amount( balance_sent.amount.0 - - (2 * if network == NetworkId::Bitcoin { - Bitcoin::COST_TO_AGGREGATE - } else { - Monero::COST_TO_AGGREGATE + (2 * match network { + NetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, + NetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, + NetworkId::Monero => Monero::COST_TO_AGGREGATE, + NetworkId::Serai => panic!("minted for Serai?"), }), ), }, @@ -272,6 +276,8 @@ fn batch_test() { }, }; + println!("receiving batch preprocesses..."); + // Make sure the processors picked it up by checking they're trying to sign a batch for it let (mut id, mut preprocesses) = recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, 0).await; @@ -291,6 +297,8 @@ fn batch_test() { recv_batch_preprocesses(&mut coordinators, Session(0), &expected_batch, attempt).await; } + println!("signing batch..."); + // Continue with signing the batch let batch = sign_batch(&mut coordinators, key_pair.0 .0, id, preprocesses).await; diff --git a/tests/processor/src/tests/key_gen.rs b/tests/processor/src/tests/key_gen.rs index d50c12b7..7dea0bfd 100644 --- a/tests/processor/src/tests/key_gen.rs +++ b/tests/processor/src/tests/key_gen.rs @@ -144,7 +144,7 @@ pub(crate) async fn key_gen(coordinators: &mut [Coordinator]) -> KeyPair { #[test] fn key_gen_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { diff --git a/tests/processor/src/tests/mod.rs b/tests/processor/src/tests/mod.rs index 54a17020..afda97d5 100644 --- a/tests/processor/src/tests/mod.rs +++ b/tests/processor/src/tests/mod.rs @@ -20,8 +20,14 @@ pub(crate) const THRESHOLD: usize = ((COORDINATORS * 2) / 3) + 1; fn new_test(network: NetworkId) -> 
(Vec<(Handles, ::F)>, DockerTest) { let mut coordinators = vec![]; let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); + let mut eth_handle = None; for _ in 0 .. COORDINATORS { - let (handles, coord_key, compositions) = processor_stack(network); + let (handles, coord_key, compositions) = processor_stack(network, eth_handle.clone()); + // TODO: Remove this once https://github.com/foundry-rs/foundry/issues/7955 + // This has all processors share an Ethereum node until we can sync controlled nodes + if network == NetworkId::Ethereum { + eth_handle = eth_handle.or_else(|| Some(handles.0.clone())); + } coordinators.push((handles, coord_key)); for composition in compositions { test.provide_container(composition); diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 4d0d3cd6..b764f306 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -8,12 +8,15 @@ use dkg::{Participant, tests::clone_without}; use messages::{sign::SignId, SubstrateContext}; use serai_client::{ - primitives::{BlockHash, NetworkId}, + primitives::{BlockHash, NetworkId, Amount, Balance, SeraiAddress}, coins::primitives::{OutInstruction, OutInstructionWithBalance}, - in_instructions::primitives::Batch, + in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, validator_sets::primitives::Session, }; +use serai_db::MemDb; +use processor::networks::{Network, Bitcoin, Ethereum, Monero}; + use crate::{*, tests::*}; #[allow(unused)] @@ -144,7 +147,7 @@ pub(crate) async fn sign_tx( #[test] fn send_test() { - for network in [NetworkId::Bitcoin, NetworkId::Monero] { + for network in [NetworkId::Bitcoin, /* TODO NetworkId::Ethereum, */ NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { @@ -173,7 +176,11 @@ fn send_test() { coordinators[0].sync(&ops, &coordinators[1 ..]).await; // Send into the processor's wallet - let (tx, balance_sent) = 
wallet.send_to_address(&ops, &key_pair.1, None).await; + let mut serai_address = [0; 32]; + OsRng.fill_bytes(&mut serai_address); + let instruction = InInstruction::Transfer(SeraiAddress(serai_address)); + let (tx, balance_sent) = + wallet.send_to_address(&ops, &key_pair.1, Some(instruction.clone())).await; for coordinator in &mut coordinators { coordinator.publish_transacton(&ops, &tx).await; } @@ -192,8 +199,25 @@ fn send_test() { // The scanner works on a 5s interval, so this leaves a few s for any processing/latency tokio::time::sleep(Duration::from_secs(10)).await; - let expected_batch = - Batch { network, id: 0, block: BlockHash(block_with_tx.unwrap()), instructions: vec![] }; + let amount_minted = Amount( + balance_sent.amount.0 - + (2 * match network { + NetworkId::Bitcoin => Bitcoin::COST_TO_AGGREGATE, + NetworkId::Ethereum => Ethereum::::COST_TO_AGGREGATE, + NetworkId::Monero => Monero::COST_TO_AGGREGATE, + NetworkId::Serai => panic!("minted for Serai?"), + }), + ); + + let expected_batch = Batch { + network, + id: 0, + block: BlockHash(block_with_tx.unwrap()), + instructions: vec![InInstructionWithBalance { + instruction, + balance: Balance { coin: balance_sent.coin, amount: amount_minted }, + }], + }; // Make sure the proceessors picked it up by checking they're trying to sign a batch for it let (id, preprocesses) = @@ -221,7 +245,7 @@ fn send_test() { block: substrate_block_num, burns: vec![OutInstructionWithBalance { instruction: OutInstruction { address: wallet.address(), data: None }, - balance: balance_sent, + balance: Balance { coin: balance_sent.coin, amount: amount_minted }, }], batches: vec![batch.batch.id], }, From fb7d12ee6e6c8970485cb0c128b8ad21c4b6ed04 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 03:20:44 -0400 Subject: [PATCH 116/126] Short-circuit test_no_deadlock_in_multisig_completed if preconditions not met --- processor/src/tests/scanner.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git 
a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 078a07d5..6421c499 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -115,6 +115,12 @@ pub async fn test_scanner( pub async fn test_no_deadlock_in_multisig_completed( new_network: impl Fn(MemDb) -> Pin>>, ) { + // This test scans two blocks then acknowledges one, yet a network with one confirm won't scan + // two blocks before the first is acknowledged (due to the look-ahead limit) + if N::CONFIRMATIONS <= 1 { + return; + } + let mut db = MemDb::new(); let network = new_network(db.clone()).await; @@ -139,6 +145,10 @@ pub async fn test_no_deadlock_in_multisig_completed( let mut txn = db.txn(); NetworkKeyDb::set(&mut txn, Session(0), &key.to_bytes().as_ref().to_vec()); txn.commit(); + + // Sleep for 5 seconds as setting the Network key value will trigger an async task for + // Ethereum + tokio::time::sleep(Duration::from_secs(5)).await; } key }; @@ -158,6 +168,7 @@ pub async fn test_no_deadlock_in_multisig_completed( network.mine_block().await; } + // Block for the second set of keys registered let block_id = match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { is_retirement_block, block, outputs: _ } => { @@ -170,6 +181,7 @@ pub async fn test_no_deadlock_in_multisig_completed( } }; + // Block for the third set of keys registered match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { .. 
} => {} ScannerEvent::Completed(_, _, _, _, _) => { From a0a7d63dad26722a2b1e5c7314e7e97dbd5d27ad Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 05:27:01 -0400 Subject: [PATCH 117/126] bitcoin 0.32 --- Cargo.lock | 84 ++++++++------ coins/bitcoin/Cargo.toml | 4 +- coins/bitcoin/src/rpc.rs | 6 +- coins/bitcoin/src/tests/crypto.rs | 2 +- coins/bitcoin/src/wallet/mod.rs | 35 +++--- coins/bitcoin/src/wallet/send.rs | 6 +- coins/bitcoin/tests/wallet.rs | 21 ++-- processor/Cargo.toml | 2 +- processor/src/networks/bitcoin.rs | 35 +++--- processor/src/tests/literal/mod.rs | 14 ++- substrate/client/Cargo.toml | 2 +- substrate/client/src/networks/bitcoin.rs | 119 +++++++++++--------- tests/full-stack/src/tests/mint_and_burn.rs | 39 ++++--- tests/processor/src/networks.rs | 23 ++-- 14 files changed, 224 insertions(+), 168 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index bf98299d..3320988d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -862,6 +862,16 @@ dependencies = [ "tiny-keccak", ] +[[package]] +name = "base58ck" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c8d66485a3a2ea485c1913c4572ce0256067a5377ac8c75c4960e1cda98605f" +dependencies = [ + "bitcoin-internals", + "bitcoin_hashes", +] + [[package]] name = "base64" version = "0.13.1" @@ -888,9 +898,9 @@ checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] name = "bech32" -version = "0.10.0-beta" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98f7eed2b2781a6f0b5c903471d48e15f56fb4e1165df8a9a2337fd1a59d45ea" +checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" [[package]] name = "beef" @@ -947,14 +957,16 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.31.2" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" +checksum = "7170e7750a20974246f17ece04311b4205a6155f1db564c5b224af817663c3ea" dependencies = [ + "base58ck", "bech32", "bitcoin-internals", + "bitcoin-io", + "bitcoin-units", "bitcoin_hashes", - "core2 0.3.3", "hex-conservative", "hex_lit", "secp256k1", @@ -963,13 +975,19 @@ dependencies = [ [[package]] name = "bitcoin-internals" -version = "0.2.0" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9425c3bf7089c983facbae04de54513cce73b41c7f9ff8c845b54e7bc64ebbfb" +checksum = "30bdbe14aa07b06e6cfeffc529a1f099e5fbe249524f8125358604df99a4bed2" dependencies = [ "serde", ] +[[package]] +name = "bitcoin-io" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "340e09e8399c7bd8912f495af6aa58bea0c9214773417ffaa8f6460f93aaee56" + [[package]] name = "bitcoin-serai" version = "0.3.0" @@ -991,13 +1009,22 @@ dependencies = [ ] [[package]] -name = "bitcoin_hashes" -version = "0.13.0" +name = "bitcoin-units" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1930a4dabfebb8d7d9992db18ebe3ae2876f0a305fab206fd168df931ede293b" +checksum = "cb54da0b28892f3c52203a7191534033e051b6f4b52bc15480681b57b7e036f5" dependencies = [ "bitcoin-internals", - "core2 0.3.3", + "serde", +] + +[[package]] +name = "bitcoin_hashes" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb18c03d0db0247e147a21a6faafd5a7eb851c743db062de72018b6b7e8e4d16" +dependencies = [ + "bitcoin-io", "hex-conservative", "serde", ] @@ -1400,7 +1427,7 @@ version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd94671561e36e4e7de75f753f577edafb0e7c05d6e4547229fdf7938fbcd2c3" dependencies = [ - "core2 0.4.0", + "core2", "multibase", "multihash 0.18.1", "serde", @@ -1584,15 +1611,6 @@ version = "0.8.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" -[[package]] -name = "core2" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "239fa3ae9b63c2dc74bd3fa852d4792b8b305ae64eeede946265b6af62f1fff3" -dependencies = [ - "memchr", -] - [[package]] name = "core2" version = "0.4.0" @@ -3128,11 +3146,11 @@ dependencies = [ [[package]] name = "hex-conservative" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30ed443af458ccb6d81c1e7e661545f94d3176752fb1df2f543b902a1e0f51e2" +checksum = "e1aa273bf451e37ed35ced41c71a5e2a4e29064afb104158f2514bcd71c2c986" dependencies = [ - "core2 0.3.3", + "arrayvec", ] [[package]] @@ -3288,7 +3306,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.14", - "socket2 0.4.10", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -4758,7 +4776,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", - "core2 0.4.0", + "core2", "digest 0.10.7", "multihash-derive 0.8.0", "sha2", @@ -4772,7 +4790,7 @@ version = "0.19.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "076d548d76a0e2a0d4ab471d0b1c36c577786dfc4471242035d97a12a735c492" dependencies = [ - "core2 0.4.0", + "core2", "unsigned-varint", ] @@ -4785,7 +4803,7 @@ dependencies = [ "blake2b_simd", "blake2s_simd", "blake3", - "core2 0.4.0", + "core2", "digest 0.10.7", "multihash-derive 0.9.0", "ripemd", @@ -4815,7 +4833,7 @@ version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "890e72cb7396cb99ed98c1246a97b243cc16394470d94e0bc8b0c2c11d84290e" dependencies = [ - "core2 0.4.0", + "core2", "multihash 0.19.1", "multihash-derive-impl", ] @@ -7548,9 +7566,9 @@ dependencies = [ [[package]] name = "secp256k1" -version = "0.28.2" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "d24b59d129cdadea20aea4fb2352fa053712e5d713eee47d700cd4b2bc002f10" +checksum = "0e0cc0f1cf93f4969faf3ea1c7d8a9faed25918d96affa959720823dfe86d4f3" dependencies = [ "bitcoin_hashes", "rand", @@ -7560,9 +7578,9 @@ dependencies = [ [[package]] name = "secp256k1-sys" -version = "0.9.2" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d1746aae42c19d583c3c1a8c646bfad910498e2051c551a7f2e3c0c9fbb7eb" +checksum = "1433bd67156263443f14d603720b082dd3121779323fce20cba2aa07b874bc1b" dependencies = [ "cc", ] diff --git a/coins/bitcoin/Cargo.toml b/coins/bitcoin/Cargo.toml index 4ff0f79a..66fcc014 100644 --- a/coins/bitcoin/Cargo.toml +++ b/coins/bitcoin/Cargo.toml @@ -23,7 +23,7 @@ thiserror = { version = "1", default-features = false, optional = true } zeroize = { version = "^1.5", default-features = false } rand_core = { version = "0.6", default-features = false } -bitcoin = { version = "0.31", default-features = false, features = ["no-std"] } +bitcoin = { version = "0.32", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic", "bits"] } @@ -36,7 +36,7 @@ serde_json = { version = "1", default-features = false, optional = true } simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls", "basic-auth"], optional = true } [dev-dependencies] -secp256k1 = { version = "0.28", default-features = false, features = ["std"] } +secp256k1 = { version = "0.29", default-features = false, features = ["std"] } frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] } diff --git a/coins/bitcoin/src/rpc.rs b/coins/bitcoin/src/rpc.rs index 6778636b..fb1c35d6 100644 --- a/coins/bitcoin/src/rpc.rs +++ b/coins/bitcoin/src/rpc.rs @@ -195,13 +195,13 @@ impl Rpc { // If this was already successfully published, consider this having succeeded if let RpcError::RequestError(Error { code, .. 
}) = e { if code == RPC_VERIFY_ALREADY_IN_CHAIN { - return Ok(tx.txid()); + return Ok(tx.compute_txid()); } } Err(e)? } }; - if txid != tx.txid() { + if txid != tx.compute_txid() { Err(RpcError::InvalidResponse("returned TX ID inequals calculated TX ID"))?; } Ok(txid) @@ -215,7 +215,7 @@ impl Rpc { let tx: Transaction = encode::deserialize(&bytes) .map_err(|_| RpcError::InvalidResponse("node sent an improperly serialized transaction"))?; - let mut tx_hash = *tx.txid().as_raw_hash().as_byte_array(); + let mut tx_hash = *tx.compute_txid().as_raw_hash().as_byte_array(); tx_hash.reverse(); if hash != &tx_hash { Err(RpcError::InvalidResponse("node replied with a different transaction"))?; diff --git a/coins/bitcoin/src/tests/crypto.rs b/coins/bitcoin/src/tests/crypto.rs index 2170219c..cfc694f4 100644 --- a/coins/bitcoin/src/tests/crypto.rs +++ b/coins/bitcoin/src/tests/crypto.rs @@ -39,7 +39,7 @@ fn test_algorithm() { .verify_schnorr( &Signature::from_slice(&sig) .expect("couldn't convert produced signature to secp256k1::Signature"), - &Message::from(Hash::hash(MESSAGE)), + &Message::from_digest_slice(Hash::hash(MESSAGE).as_ref()).unwrap(), &x_only(&keys[&Participant::new(1).unwrap()].group_key()), ) .unwrap() diff --git a/coins/bitcoin/src/wallet/mod.rs b/coins/bitcoin/src/wallet/mod.rs index 3f099faa..ed6f00ce 100644 --- a/coins/bitcoin/src/wallet/mod.rs +++ b/coins/bitcoin/src/wallet/mod.rs @@ -4,7 +4,7 @@ use std_shims::{ io::{self, Write}, }; #[cfg(feature = "std")] -use std_shims::io::Read; +use std::io::{Read, BufReader}; use k256::{ elliptic_curve::sec1::{Tag, ToEncodedPoint}, @@ -18,8 +18,8 @@ use frost::{ }; use bitcoin::{ - consensus::encode::serialize, key::TweakedPublicKey, address::Payload, OutPoint, ScriptBuf, - TxOut, Transaction, Block, + consensus::encode::serialize, key::TweakedPublicKey, OutPoint, ScriptBuf, TxOut, Transaction, + Block, }; #[cfg(feature = "std")] use bitcoin::consensus::encode::Decodable; @@ -46,12 +46,12 @@ pub fn tweak_keys(keys: 
&ThresholdKeys) -> ThresholdKeys { /// Return the Taproot address payload for a public key. /// /// If the key is odd, this will return None. -pub fn address_payload(key: ProjectivePoint) -> Option { +pub fn p2tr_script_buf(key: ProjectivePoint) -> Option { if key.to_encoded_point(true).tag() != Tag::CompressedEvenY { return None; } - Some(Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)))) + Some(ScriptBuf::new_p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked(x_only(&key)))) } /// A spendable output. @@ -89,11 +89,17 @@ impl ReceivedOutput { /// Read a ReceivedOutput from a generic satisfying Read. #[cfg(feature = "std")] pub fn read(r: &mut R) -> io::Result { - Ok(ReceivedOutput { - offset: Secp256k1::read_F(r)?, - output: TxOut::consensus_decode(r).map_err(|_| io::Error::other("invalid TxOut"))?, - outpoint: OutPoint::consensus_decode(r).map_err(|_| io::Error::other("invalid OutPoint"))?, - }) + let offset = Secp256k1::read_F(r)?; + let output; + let outpoint; + { + let mut buf_r = BufReader::new(r); + output = + TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?; + outpoint = + OutPoint::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid OutPoint"))?; + } + Ok(ReceivedOutput { offset, output, outpoint }) } /// Write a ReceivedOutput to a generic satisfying Write. @@ -124,7 +130,7 @@ impl Scanner { /// Returns None if this key can't be scanned for. 
pub fn new(key: ProjectivePoint) -> Option { let mut scripts = HashMap::new(); - scripts.insert(address_payload(key)?.script_pubkey(), Scalar::ZERO); + scripts.insert(p2tr_script_buf(key)?, Scalar::ZERO); Some(Scanner { key, scripts }) } @@ -141,9 +147,8 @@ impl Scanner { // chance of being even // That means this should terminate within a very small amount of iterations loop { - match address_payload(self.key + (ProjectivePoint::GENERATOR * offset)) { - Some(address) => { - let script = address.script_pubkey(); + match p2tr_script_buf(self.key + (ProjectivePoint::GENERATOR * offset)) { + Some(script) => { if self.scripts.contains_key(&script) { None?; } @@ -166,7 +171,7 @@ impl Scanner { res.push(ReceivedOutput { offset: *offset, output: output.clone(), - outpoint: OutPoint::new(tx.txid(), vout), + outpoint: OutPoint::new(tx.compute_txid(), vout), }); } } diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index 24594ab4..d547c56a 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -23,7 +23,7 @@ use bitcoin::{ use crate::{ crypto::Schnorr, - wallet::{ReceivedOutput, address_payload}, + wallet::{ReceivedOutput, p2tr_script_buf}, }; #[rustfmt::skip] @@ -248,7 +248,7 @@ impl SignableTransaction { /// Returns the TX ID of the transaction this will create. pub fn txid(&self) -> [u8; 32] { - let mut res = self.tx.txid().to_byte_array(); + let mut res = self.tx.compute_txid().to_byte_array(); res.reverse(); res } @@ -288,7 +288,7 @@ impl SignableTransaction { transcript.append_message(b"signing_input", u32::try_from(i).unwrap().to_le_bytes()); let offset = keys.clone().offset(self.offsets[i]); - if address_payload(offset.group_key())?.script_pubkey() != self.prevouts[i].script_pubkey { + if p2tr_script_buf(offset.group_key())? 
!= self.prevouts[i].script_pubkey { None?; } diff --git a/coins/bitcoin/tests/wallet.rs b/coins/bitcoin/tests/wallet.rs index 9eca20c7..8aa2546e 100644 --- a/coins/bitcoin/tests/wallet.rs +++ b/coins/bitcoin/tests/wallet.rs @@ -22,11 +22,10 @@ use bitcoin_serai::{ hashes::Hash as HashTrait, blockdata::opcodes::all::OP_RETURN, script::{PushBytesBuf, Instruction, Instructions, Script}, - address::NetworkChecked, OutPoint, Amount, TxOut, Transaction, Network, Address, }, wallet::{ - tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, SignableTransaction, + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction, }, rpc::Rpc, }; @@ -48,7 +47,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) "generatetoaddress", serde_json::json!([ 1, - Address::::new(Network::Regtest, address_payload(key).unwrap()) + Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap() ]), ) .await @@ -69,7 +68,7 @@ async fn send_and_get_output(rpc: &Rpc, scanner: &Scanner, key: ProjectivePoint) assert_eq!(outputs, scanner.scan_transaction(&block.txdata[0])); assert_eq!(outputs.len(), 1); - assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].txid(), 0)); + assert_eq!(outputs[0].outpoint(), &OutPoint::new(block.txdata[0].compute_txid(), 0)); assert_eq!(outputs[0].value(), block.txdata[0].output[0].value.to_sat()); assert_eq!( @@ -193,7 +192,7 @@ async_sequential! { assert_eq!(output.offset(), Scalar::ZERO); let inputs = vec![output]; - let addr = || Address::::new(Network::Regtest, address_payload(key).unwrap()); + let addr = || Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap(); let payments = vec![(addr(), 1000)]; assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok()); @@ -261,14 +260,14 @@ async_sequential! 
{ // Declare payments, change, fee let payments = [ - (Address::::new(Network::Regtest, address_payload(key).unwrap()), 1005), - (Address::::new(Network::Regtest, address_payload(offset_key).unwrap()), 1007) + (Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap(), 1005), + (Address::from_script(&p2tr_script_buf(offset_key).unwrap(), Network::Regtest).unwrap(), 1007) ]; let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); let change_key = key + (ProjectivePoint::GENERATOR * change_offset); let change_addr = - Address::::new(Network::Regtest, address_payload(change_key).unwrap()); + Address::from_script(&p2tr_script_buf(change_key).unwrap(), Network::Regtest).unwrap(); // Create and sign the TX let tx = SignableTransaction::new( @@ -287,7 +286,7 @@ async_sequential! { // Ensure we can scan it let outputs = scanner.scan_transaction(&tx); for (o, output) in outputs.iter().enumerate() { - assert_eq!(output.outpoint(), &OutPoint::new(tx.txid(), u32::try_from(o).unwrap())); + assert_eq!(output.outpoint(), &OutPoint::new(tx.compute_txid(), u32::try_from(o).unwrap())); assert_eq!(&ReceivedOutput::read::<&[u8]>(&mut output.serialize().as_ref()).unwrap(), output); } @@ -320,7 +319,7 @@ async_sequential! { // This also tests send_raw_transaction and get_transaction, which the RPC test can't // effectively test rpc.send_raw_transaction(&tx).await.unwrap(); - let mut hash = *tx.txid().as_raw_hash().as_byte_array(); + let mut hash = *tx.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); assert_eq!(tx, rpc.get_transaction(&hash).await.unwrap()); assert_eq!(expected_id, hash); @@ -344,7 +343,7 @@ async_sequential! 
{ &SignableTransaction::new( vec![output], &[], - Some(&Address::::new(Network::Regtest, address_payload(key).unwrap())), + Some(&Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()), Some(data.clone()), FEE ).unwrap() diff --git a/processor/Cargo.toml b/processor/Cargo.toml index f90f6117..cc010848 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -45,7 +45,7 @@ frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } # Bitcoin -secp256k1 = { version = "0.28", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } +secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } bitcoin-serai = { path = "../coins/bitcoin", default-features = false, features = ["std"], optional = true } # Ethereum diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 3f8174e4..b7c6c2fb 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -20,12 +20,12 @@ use bitcoin_serai::{ key::{Parity, XOnlyPublicKey}, consensus::{Encodable, Decodable}, script::Instruction, - address::{NetworkChecked, Address as BAddress}, + address::Address as BAddress, Transaction, Block, Network as BNetwork, ScriptBuf, opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, }, wallet::{ - tweak_keys, address_payload, ReceivedOutput, Scanner, TransactionError, + tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError, SignableTransaction as BSignableTransaction, TransactionMachine, }, rpc::{RpcError, Rpc}, @@ -175,7 +175,7 @@ pub struct Fee(u64); impl TransactionTrait for Transaction { type Id = [u8; 32]; fn id(&self) -> Self::Id { - let mut hash = *self.txid().as_raw_hash().as_byte_array(); + let mut hash = *self.compute_txid().as_raw_hash().as_byte_array(); hash.reverse(); hash } @@ 
-243,7 +243,8 @@ impl EventualityTrait for Eventuality { buf } fn read_completion(reader: &mut R) -> io::Result { - Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) + Transaction::consensus_decode(&mut io::BufReader::new(reader)) + .map_err(|e| io::Error::other(format!("{e}"))) } } @@ -535,11 +536,11 @@ impl Bitcoin { private_key: &PrivateKey, ) -> ScriptBuf { let public_key = PublicKey::from_private_key(SECP256K1, private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(tx) .legacy_signature_hash( input_index, @@ -547,8 +548,10 @@ impl Bitcoin { EcdsaSighashType::All.to_u32(), ) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -577,8 +580,14 @@ const MAX_INPUTS: usize = 520; const MAX_OUTPUTS: usize = 520; fn address_from_key(key: ProjectivePoint) -> Address { - Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) - .unwrap() + Address::new( + BAddress::from_script( + &p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), + BNetwork::Bitcoin, + ) + .expect("couldn't go from p2tr script buf to address"), + ) + .expect("couldn't create Serai-representable address for bitcoin address") } #[async_trait] @@ -858,7 +867,7 @@ impl Network for Bitcoin { Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, // TODO: Distinguish already in pool vs double spend (other signing attempt succeeded) vs // invalid transaction - Err(e) => panic!("failed to publish TX {}: {e}", tx.txid()), + Err(e) => panic!("failed to publish TX {}: {e}", tx.compute_txid()), } Ok(()) } @@ -909,7 +918,7 @@ impl Network for Bitcoin { let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = 
PrivateKey::new(secret_key, BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); let new_block = self.get_latest_block_number().await.unwrap() + 1; self @@ -923,7 +932,7 @@ impl Network for Bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 5c5f3203..238dde1a 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -70,7 +70,7 @@ mod bitcoin { // btc key pair to send from let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = BAddress::p2pkh(&public_key, BNetwork::Regtest); + let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); // get unlocked coins let new_block = btc.get_latest_block_number().await.unwrap() + 1; @@ -107,7 +107,7 @@ mod bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -128,7 +128,7 @@ mod bitcoin { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::new(), @@ -143,12 +143,14 @@ mod bitcoin { // This is the standard script 
with an extra argument of the InInstruction let mut sig = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .p2wsh_signature_hash(0, &script, initial_output_value, EcdsaSighashType::All) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() diff --git a/substrate/client/Cargo.toml b/substrate/client/Cargo.toml index f97e40fb..0eeb3a2f 100644 --- a/substrate/client/Cargo.toml +++ b/substrate/client/Cargo.toml @@ -36,7 +36,7 @@ async-lock = "3" simple-request = { path = "../../common/request", version = "0.1", optional = true } -bitcoin = { version = "0.31", optional = true } +bitcoin = { version = "0.32", optional = true } ciphersuite = { path = "../../crypto/ciphersuite", version = "0.4", optional = true } monero-serai = { path = "../../coins/monero", version = "0.1.4-alpha", optional = true } diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 5ea37898..10965bdf 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -6,8 +6,8 @@ use bitcoin::{ hashes::{Hash as HashTrait, hash160::Hash}, PubkeyHash, ScriptHash, network::Network, - WitnessVersion, WitnessProgram, - address::{Error, Payload, NetworkChecked, Address as BAddressGeneric}, + WitnessVersion, WitnessProgram, ScriptBuf, + address::{AddressType, NetworkChecked, Address as BAddressGeneric}, }; type BAddress = BAddressGeneric; @@ -17,21 +17,22 @@ pub struct Address(BAddress); impl PartialEq for Address { fn eq(&self, other: &Self) -> bool { - // Since Serai defines the Bitcoin-address specification as a variant of the payload alone, - // define equivalency as the payload alone - self.0.payload() == other.0.payload() + // Since Serai defines the Bitcoin-address specification as a variant of the script alone, + // define equivalency as the script alone + self.0.script_pubkey() == 
other.0.script_pubkey() } } impl FromStr for Address { - type Err = Error; - fn from_str(str: &str) -> Result { + type Err = (); + fn from_str(str: &str) -> Result { Address::new( BAddressGeneric::from_str(str) - .map_err(|_| Error::UnrecognizedScript)? - .require_network(Network::Bitcoin)?, + .map_err(|_| ())? + .require_network(Network::Bitcoin) + .map_err(|_| ())?, ) - .ok_or(Error::UnrecognizedScript) + .ok_or(()) } } @@ -54,55 +55,65 @@ enum EncodedAddress { impl TryFrom> for Address { type Error = (); fn try_from(data: Vec) -> Result { - Ok(Address(BAddress::new( - Network::Bitcoin, - match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { - EncodedAddress::P2PKH(hash) => { - Payload::PubkeyHash(PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2SH(hash) => { - Payload::ScriptHash(ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) - } - EncodedAddress::P2WPKH(hash) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap()) - } - EncodedAddress::P2WSH(hash) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V0, hash).unwrap()) - } - EncodedAddress::P2TR(key) => { - Payload::WitnessProgram(WitnessProgram::new(WitnessVersion::V1, key).unwrap()) - } - }, - ))) + Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? 
{ + EncodedAddress::P2PKH(hash) => { + BAddress::p2pkh(PubkeyHash::from_raw_hash(Hash::from_byte_array(hash)), Network::Bitcoin) + } + EncodedAddress::P2SH(hash) => { + let script_hash = ScriptHash::from_raw_hash(Hash::from_byte_array(hash)); + let res = + BAddress::from_script(&ScriptBuf::new_p2sh(&script_hash), Network::Bitcoin).unwrap(); + debug_assert_eq!(res.script_hash(), Some(script_hash)); + res + } + EncodedAddress::P2WPKH(hash) => BAddress::from_witness_program( + WitnessProgram::new(WitnessVersion::V0, &hash).unwrap(), + Network::Bitcoin, + ), + EncodedAddress::P2WSH(hash) => BAddress::from_witness_program( + WitnessProgram::new(WitnessVersion::V0, &hash).unwrap(), + Network::Bitcoin, + ), + EncodedAddress::P2TR(key) => BAddress::from_witness_program( + WitnessProgram::new(WitnessVersion::V1, &key).unwrap(), + Network::Bitcoin, + ), + })) } } fn try_to_vec(addr: &Address) -> Result, ()> { + let witness_program = |addr: &Address| { + let script = addr.0.script_pubkey(); + let program_push = script.as_script().instructions().last().ok_or(())?.map_err(|_| ())?; + let program = program_push.push_bytes().ok_or(())?.as_bytes(); + Ok::<_, ()>(program.to_vec()) + }; Ok( - (match addr.0.payload() { - Payload::PubkeyHash(hash) => EncodedAddress::P2PKH(*hash.as_raw_hash().as_byte_array()), - Payload::ScriptHash(hash) => EncodedAddress::P2SH(*hash.as_raw_hash().as_byte_array()), - Payload::WitnessProgram(program) => match program.version() { - WitnessVersion::V0 => { - let program = program.program(); - if program.len() == 20 { - let mut buf = [0; 20]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WPKH(buf) - } else if program.len() == 32 { - let mut buf = [0; 32]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WSH(buf) - } else { - Err(())? - } - } - WitnessVersion::V1 => { - let program_ref: &[u8] = program.program().as_ref(); - EncodedAddress::P2TR(program_ref.try_into().map_err(|_| ())?) 
- } - _ => Err(())?, - }, + (match addr.0.address_type() { + Some(AddressType::P2pkh) => { + EncodedAddress::P2PKH(*addr.0.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) + } + Some(AddressType::P2sh) => { + EncodedAddress::P2SH(*addr.0.script_hash().unwrap().as_raw_hash().as_byte_array()) + } + Some(AddressType::P2wpkh) => { + let program = witness_program(addr)?; + let mut buf = [0; 20]; + buf.copy_from_slice(program.as_ref()); + EncodedAddress::P2WPKH(buf) + } + Some(AddressType::P2wsh) => { + let program = witness_program(addr)?; + let mut buf = [0; 32]; + buf.copy_from_slice(program.as_ref()); + EncodedAddress::P2WSH(buf) + } + Some(AddressType::P2tr) => { + let program = witness_program(addr)?; + let program_ref: &[u8] = program.as_ref(); + EncodedAddress::P2TR(program_ref.try_into().map_err(|_| ())?) + } _ => Err(())?, }) .encode(), diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index 51b8156c..e1153bae 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -57,7 +57,7 @@ async fn mint_and_burn_test() { }; let addr = Address::p2pkh( - &PublicKey::from_private_key( + PublicKey::from_private_key( SECP256K1, &PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin), ), @@ -266,14 +266,13 @@ async fn mint_and_burn_test() { script::{PushBytesBuf, Script, ScriptBuf, Builder}, absolute::LockTime, transaction::{Version, Transaction}, - address::Payload, - Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, + Sequence, Witness, OutPoint, TxIn, Amount, TxOut, Network, Address, }; let private_key = PrivateKey::new(SecretKey::from_slice(&[0x01; 32]).unwrap(), Network::Bitcoin); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let addr = Payload::p2pkh(&public_key); + let addr = Address::p2pkh(public_key, Network::Bitcoin); // Use the first block's coinbase let rpc = handles[0].bitcoin(&ops).await; @@ 
-284,7 +283,7 @@ async fn mint_and_burn_test() { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -292,17 +291,23 @@ async fn mint_and_burn_test() { output: vec![ TxOut { value: Amount::from_sat(1_100_000_00), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(), - )) + script_pubkey: Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&bitcoin_key_pair.1[1 ..]).unwrap(), + ), + Network::Bitcoin, + ) .script_pubkey(), }, TxOut { // change = amount spent - fee value: Amount::from_sat(tx.output[0].value.to_sat() - 1_100_000_00 - 1_000_00), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(), - )) + script_pubkey: Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&public_key.inner.serialize()[1 ..]).unwrap(), + ), + Network::Bitcoin, + ) .script_pubkey(), }, TxOut { @@ -316,12 +321,14 @@ async fn mint_and_burn_test() { let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .legacy_signature_hash(0, &addr.script_pubkey(), EcdsaSighashType::All.to_u32()) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() @@ -449,9 +456,9 @@ async fn mint_and_burn_test() { let bitcoin_addr = { use bitcoin_serai::bitcoin::{network::Network, key::PublicKey, address::Address}; // Uses Network::Bitcoin since it doesn't actually matter, Serai strips it out - // TODO: Move Serai to Payload from Address + // TODO: Move Serai to ScriptBuf from 
Address Address::p2pkh( - &loop { + loop { let mut bytes = [0; 33]; OsRng.fill_bytes(&mut bytes); bytes[0] %= 4; diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 61762f71..81c18bfa 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -126,7 +126,7 @@ impl Wallet { let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = PrivateKey::new(secret_key, Network::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); - let main_addr = Address::p2pkh(&public_key, Network::Regtest); + let main_addr = Address::p2pkh(public_key, Network::Regtest); let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the Bitcoin RPC"); @@ -258,10 +258,10 @@ impl Wallet { consensus::Encodable, sighash::{EcdsaSighashType, SighashCache}, script::{PushBytesBuf, Script, ScriptBuf, Builder}, - address::Payload, OutPoint, Sequence, Witness, TxIn, Amount, TxOut, absolute::LockTime, transaction::{Version, Transaction}, + Network, Address, }; const AMOUNT: u64 = 100000000; @@ -269,7 +269,7 @@ impl Wallet { version: Version(2), lock_time: LockTime::ZERO, input: vec![TxIn { - previous_output: OutPoint { txid: input_tx.txid(), vout: 0 }, + previous_output: OutPoint { txid: input_tx.compute_txid(), vout: 0 }, script_sig: Script::new().into(), sequence: Sequence(u32::MAX), witness: Witness::default(), @@ -281,9 +281,12 @@ impl Wallet { }, TxOut { value: Amount::from_sat(AMOUNT), - script_pubkey: Payload::p2tr_tweaked(TweakedPublicKey::dangerous_assume_tweaked( - XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), - )) + script_pubkey: Address::p2tr_tweaked( + TweakedPublicKey::dangerous_assume_tweaked( + XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), + ), + Network::Bitcoin, + ) .script_pubkey(), }, ], @@ -303,7 +306,7 @@ impl Wallet { let mut der = SECP256K1 .sign_ecdsa_low_r( - &Message::from( + &Message::from_digest_slice( SighashCache::new(&tx) .legacy_signature_hash( 0, @@ -311,8 +314,10 
@@ impl Wallet { EcdsaSighashType::All.to_u32(), ) .unwrap() - .to_raw_hash(), - ), + .to_raw_hash() + .as_ref(), + ) + .unwrap(), &private_key.inner, ) .serialize_der() From 400319cd292149d9afd84afabb3ec466879e9a9b Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 06:09:04 -0400 Subject: [PATCH 118/126] cargo update Also updates our gems --- Cargo.lock | 622 +++++++++--------- coins/ethereum/Cargo.toml | 14 +- .../alloy-simple-request-transport/Cargo.toml | 4 +- coins/ethereum/src/deployer.rs | 9 +- coins/ethereum/src/tests/mod.rs | 7 +- docs/Gemfile.lock | 14 +- 6 files changed, 339 insertions(+), 331 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3320988d..dedc21d1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -102,7 +102,7 @@ checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "alloy-consensus" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-eips", "alloy-primitives", @@ -114,9 +114,9 @@ dependencies = [ [[package]] name = "alloy-core" -version = "0.7.0" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcbd9ee412dfb4e81d23cd1ae816d828c494a77d1eb00358035043695d4c5808" +checksum = "f7253846c7bf55147775fd66c334abc1dd0a41e97e6155577b3dc513c6e66ef2" dependencies = [ "alloy-primitives", ] @@ -124,7 +124,7 @@ dependencies = [ [[package]] name = "alloy-eips" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "alloy-rlp", @@ 
-138,7 +138,7 @@ dependencies = [ [[package]] name = "alloy-genesis" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "alloy-serde", @@ -148,9 +148,9 @@ dependencies = [ [[package]] name = "alloy-json-abi" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786689872ec4e7d354810ab0dffd48bb40b838c047522eb031cbd47d15634849" +checksum = "7e30946aa6173020259055a44971f5cf40a7d76c931d209caeb51b333263df4f" dependencies = [ "alloy-primitives", "alloy-sol-type-parser", @@ -160,7 +160,7 @@ dependencies = [ [[package]] name = "alloy-json-rpc" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "serde", @@ -172,7 +172,7 @@ dependencies = [ [[package]] name = "alloy-network" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -189,7 +189,7 @@ dependencies = [ [[package]] name = "alloy-node-bindings" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" 
dependencies = [ "alloy-genesis", "alloy-primitives", @@ -203,9 +203,9 @@ dependencies = [ [[package]] name = "alloy-primitives" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525448f6afc1b70dd0f9d0a8145631bf2f5e434678ab23ab18409ca264cae6b3" +checksum = "db8aa973e647ec336810a9356af8aea787249c9d00b1525359f3db29a68d231b" dependencies = [ "alloy-rlp", "bytes", @@ -226,7 +226,7 @@ dependencies = [ [[package]] name = "alloy-provider" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-eips", "alloy-json-rpc", @@ -243,6 +243,7 @@ dependencies = [ "futures", "futures-utils-wasm", "lru", + "pin-project", "serde_json", "tokio", "tracing", @@ -267,13 +268,13 @@ checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] name = "alloy-rpc-client" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -291,7 +292,7 @@ dependencies = [ [[package]] name = "alloy-rpc-types" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-consensus", "alloy-eips", @@ -309,7 +310,7 @@ dependencies = [ [[package]] name = 
"alloy-rpc-types-trace" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "alloy-rpc-types", @@ -321,7 +322,7 @@ dependencies = [ [[package]] name = "alloy-serde" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "serde", @@ -331,7 +332,7 @@ dependencies = [ [[package]] name = "alloy-signer" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-primitives", "async-trait", @@ -354,9 +355,23 @@ dependencies = [ [[package]] name = "alloy-sol-macro" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89c80a2cb97e7aa48611cbb63950336f9824a174cdf670527cc6465078a26ea1" +checksum = "7dbd17d67f3e89478c8a634416358e539e577899666c927bc3d2b1328ee9b6ca" +dependencies = [ + "alloy-sol-macro-expander", + "alloy-sol-macro-input", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.65", +] + +[[package]] +name = "alloy-sol-macro-expander" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c6da95adcf4760bb4b108fefa51d50096c5e5fdd29ee72fed3e86ee414f2e34" dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", @@ -366,16 +381,16 @@ dependencies = [ 
"proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", "syn-solidity", "tiny-keccak", ] [[package]] name = "alloy-sol-macro-input" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c58894b58ac50979eeac6249661991ac40b9d541830d9a725f7714cc9ef08c23" +checksum = "32c8da04c1343871fb6ce5a489218f9c85323c8340a36e9106b5fc98d4dd59d5" dependencies = [ "alloy-json-abi", "const-hex", @@ -384,24 +399,24 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.60", + "syn 2.0.65", "syn-solidity", ] [[package]] name = "alloy-sol-type-parser" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7da8e71ea68e780cc203919e03f69f59e7afe92d2696fb1dcb6662f61e4031b6" +checksum = "368cae4dc052cad1d8f72eb2ae0c38027116933eeb49213c200a9e9875f208d7" dependencies = [ - "winnow 0.6.6", + "winnow 0.6.8", ] [[package]] name = "alloy-sol-types" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399287f68d1081ed8b1f4903c49687658b95b142207d7cb4ae2f4813915343ef" +checksum = "40a64d2d2395c1ac636b62419a7b17ec39031d6b2367e66e9acbf566e6055e9c" dependencies = [ "alloy-json-abi", "alloy-primitives", @@ -412,10 +427,10 @@ dependencies = [ [[package]] name = "alloy-transport" version = "0.1.0" -source = "git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-json-rpc", - "base64 0.22.0", + "base64 0.22.1", "futures-util", "futures-utils-wasm", "serde", @@ -430,7 +445,7 @@ dependencies = [ [[package]] name = "alloy-transport-http" version = "0.1.0" -source = 
"git+https://github.com/alloy-rs/alloy?rev=b79db21734cffddc11753fe62ba571565c896f42#b79db21734cffddc11753fe62ba571565c896f42" +source = "git+https://github.com/alloy-rs/alloy?rev=64feb9bc51c8021ea08535694c44de84222f474e#64feb9bc51c8021ea08535694c44de84222f474e" dependencies = [ "alloy-transport", "url", @@ -476,33 +491,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8901269c6307e8d93993578286ac0edf7f195079ffff5ebdeea6a59ffb7e36bc" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c75ac65da39e5fe5ab759307499ddad880d724eed2f6ce5b5e8a26f4f387928c" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.2" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e28923312444cdd728e4738b3f9c9cac739500909bb3d3c94b43551b16517648" +checksum = "a64c907d4e79225ac72e2a354c9ce84d50ebb4586dee56c82b3ee73004f537f5" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd54b81ec8d6180e24654d0b371ad22fc3dd083b6ff8ba325b72e00c87660a7" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -510,9 +525,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.82" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" +checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" [[package]] name = 
"approx" @@ -655,9 +670,9 @@ dependencies = [ [[package]] name = "array-bytes" -version = "6.2.2" +version = "6.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f840fb7195bcfc5e17ea40c26e5ce6d5b9ce5d584466e17703209657e459ae0" +checksum = "5d5dde061bd34119e902bbb2d9b90c5692635cf59fb91d582c2b68043f1b8293" [[package]] name = "arrayref" @@ -770,7 +785,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -781,7 +796,7 @@ checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -816,14 +831,14 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] name = "autocfg" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" @@ -886,9 +901,9 @@ checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" -version = "0.22.0" +version = "0.22.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" @@ -937,7 +952,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -1191,9 +1206,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0901fc8eb0aca4c83be0106d6f2db17d86a08dfc2c25f0e84464bf381158add6" +checksum = "dbe5b10e214954177fb1dc9fbd20a1a2608fe99e6c832033bdc7cea287a20d77" dependencies = [ "borsh-derive", "cfg_aliases", @@ -1201,15 +1216,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51670c3aa053938b0ee3bd67c3817e471e626151131b934038e83c5bf8de48f5" +checksum = "d7a8646f94ab393e43e8b35a2558b1624bed28b97ee09c5d15456e3c9463f46d" dependencies = [ "once_cell", "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", "syn_derive", ] @@ -1267,9 +1282,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytemuck" -version = "1.15.0" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d6d68c57235a3a081186990eca2867354726650f42f7516ca50c28d6281fd15" +checksum = "78834c15cb5d5efe3452d58b1e8ba890dd62d21907f867f383358198e56ebca5" [[package]] name = "byteorder" @@ -1299,9 +1314,9 @@ dependencies = [ [[package]] name = "c-kzg" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3130f3d8717cc02e668a896af24984d5d5d4e8bf12e278e982e0f1bd88a0f9af" +checksum = "cdf100c4cea8f207e883ff91ca886d621d8a166cb04971dfaa9bb8fd99ed95df" dependencies = [ "blst", "cc", @@ -1313,9 +1328,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" dependencies = [ "serde", ] @@ -1337,7 +1352,7 @@ checksum = "e7daec1a2a2129eeba1644b220b4647ec537b0b5d4bfd6876fcc5a540056b592" dependencies = [ "camino", "cargo-platform", - "semver 1.0.22", + "semver 1.0.23", "serde", 
"serde_json", "thiserror", @@ -1510,7 +1525,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -1531,24 +1546,24 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "concurrent-queue" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d16048cd947b08fa32c24458a22f5dc5e835264f689f4f5653210c69fd107363" +checksum = "4ca0197aee26d1ae37445ee532fefce43251d24cc7c166799f4d46817f1d3973" dependencies = [ "crossbeam-utils", ] [[package]] name = "const-hex" -version = "1.11.3" +version = "1.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" +checksum = "70ff96486ccc291d36a958107caf2c0af8c78c0af7d31ae2f35ce055130de1a6" dependencies = [ "cfg-if", "cpufeatures", @@ -1748,9 +1763,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.4.0" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3855a8a784b474f333699ef2bbca9db2c4a1f6d9088a90a2d25b1eb53111eaa" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -1776,9 +1791,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +checksum = "22ec99545bb0ed0ea7bb9b8e1e9122ea386ff8a48c0922e43f36d45ab09e0e80" [[package]] name = "crunchy" @@ -1845,7 +1860,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies 
= [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -1872,7 +1887,7 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -1889,7 +1904,7 @@ checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -1915,7 +1930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lock_api", "once_cell", "parking_lot_core 0.9.9", @@ -1923,15 +1938,15 @@ dependencies = [ [[package]] name = "data-encoding" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e962a19be5cfc3f3bf6dd8f61eb50107f356ad6270fbb3ed41476571db78be5" +checksum = "e8566979429cf69b49a5c740c60791108e86440e8be149bbea4fe54d2c32d6e2" [[package]] name = "data-encoding-macro" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20c01c06f5f429efdf2bae21eb67c28b3df3cf85b7dd2d8ef09c0838dac5d33e" +checksum = "f1559b6cba622276d6d63706db152618eeb15b89b3e4041446b05876e352e639" dependencies = [ "data-encoding", "data-encoding-macro-internal", @@ -1939,9 +1954,9 @@ dependencies = [ [[package]] name = "data-encoding-macro-internal" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0047d07f2c89b17dd631c80450d69841a6b5d7fb17278cbc43d7e4cfcf2576f3" +checksum = "332d754c0af53bc87c108fed664d121ecf59207ec4196041f04d6ab9002ad33f" dependencies = [ "data-encoding", "syn 1.0.109", @@ -2101,7 +2116,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2253,7 +2268,7 @@ checksum = 
"7d9ce6874da5d4415896cd45ffbc4d1cfc0c4f9c079427bd870742c30f2f65a9" dependencies = [ "curve25519-dalek", "ed25519", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "hex", "rand_core", "sha2", @@ -2262,9 +2277,9 @@ dependencies = [ [[package]] name = "either" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" [[package]] name = "elliptic-curve" @@ -2307,7 +2322,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2337,9 +2352,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "errno" -version = "0.3.8" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a258e46cdc063eb8519c00b9fc845fc47bcfca4130e2f08e88665ceda8474245" +checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" dependencies = [ "libc", "windows-sys 0.52.0", @@ -2413,7 +2428,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2424,9 +2439,9 @@ checksum = "4443176a9f2c162692bd3d352d745ef9413eec5782a80d8fd6f8a1ac692a07f7" [[package]] name = "fastrand" -version = "2.0.2" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" [[package]] name = "fastrlp" @@ -2475,9 +2490,9 @@ dependencies = [ [[package]] name = "fiat-crypto" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name 
= "file-per-thread-logger" @@ -2513,7 +2528,7 @@ dependencies = [ "log", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scale-info", ] @@ -2688,7 +2703,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2700,7 +2715,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2710,7 +2725,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2869,7 +2884,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -2879,7 +2894,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.11", + "rustls 0.21.12", ] [[package]] @@ -3109,9 +3124,9 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ "ahash", "allocator-api2", @@ -3306,7 +3321,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.14", - "socket2 0.5.6", + "socket2 0.4.10", "tokio", "tower-service", "tracing", @@ -3342,7 +3357,7 @@ dependencies = [ "http 1.1.0", "hyper 1.3.1", "hyper-util", - "rustls 0.23.5", + "rustls 0.23.7", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -3363,7 +3378,7 @@ dependencies = [ "http-body 1.0.0", "hyper 1.3.1", "pin-project-lite 0.2.14", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "tower", "tower-service", @@ -3532,7 +3547,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "serde", ] @@ -3548,9 +3563,9 @@ dependencies = [ [[package]] name = "instant" -version = "0.1.12" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" dependencies = [ "cfg-if", ] @@ -3576,7 +3591,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.6", + "socket2 0.5.7", "widestring", "windows-sys 0.48.0", "winreg", @@ -3653,7 +3668,7 @@ dependencies = [ "globset", "hyper 0.14.28", "jsonrpsee-types", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "rustc-hash", "serde", @@ -3738,9 +3753,9 @@ dependencies = [ [[package]] name = "keccak-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444" +checksum = "47a3633291834c4fbebf8673acbc1b04ec9d151418ff9b8e26dcd79129928758" dependencies = [ "digest 0.10.7", "sha3-asm", @@ -3762,7 +3777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bf7a85fe66f9ff9cd74e169fdd2c94c6e1e74c412c99a73b4df3200b5d3760b2" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -3773,7 +3788,7 @@ checksum = "b644c70b92285f66bfc2032922a79000ea30af7bc2ab31902992a5dcb9b434f6" dependencies = [ "kvdb", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "regex", "rocksdb 0.21.0", "smallvec", @@ -3801,9 +3816,9 @@ checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" [[package]] name 
= "libc" -version = "0.2.153" +version = "0.2.155" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" +checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" @@ -3812,7 +3827,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.48.5", ] [[package]] @@ -3900,7 +3915,7 @@ dependencies = [ "multihash 0.19.1", "multistream-select", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "quick-protobuf", "rand", @@ -3922,7 +3937,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "smallvec", "trust-dns-resolver", ] @@ -4044,7 +4059,7 @@ dependencies = [ "log", "rand", "smallvec", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", "trust-dns-proto 0.22.0", "void", @@ -4125,12 +4140,12 @@ dependencies = [ "libp2p-identity", "libp2p-tls", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "quinn", "rand", "ring 0.16.20", - "rustls 0.21.11", - "socket2 0.5.6", + "rustls 0.21.12", + "socket2 0.5.7", "thiserror", "tokio", ] @@ -4186,7 +4201,7 @@ dependencies = [ "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -4202,7 +4217,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "socket2 0.5.6", + "socket2 0.5.7", "tokio", ] @@ -4218,7 +4233,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.21.11", + "rustls 0.21.12", "rustls-webpki 0.101.7", "thiserror", "x509-parser", @@ -4267,7 +4282,7 @@ dependencies = [ "libp2p-core", "libp2p-identity", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "quicksink", "rw-stream-sink", "soketto", @@ -4350,15 +4365,15 @@ dependencies = [ [[package]] name = 
"linux-raw-sys" -version = "0.4.13" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4389,7 +4404,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3262e75e648fce39813cb56ac41f3c3e3f65217ebf3844d818d1f9398cfb0dc" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", ] [[package]] @@ -4439,7 +4454,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -4453,7 +4468,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -4464,7 +4479,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -4475,7 +4490,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -5070,9 +5085,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da0df0e5185db44f69b44f26786fe401b6c293d1907744beaa7fa62b2e5a517a" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" dependencies = [ "autocfg", "libm", @@ -5324,7 +5339,7 @@ dependencies = [ "log", "lz4", "memmap2", - "parking_lot 0.12.1", + "parking_lot 
0.12.2", "rand", "siphasher", "snap", @@ -5333,9 +5348,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "881331e34fa842a2fb61cc2db9643a8fedc615e47cfcc52597d1af0db9a7e8fe" +checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ "arrayvec", "bitvec", @@ -5348,11 +5363,11 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.9" +version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" +checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 2.0.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -5383,9 +5398,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", "parking_lot_core 0.9.9", @@ -5452,9 +5467,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -5494,9 +5509,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.9" +version = "2.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +checksum = 
"560131c633294438da9f7c4b08189194b20946c8274c6b9e38881a7874dc8ee8" dependencies = [ "memchr", "thiserror", @@ -5505,9 +5520,9 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", "indexmap 2.2.6", @@ -5530,7 +5545,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -5575,9 +5590,9 @@ checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "polling" -version = "3.6.0" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0c976a60b2d7e99d6f229e414670a9b85d13ac305cc6d1e9c134de58c5aaaf6" +checksum = "645493cf344456ef24219d02a768cf1fb92ddf8c92161679ae3d91b91a637be3" dependencies = [ "cfg-if", "concurrent-queue", @@ -5743,29 +5758,29 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] name = "proc-macro2" -version = "1.0.81" +version = "1.0.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" +checksum = "0b33eb56c327dec362a9e55b3ad14f9d2f0904fb5a5b03b513ab5465399e9f43" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "449811d15fbdf5ceb5c1144416066429cf82316e2ec8ce0c1f6f8a02e7bbcf8c" +checksum = "3d33c28a30771f7f96db69893f78b857f7450d7e0237e9c8fc6427a81bae7ed1" dependencies = [ "cfg-if", "fnv", "lazy_static", "memchr", - "parking_lot 0.12.1", + 
"parking_lot 0.12.2", "thiserror", ] @@ -5777,7 +5792,7 @@ checksum = "3c99afa9a01501019ac3a14d71d9f94050346f55ca471ce90c799a15c58f61e2" dependencies = [ "dtoa", "itoa", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus-client-derive-encode", ] @@ -5789,7 +5804,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -5926,7 +5941,7 @@ dependencies = [ "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.11", + "rustls 0.21.12", "thiserror", "tokio", "tracing", @@ -5942,7 +5957,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.11", + "rustls 0.21.12", "slab", "thiserror", "tinyvec", @@ -5957,7 +5972,7 @@ checksum = "055b4e778e8feb9f93c4e439f71dc2156ef13360b432b799e179a8c4cdf0b1d7" dependencies = [ "bytes", "libc", - "socket2 0.5.6", + "socket2 0.5.7", "tracing", "windows-sys 0.48.0", ] @@ -6104,22 +6119,22 @@ dependencies = [ [[package]] name = "ref-cast" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4846d4c50d1721b1a3bef8af76924eef20d5e723647333798c1b519b3a9473f" +checksum = "ccf0a6f84d5f1d581da8b41b47ec8600871962f2a528115b542b362d4b744931" dependencies = [ "ref-cast-impl", ] [[package]] name = "ref-cast-impl" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" +checksum = "bcc303e793d3734489387d205e9b186fac9c6cfacedd98cbb2e8a5943595f3e6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -6333,9 +6348,9 @@ checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -6364,7 +6379,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" dependencies = [ - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -6378,9 +6393,9 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.32" +version = "0.38.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" +checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" dependencies = [ "bitflags 2.5.0", "errno", @@ -6391,9 +6406,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.11" +version = "0.21.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" +checksum = "3f56a14d1f48b391359b22f731fd4bd7e43c97f3c50eee276f3aa09c94784d3e" dependencies = [ "log", "ring 0.17.8", @@ -6403,14 +6418,14 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.5" +version = "0.23.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e" +checksum = "ebbbdb961df0ad3f2652da8f3fdc4b36122f568f968f45ad3316f26c025c677b" dependencies = [ "once_cell", "ring 0.17.8", "rustls-pki-types", - "rustls-webpki 0.102.2", + "rustls-webpki 0.102.4", "subtle", "zeroize", ] @@ -6434,15 +6449,15 @@ version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.22.0", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.1" +version = "1.7.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" +checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" [[package]] name = "rustls-webpki" @@ -6456,9 +6471,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.2" +version = "0.102.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "faaa0a62740bedb9b2ef5afa303da42764c012f743917351dc9a237ea1663610" +checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -6467,9 +6482,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.15" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "rusty-fork" @@ -6496,9 +6511,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e86697c916019a8588c99b5fac3cead74ec0b4b819707a682fd4d23fa0ce1ba1" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "safe_arch" @@ -6622,7 +6637,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -6673,7 +6688,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-executor", "sc-transaction-pool-api", "sc-utils", @@ -6702,7 +6717,7 @@ dependencies = [ "log", "parity-db", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-state-db", "schnellru", @@ -6726,7 +6741,7 @@ dependencies = [ "libp2p-identity", "log", "mockall", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", 
"sc-utils", "serde", @@ -6753,7 +6768,7 @@ dependencies = [ "num-rational", "num-traits", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-consensus", "sc-consensus-epochs", @@ -6804,7 +6819,7 @@ dependencies = [ "futures-timer", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "sc-block-builder", "sc-chain-spec", @@ -6859,7 +6874,7 @@ version = "0.10.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-executor-common", "sc-executor-wasmtime", "schnellru", @@ -6926,7 +6941,7 @@ version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "array-bytes", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "serde_json", "sp-application-crypto", "sp-core", @@ -6954,7 +6969,7 @@ dependencies = [ "log", "mockall", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "partial_sort", "pin-project", "rand", @@ -7120,7 +7135,7 @@ dependencies = [ "num_cpus", "once_cell", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "sc-client-api", "sc-network", @@ -7154,7 +7169,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-block-builder", "sc-chain-spec", "sc-client-api", @@ -7221,7 +7236,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-chain-spec", "sc-client-api", "sc-transaction-pool-api", @@ -7248,7 +7263,7 @@ dependencies = [ "jsonrpsee", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "sc-block-builder", @@ -7305,7 +7320,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "log", "parity-scale-codec", - "parking_lot 
0.12.1", + "parking_lot 0.12.2", "sp-core", ] @@ -7337,7 +7352,7 @@ dependencies = [ "futures", "libp2p", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "sc-utils", @@ -7357,7 +7372,7 @@ dependencies = [ "lazy_static", "libc", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "regex", "rustc-hash", "sc-client-api", @@ -7383,7 +7398,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -7397,7 +7412,7 @@ dependencies = [ "linked-hash-map", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sc-client-api", "sc-transaction-pool-api", "sc-utils", @@ -7438,16 +7453,16 @@ dependencies = [ "futures-timer", "lazy_static", "log", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "prometheus", "sp-arithmetic", ] [[package]] name = "scale-info" -version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" +checksum = "eca070c12893629e2cc820a9761bedf6ce1dcddc9852984d1dc734b8bd9bd024" dependencies = [ "bitvec", "cfg-if", @@ -7459,11 +7474,11 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.2" +version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" +checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 1.3.1", + "proc-macro-crate 3.1.0", "proc-macro2", "quote", "syn 1.0.109", @@ -7480,9 +7495,9 @@ dependencies = [ [[package]] name = "schnellru" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "772575a524feeb803e5b0fcbc6dd9f367e579488197c94c6e4023aad2305774d" +checksum = "c9a8ef13a93c54d20580de1e5c413e624e53121d42fc7e2c11d10ef7f8b02367" dependencies = [ "ahash", "cfg-if", @@ 
-7596,11 +7611,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.5.0", "core-foundation", "core-foundation-sys", "libc", @@ -7609,9 +7624,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -7637,9 +7652,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92d43fe69e652f3df9bdc2b85b2854a0825b86e4fb76bc44d945137d053639ca" +checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" dependencies = [ "serde", ] @@ -8126,7 +8141,7 @@ dependencies = [ "frame-support", "frame-system", "frame-system-rpc-runtime-api", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "pallet-authorship", "pallet-babe", "pallet-grandpa", @@ -8191,7 +8206,7 @@ version = "0.1.0" dependencies = [ "frame-support", "frame-system", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "pallet-babe", "pallet-grandpa", "parity-scale-codec", @@ -8227,9 +8242,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.198" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" +checksum = "226b61a0d411b2ba5ff6d7f73a476ac4f8bb900373459cd00fab8512828ba395" dependencies = [ "serde_derive", ] @@ -8245,20 +8260,20 @@ dependencies = [ 
[[package]] name = "serde_derive" -version = "1.0.198" +version = "1.0.202" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" +checksum = "6048858004bcff69094cd972ed40a32500f153bd3be9f716b2eed2e8217c4838" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] name = "serde_json" -version = "1.0.116" +version = "1.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" +checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" dependencies = [ "itoa", "ryu", @@ -8273,14 +8288,14 @@ checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] name = "serde_spanned" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" +checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" dependencies = [ "serde", ] @@ -8299,11 +8314,11 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.7.0" +version = "3.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee80b0e361bbf88fd2f6e242ccd19cfda072cb0faa6ae694ecee08199938569a" +checksum = "0ad483d2ab0149d5a5ebcd9972a3852711e0153d863bf5a5d0391d28883c4a20" dependencies = [ - "base64 0.21.7", + "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", @@ -8361,9 +8376,9 @@ dependencies = [ [[package]] name = "sha3-asm" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +checksum = "a9b57fd861253bff08bb1919e995f90ba8f4889de2726091c8876f3a4e823b40" dependencies = [ "cc", "cfg-if", @@ 
-8386,9 +8401,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -8492,9 +8507,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.6" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", "windows-sys 0.52.0", @@ -8548,7 +8563,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -8609,7 +8624,7 @@ dependencies = [ "futures", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "schnellru", "sp-api", "sp-consensus", @@ -8702,7 +8717,7 @@ dependencies = [ "log", "merlin", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "paste", "primitive-types", "rand", @@ -8744,7 +8759,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -8753,7 +8768,7 @@ version = "4.0.0-dev" source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "kvdb", - "parking_lot 0.12.1", + "parking_lot 0.12.2", ] [[package]] @@ -8763,7 +8778,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -8830,7 +8845,7 @@ version = "0.27.0" source = 
"git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46148aa8c7d737a" dependencies = [ "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "sp-core", "sp-externalities", "thiserror", @@ -8935,7 +8950,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -8975,7 +8990,7 @@ dependencies = [ "hash-db", "log", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "smallvec", "sp-core", @@ -9047,12 +9062,12 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "ahash", "hash-db", - "hashbrown 0.14.3", + "hashbrown 0.14.5", "lazy_static", "memory-db", "nohash-hasher", "parity-scale-codec", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "scale-info", "schnellru", "sp-core", @@ -9088,7 +9103,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9206,7 +9221,7 @@ dependencies = [ name = "std-shims" version = "0.1.1" dependencies = [ - "hashbrown 0.14.3", + "hashbrown 0.14.5", "spin 0.9.8", ] @@ -9267,7 +9282,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9355,9 +9370,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.60" +version = "2.0.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" +checksum = "d2863d96a84c6439701d7a38f9de935ec562c8832cc55d1dde0f513b52fad106" dependencies = [ "proc-macro2", "quote", @@ -9366,14 +9381,14 @@ dependencies = [ [[package]] name = "syn-solidity" -version = "0.7.2" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aa0cefd02f532035d83cfec82647c6eb53140b0485220760e669f4bad489e36" +checksum = "b8db114c44cf843a8bacd37a146e37987a0b823a0e8bc4fdc610c9c72ab397a5" dependencies = [ "paste", 
"proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9385,7 +9400,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9477,22 +9492,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.59" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9597,10 +9612,10 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project-lite 0.2.14", "signal-hook-registry", - "socket2 0.5.6", + "socket2 0.5.7", "tokio-macros", "windows-sys 0.48.0", ] @@ -9613,7 +9628,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9622,7 +9637,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.5", + "rustls 0.23.7", "rustls-pki-types", "tokio", ] @@ -9641,9 +9656,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.10" +version = "0.7.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419f34732d9eb6ee4c3578b7989078579b7f039cbbb9ca2c4da015749371e15" +checksum = 
"9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" dependencies = [ "bytes", "futures-core", @@ -9651,7 +9666,6 @@ dependencies = [ "futures-sink", "pin-project-lite 0.2.14", "tokio", - "tracing", ] [[package]] @@ -9677,9 +9691,9 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" dependencies = [ "serde", ] @@ -9774,7 +9788,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -9967,7 +9981,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "rand", "resolv-conf", "smallvec", @@ -10054,9 +10068,9 @@ dependencies = [ [[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "68f5e5f3158ecfd4b8ff6fe086db7c8467a2dfdac97fe420f2b7c4aa97af66d6" [[package]] name = "unicode-xid" @@ -10200,7 +10214,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", "wasm-bindgen-shared", ] @@ -10234,7 +10248,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10325,7 +10339,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dfcdb72d96f01e6c85b6bf20102e7423bdbaad5c337301bab2bbf253d26413c" dependencies = [ "indexmap 2.2.6", - "semver 1.0.22", + "semver 1.0.23", ] [[package]] @@ -10543,7 +10557,7 @@ checksum = 
"ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -10576,9 +10590,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.16" +version = "0.7.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81a1851a719f11d1d2fea40e15c72f6c00de8c142d7ac47c1441cc7e4d0d5bc6" +checksum = "21e005a4cc35784183a9e39cb22e9a9c46353ef6a7f113fd8d36ddc58c15ef3c" dependencies = [ "bytemuck", "safe_arch", @@ -10608,11 +10622,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" dependencies = [ - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -10792,9 +10806,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" +checksum = "c3c52e9c97a68071b23e836c9380edae937f17b9c4667bd021973efc689f618d" [[package]] name = "winreg" @@ -10868,7 +10882,7 @@ dependencies = [ "futures", "log", "nohash-hasher", - "parking_lot 0.12.1", + "parking_lot 0.12.2", "pin-project", "rand", "static_assertions", @@ -10892,22 +10906,22 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.7.32" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.32" +version = "0.7.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] @@ -10927,7 +10941,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.60", + "syn 2.0.65", ] [[package]] diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index f600c21d..3366f072 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -29,21 +29,21 @@ frost = { package = "modular-frost", path = "../../crypto/frost", default-featur alloy-core = { version = "0.7", default-features = false } alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] } -alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false, features = ["k256"] } -alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } -alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } -alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, features = ["k256"] } +alloy-network = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = 
"64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } -alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false, optional = true } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false, optional = true } [dev-dependencies] frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } -alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } [features] tests = ["alloy-node-bindings", "frost/tests"] diff --git a/coins/ethereum/alloy-simple-request-transport/Cargo.toml b/coins/ethereum/alloy-simple-request-transport/Cargo.toml index 0d9ea6b8..f44427f7 100644 --- a/coins/ethereum/alloy-simple-request-transport/Cargo.toml +++ b/coins/ethereum/alloy-simple-request-transport/Cargo.toml @@ -21,8 +21,8 @@ tower = "0.4" serde_json = { version = "1", default-features = false } simple-request = { path = "../../../common/request", default-features = false } -alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "b79db21734cffddc11753fe62ba571565c896f42", default-features = false } -alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = 
"b79db21734cffddc11753fe62ba571565c896f42", default-features = false } +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "64feb9bc51c8021ea08535694c44de84222f474e", default-features = false } [features] default = ["tls"] diff --git a/coins/ethereum/src/deployer.rs b/coins/ethereum/src/deployer.rs index 1a16664c..7c0bf16c 100644 --- a/coins/ethereum/src/deployer.rs +++ b/coins/ethereum/src/deployer.rs @@ -58,14 +58,7 @@ impl Deployer { /// Construct a new view of the `Deployer`. pub async fn new(provider: Arc>) -> Result, Error> { let address = Self::address(); - #[cfg(not(test))] - let required_block = BlockNumberOrTag::Finalized; - #[cfg(test)] - let required_block = BlockNumberOrTag::Latest; - let code = provider - .get_code_at(address.into(), required_block.into()) - .await - .map_err(|_| Error::ConnectionError)?; + let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?; // Contract has yet to be deployed if code.is_empty() { return Ok(None); diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs index 085ef3a2..e88e90e5 100644 --- a/coins/ethereum/src/tests/mod.rs +++ b/coins/ethereum/src/tests/mod.rs @@ -11,7 +11,7 @@ use alloy_core::{ }; use alloy_consensus::{SignableTransaction, TxLegacy}; -use alloy_rpc_types::{BlockNumberOrTag, TransactionReceipt}; +use alloy_rpc_types::TransactionReceipt; use alloy_simple_request_transport::SimpleRequest; use alloy_provider::{Provider, RootProvider}; @@ -57,15 +57,14 @@ pub async fn send( // let chain_id = provider.get_chain_id().await.unwrap(); // tx.chain_id = Some(chain_id); tx.chain_id = None; - tx.nonce = - provider.get_transaction_count(address, BlockNumberOrTag::Latest.into()).await.unwrap(); + tx.nonce = provider.get_transaction_count(address).await.unwrap(); // 100 gwei tx.gas_price = 
100_000_000_000u128; let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); assert!( - provider.get_balance(address, BlockNumberOrTag::Latest.into()).await.unwrap() > + provider.get_balance(address).await.unwrap() > ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) ); diff --git a/docs/Gemfile.lock b/docs/Gemfile.lock index 34e40cd9..f6d6627d 100644 --- a/docs/Gemfile.lock +++ b/docs/Gemfile.lock @@ -13,7 +13,7 @@ GEM forwardable-extended (2.6.0) google-protobuf (3.25.3-x86_64-linux) http_parser.rb (0.8.0) - i18n (1.14.4) + i18n (1.14.5) concurrent-ruby (~> 1.0) jekyll (4.3.3) addressable (~> 2.4) @@ -55,17 +55,19 @@ GEM mercenary (0.4.0) pathutil (0.16.2) forwardable-extended (~> 2.6) - public_suffix (5.0.4) - rake (13.1.0) + public_suffix (5.0.5) + rake (13.2.1) rb-fsevent (0.11.2) - rb-inotify (0.10.1) + rb-inotify (0.11.1) ffi (~> 1.0) - rexml (3.2.6) - rouge (4.2.0) + rexml (3.2.8) + strscan (>= 3.0.9) + rouge (4.2.1) safe_yaml (1.0.5) sass-embedded (1.63.6) google-protobuf (~> 3.23) rake (>= 13.0.0) + strscan (3.1.0) terminal-table (3.0.2) unicode-display_width (>= 1.1.1, < 3) unicode-display_width (2.5.0) From f93214012d3c1edf975ffd2f8f9e2cd6242455d3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 06:44:59 -0400 Subject: [PATCH 119/126] Use ScriptBuf over Address where possible --- coins/bitcoin/src/wallet/send.rs | 26 +++---- coins/bitcoin/tests/wallet.rs | 21 +++--- processor/src/networks/bitcoin.rs | 25 ++++--- processor/src/tests/literal/mod.rs | 2 +- substrate/client/src/networks/bitcoin.rs | 78 +++++++++------------ tests/full-stack/src/tests/mint_and_burn.rs | 14 ++-- tests/processor/src/networks.rs | 16 ++--- 7 files changed, 80 insertions(+), 102 deletions(-) diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index d547c56a..1980a554 100644 --- 
a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -18,7 +18,7 @@ use bitcoin::{ absolute::LockTime, script::{PushBytesBuf, ScriptBuf}, transaction::{Version, Transaction}, - OutPoint, Sequence, Witness, TxIn, Amount, TxOut, Address, + OutPoint, Sequence, Witness, TxIn, Amount, TxOut, }; use crate::{ @@ -61,7 +61,11 @@ pub struct SignableTransaction { } impl SignableTransaction { - fn calculate_weight(inputs: usize, payments: &[(Address, u64)], change: Option<&Address>) -> u64 { + fn calculate_weight( + inputs: usize, + payments: &[(ScriptBuf, u64)], + change: Option<&ScriptBuf>, + ) -> u64 { // Expand this a full transaction in order to use the bitcoin library's weight function let mut tx = Transaction { version: Version(2), @@ -86,14 +90,14 @@ impl SignableTransaction { // The script pub key is not of a fixed size and does have to be used here .map(|payment| TxOut { value: Amount::from_sat(payment.1), - script_pubkey: payment.0.script_pubkey(), + script_pubkey: payment.0.clone(), }) .collect(), }; if let Some(change) = change { // Use a 0 value since we're currently unsure what the change amount will be, and since // the value is fixed size (so any value could be used here) - tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.script_pubkey() }); + tx.output.push(TxOut { value: Amount::ZERO, script_pubkey: change.clone() }); } u64::from(tx.weight()) } @@ -121,8 +125,8 @@ impl SignableTransaction { /// If data is specified, an OP_RETURN output will be added with it. 
pub fn new( mut inputs: Vec, - payments: &[(Address, u64)], - change: Option<&Address>, + payments: &[(ScriptBuf, u64)], + change: Option, data: Option>, fee_per_weight: u64, ) -> Result { @@ -159,10 +163,7 @@ impl SignableTransaction { let payment_sat = payments.iter().map(|payment| payment.1).sum::(); let mut tx_outs = payments .iter() - .map(|payment| TxOut { - value: Amount::from_sat(payment.1), - script_pubkey: payment.0.script_pubkey(), - }) + .map(|payment| TxOut { value: Amount::from_sat(payment.1), script_pubkey: payment.0.clone() }) .collect::>(); // Add the OP_RETURN output @@ -213,12 +214,11 @@ impl SignableTransaction { // If there's a change address, check if there's change to give it if let Some(change) = change { - let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(change)); + let weight_with_change = Self::calculate_weight(tx_ins.len(), payments, Some(&change)); let fee_with_change = fee_per_weight * weight_with_change; if let Some(value) = input_sat.checked_sub(payment_sat + fee_with_change) { if value >= DUST { - tx_outs - .push(TxOut { value: Amount::from_sat(value), script_pubkey: change.script_pubkey() }); + tx_outs.push(TxOut { value: Amount::from_sat(value), script_pubkey: change }); weight = weight_with_change; needed_fee = fee_with_change; } diff --git a/coins/bitcoin/tests/wallet.rs b/coins/bitcoin/tests/wallet.rs index 8aa2546e..9db004f5 100644 --- a/coins/bitcoin/tests/wallet.rs +++ b/coins/bitcoin/tests/wallet.rs @@ -192,7 +192,7 @@ async_sequential! { assert_eq!(output.offset(), Scalar::ZERO); let inputs = vec![output]; - let addr = || Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap(); + let addr = || p2tr_script_buf(key).unwrap(); let payments = vec![(addr(), 1000)]; assert!(SignableTransaction::new(inputs.clone(), &payments, None, None, FEE).is_ok()); @@ -205,7 +205,7 @@ async_sequential! 
{ // No change assert!(SignableTransaction::new(inputs.clone(), &[(addr(), 1000)], None, None, FEE).is_ok()); // Consolidation TX - assert!(SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, FEE).is_ok()); + assert!(SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, FEE).is_ok()); // Data assert!(SignableTransaction::new(inputs.clone(), &[], None, Some(vec![]), FEE).is_ok()); // No outputs @@ -228,7 +228,7 @@ async_sequential! { ); assert_eq!( - SignableTransaction::new(inputs.clone(), &[], Some(&addr()), None, 0), + SignableTransaction::new(inputs.clone(), &[], Some(addr()), None, 0), Err(TransactionError::TooLowFee), ); @@ -260,20 +260,19 @@ async_sequential! { // Declare payments, change, fee let payments = [ - (Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap(), 1005), - (Address::from_script(&p2tr_script_buf(offset_key).unwrap(), Network::Regtest).unwrap(), 1007) + (p2tr_script_buf(key).unwrap(), 1005), + (p2tr_script_buf(offset_key).unwrap(), 1007) ]; let change_offset = scanner.register_offset(Scalar::random(&mut OsRng)).unwrap(); let change_key = key + (ProjectivePoint::GENERATOR * change_offset); - let change_addr = - Address::from_script(&p2tr_script_buf(change_key).unwrap(), Network::Regtest).unwrap(); + let change_addr = p2tr_script_buf(change_key).unwrap(); // Create and sign the TX let tx = SignableTransaction::new( vec![output.clone(), offset_output.clone()], &payments, - Some(&change_addr), + Some(change_addr.clone()), None, FEE ).unwrap(); @@ -298,7 +297,7 @@ async_sequential! { for ((output, scanned), payment) in tx.output.iter().zip(outputs.iter()).zip(payments.iter()) { assert_eq!( output, - &TxOut { script_pubkey: payment.0.script_pubkey(), value: Amount::from_sat(payment.1) }, + &TxOut { script_pubkey: payment.0.clone(), value: Amount::from_sat(payment.1) }, ); assert_eq!(scanned.value(), payment.1 ); } @@ -313,7 +312,7 @@ async_sequential! 
{ input_value - payments.iter().map(|payment| payment.1).sum::() - needed_fee; assert_eq!( tx.output[2], - TxOut { script_pubkey: change_addr.script_pubkey(), value: Amount::from_sat(change_amount) }, + TxOut { script_pubkey: change_addr, value: Amount::from_sat(change_amount) }, ); // This also tests send_raw_transaction and get_transaction, which the RPC test can't @@ -343,7 +342,7 @@ async_sequential! { &SignableTransaction::new( vec![output], &[], - Some(&Address::from_script(&p2tr_script_buf(key).unwrap(), Network::Regtest).unwrap()), + Some(p2tr_script_buf(key).unwrap()), Some(data.clone()), FEE ).unwrap() diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index b7c6c2fb..e89c9138 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -20,8 +20,7 @@ use bitcoin_serai::{ key::{Parity, XOnlyPublicKey}, consensus::{Encodable, Decodable}, script::Instruction, - address::Address as BAddress, - Transaction, Block, Network as BNetwork, ScriptBuf, + Transaction, Block, ScriptBuf, opcodes::all::{OP_SHA256, OP_EQUALVERIFY}, }, wallet::{ @@ -454,7 +453,7 @@ impl Bitcoin { match BSignableTransaction::new( inputs.iter().map(|input| input.output.clone()).collect(), &payments, - change.as_ref().map(AsRef::as_ref), + change.clone().map(Into::into), None, fee.0, ) { @@ -535,6 +534,8 @@ impl Bitcoin { input_index: usize, private_key: &PrivateKey, ) -> ScriptBuf { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + let public_key = PublicKey::from_private_key(SECP256K1, private_key); let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest); @@ -581,13 +582,9 @@ const MAX_OUTPUTS: usize = 520; fn address_from_key(key: ProjectivePoint) -> Address { Address::new( - BAddress::from_script( - &p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), - BNetwork::Bitcoin, - ) - .expect("couldn't go from p2tr script buf to address"), + 
p2tr_script_buf(key).expect("creating address from key which isn't properly tweaked"), ) - .expect("couldn't create Serai-representable address for bitcoin address") + .expect("couldn't create Serai-representable address for P2TR script") } #[async_trait] @@ -733,9 +730,7 @@ impl Network for Bitcoin { } tx.unwrap().output.swap_remove(usize::try_from(input.previous_output.vout).unwrap()) }; - BAddress::from_script(&spent_output.script_pubkey, BNetwork::Bitcoin) - .ok() - .and_then(Address::new) + Address::new(spent_output.script_pubkey) }; let data = Self::extract_serai_data(tx); for output in &mut outputs { @@ -903,6 +898,8 @@ impl Network for Bitcoin { #[cfg(test)] async fn mine_block(&self) { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + self .rpc .rpc_call::>( @@ -915,6 +912,8 @@ impl Network for Bitcoin { #[cfg(test)] async fn test_send(&self, address: Address) -> Block { + use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress}; + let secret_key = SecretKey::new(&mut rand_core::OsRng); let private_key = PrivateKey::new(secret_key, BNetwork::Regtest); let public_key = PublicKey::from_private_key(SECP256K1, &private_key); @@ -939,7 +938,7 @@ impl Network for Bitcoin { }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: address.as_ref().script_pubkey(), + script_pubkey: address.clone().into(), }], }; tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key); diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 238dde1a..15c27b8c 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -135,7 +135,7 @@ mod bitcoin { }], output: vec![TxOut { value: tx.output[0].value - BAmount::from_sat(10000), - script_pubkey: serai_btc_address.as_ref().script_pubkey(), + script_pubkey: serai_btc_address.into(), }], }; diff --git a/substrate/client/src/networks/bitcoin.rs 
b/substrate/client/src/networks/bitcoin.rs index 10965bdf..9f5ff1dd 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -7,19 +7,23 @@ use bitcoin::{ PubkeyHash, ScriptHash, network::Network, WitnessVersion, WitnessProgram, ScriptBuf, - address::{AddressType, NetworkChecked, Address as BAddressGeneric}, + address::{AddressType, NetworkChecked, Address as BAddress}, }; -type BAddress = BAddressGeneric; - #[derive(Clone, Eq, Debug)] -pub struct Address(BAddress); +pub struct Address(ScriptBuf); impl PartialEq for Address { fn eq(&self, other: &Self) -> bool { // Since Serai defines the Bitcoin-address specification as a variant of the script alone, // define equivalency as the script alone - self.0.script_pubkey() == other.0.script_pubkey() + self.0 == other.0 + } +} + +impl From
for ScriptBuf { + fn from(addr: Address) -> ScriptBuf { + addr.0 } } @@ -27,10 +31,11 @@ impl FromStr for Address { type Err = (); fn from_str(str: &str) -> Result { Address::new( - BAddressGeneric::from_str(str) + BAddress::from_str(str) .map_err(|_| ())? .require_network(Network::Bitcoin) - .map_err(|_| ())?, + .map_err(|_| ())? + .script_pubkey(), ) .ok_or(()) } @@ -38,7 +43,9 @@ impl FromStr for Address { impl fmt::Display for Address { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.0.fmt(f) + BAddress::::from_script(&self.0, Network::Bitcoin) + .map_err(|_| fmt::Error)? + .fmt(f) } } @@ -57,45 +64,40 @@ impl TryFrom> for Address { fn try_from(data: Vec) -> Result { Ok(Address(match EncodedAddress::decode(&mut data.as_ref()).map_err(|_| ())? { EncodedAddress::P2PKH(hash) => { - BAddress::p2pkh(PubkeyHash::from_raw_hash(Hash::from_byte_array(hash)), Network::Bitcoin) + ScriptBuf::new_p2pkh(&PubkeyHash::from_raw_hash(Hash::from_byte_array(hash))) } EncodedAddress::P2SH(hash) => { - let script_hash = ScriptHash::from_raw_hash(Hash::from_byte_array(hash)); - let res = - BAddress::from_script(&ScriptBuf::new_p2sh(&script_hash), Network::Bitcoin).unwrap(); - debug_assert_eq!(res.script_hash(), Some(script_hash)); - res + ScriptBuf::new_p2sh(&ScriptHash::from_raw_hash(Hash::from_byte_array(hash))) + } + EncodedAddress::P2WPKH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2WSH(hash) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V0, &hash).unwrap()) + } + EncodedAddress::P2TR(key) => { + ScriptBuf::new_witness_program(&WitnessProgram::new(WitnessVersion::V1, &key).unwrap()) } - EncodedAddress::P2WPKH(hash) => BAddress::from_witness_program( - WitnessProgram::new(WitnessVersion::V0, &hash).unwrap(), - Network::Bitcoin, - ), - EncodedAddress::P2WSH(hash) => BAddress::from_witness_program( - WitnessProgram::new(WitnessVersion::V0, &hash).unwrap(), 
- Network::Bitcoin, - ), - EncodedAddress::P2TR(key) => BAddress::from_witness_program( - WitnessProgram::new(WitnessVersion::V1, &key).unwrap(), - Network::Bitcoin, - ), })) } } fn try_to_vec(addr: &Address) -> Result, ()> { let witness_program = |addr: &Address| { - let script = addr.0.script_pubkey(); - let program_push = script.as_script().instructions().last().ok_or(())?.map_err(|_| ())?; + let program_push = addr.0.as_script().instructions().last().ok_or(())?.map_err(|_| ())?; let program = program_push.push_bytes().ok_or(())?.as_bytes(); Ok::<_, ()>(program.to_vec()) }; + + let parsed_addr = + BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; Ok( - (match addr.0.address_type() { + (match parsed_addr.address_type() { Some(AddressType::P2pkh) => { - EncodedAddress::P2PKH(*addr.0.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) + EncodedAddress::P2PKH(*parsed_addr.pubkey_hash().unwrap().as_raw_hash().as_byte_array()) } Some(AddressType::P2sh) => { - EncodedAddress::P2SH(*addr.0.script_hash().unwrap().as_raw_hash().as_byte_array()) + EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array()) } Some(AddressType::P2wpkh) => { let program = witness_program(addr)?; @@ -127,20 +129,8 @@ impl From
for Vec { } } -impl From
for BAddress { - fn from(addr: Address) -> BAddress { - addr.0 - } -} - -impl AsRef for Address { - fn as_ref(&self) -> &BAddress { - &self.0 - } -} - impl Address { - pub fn new(address: BAddress) -> Option { + pub fn new(address: ScriptBuf) -> Option { let res = Self(address); if try_to_vec(&res).is_ok() { return Some(res); diff --git a/tests/full-stack/src/tests/mint_and_burn.rs b/tests/full-stack/src/tests/mint_and_burn.rs index e1153bae..4093e47d 100644 --- a/tests/full-stack/src/tests/mint_and_burn.rs +++ b/tests/full-stack/src/tests/mint_and_burn.rs @@ -454,19 +454,17 @@ async fn mint_and_burn_test() { // Create a random Bitcoin/Monero address let bitcoin_addr = { - use bitcoin_serai::bitcoin::{network::Network, key::PublicKey, address::Address}; - // Uses Network::Bitcoin since it doesn't actually matter, Serai strips it out - // TODO: Move Serai to ScriptBuf from Address - Address::p2pkh( - loop { + use bitcoin_serai::bitcoin::{key::PublicKey, ScriptBuf}; + ScriptBuf::new_p2pkh( + &(loop { let mut bytes = [0; 33]; OsRng.fill_bytes(&mut bytes); bytes[0] %= 4; if let Ok(key) = PublicKey::from_slice(&bytes) { break key; } - }, - Network::Bitcoin, + }) + .pubkey_hash(), ) }; @@ -559,7 +557,7 @@ async fn mint_and_burn_test() { let received_output = block.txdata[1] .output .iter() - .find(|output| output.script_pubkey == bitcoin_addr.script_pubkey()) + .find(|output| output.script_pubkey == bitcoin_addr) .unwrap(); let tx_fee = 1_100_000_00 - diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 81c18bfa..3ad2b59a 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -261,7 +261,6 @@ impl Wallet { OutPoint, Sequence, Witness, TxIn, Amount, TxOut, absolute::LockTime, transaction::{Version, Transaction}, - Network, Address, }; const AMOUNT: u64 = 100000000; @@ -281,13 +280,11 @@ impl Wallet { }, TxOut { value: Amount::from_sat(AMOUNT), - script_pubkey: Address::p2tr_tweaked( + script_pubkey: 
ScriptBuf::new_p2tr_tweaked( TweakedPublicKey::dangerous_assume_tweaked( XOnlyPublicKey::from_slice(&to[1 ..]).unwrap(), ), - Network::Bitcoin, - ) - .script_pubkey(), + ), }, ], }; @@ -521,13 +518,8 @@ impl Wallet { match self { Wallet::Bitcoin { public_key, .. } => { - use bitcoin_serai::bitcoin::{Network, Address}; - ExternalAddress::new( - networks::bitcoin::Address::new(Address::p2pkh(public_key, Network::Regtest)) - .unwrap() - .into(), - ) - .unwrap() + use bitcoin_serai::bitcoin::ScriptBuf; + ExternalAddress::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()).into()).unwrap() } Wallet::Ethereum { key, .. } => ExternalAddress::new( ethereum_serai::crypto::address(&(ciphersuite::Secp256k1::generator() * key)).into(), From 09aac202931fc9ad134b5a001e9ba40495cce9fa Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 07:06:13 -0400 Subject: [PATCH 120/126] Set the BufReader capacity to 0 Fixes issues with bitcoin. We only use a BufReader as it's the only way to use a std::io::Read generic as a bitcoin::io::Read object. 
--- coins/bitcoin/src/wallet/mod.rs | 2 +- processor/src/networks/bitcoin.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/coins/bitcoin/src/wallet/mod.rs b/coins/bitcoin/src/wallet/mod.rs index ed6f00ce..195182ff 100644 --- a/coins/bitcoin/src/wallet/mod.rs +++ b/coins/bitcoin/src/wallet/mod.rs @@ -93,7 +93,7 @@ impl ReceivedOutput { let output; let outpoint; { - let mut buf_r = BufReader::new(r); + let mut buf_r = BufReader::with_capacity(0, r); output = TxOut::consensus_decode(&mut buf_r).map_err(|_| io::Error::other("invalid TxOut"))?; outpoint = diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index e89c9138..183444b1 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs @@ -242,7 +242,7 @@ impl EventualityTrait for Eventuality { buf } fn read_completion(reader: &mut R) -> io::Result { - Transaction::consensus_decode(&mut io::BufReader::new(reader)) + Transaction::consensus_decode(&mut io::BufReader::with_capacity(0, reader)) .map_err(|e| io::Error::other(format!("{e}"))) } } From a473800c26223ef91a40099382b51f3e7fd8e258 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 08:07:32 -0400 Subject: [PATCH 121/126] More aggressive cargo update Adds a few deps which are fine. Patches an old parking_lot(_core) version. 
--- Cargo.lock | 268 +++++++++++++++++----------- Cargo.toml | 4 + patches/parking_lot/Cargo.toml | 17 ++ patches/parking_lot/src/lib.rs | 1 + patches/parking_lot_core/Cargo.toml | 17 ++ patches/parking_lot_core/src/lib.rs | 1 + 6 files changed, 205 insertions(+), 103 deletions(-) create mode 100644 patches/parking_lot/Cargo.toml create mode 100644 patches/parking_lot/src/lib.rs create mode 100644 patches/parking_lot_core/Cargo.toml create mode 100644 patches/parking_lot_core/src/lib.rs diff --git a/Cargo.lock b/Cargo.lock index dedc21d1..4b7997cd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -477,15 +477,16 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.13" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d96bd03f33fe50a863e394ee9718a706f988b9079b20c3784fb726e7678b62fb" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] @@ -842,9 +843,9 @@ checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line 0.21.0", "cc", @@ -1360,11 +1361,13 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.88" +version = "1.0.98" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02f341c093d19155a6e41631ce5971aac4e9a868262212153124c15fa22d1cdc" +checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f" dependencies = [ + "jobserver", "libc", + "once_cell", ] [[package]] @@ -1433,7 +1436,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 
0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -1865,9 +1868,9 @@ dependencies = [ [[package]] name = "cxx" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21db378d04296a84d8b7d047c36bb3954f0b46529db725d7e62fb02f9ba53ccc" +checksum = "bb497fad022245b29c2a0351df572e2d67c1046bcef2260ebc022aec81efea82" dependencies = [ "cc", "cxxbridge-flags", @@ -1877,9 +1880,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e5262a7fa3f0bae2a55b767c223ba98032d7c328f5c13fa5cdc980b77fc0658" +checksum = "9327c7f9fbd6329a200a5d4aa6f674c60ab256525ff0084b52a889d4e4c60cee" dependencies = [ "cc", "codespan-reporting", @@ -1892,15 +1895,15 @@ dependencies = [ [[package]] name = "cxxbridge-flags" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be8dcadd2e2fb4a501e1d9e93d6e88e6ea494306d8272069c92d5a9edf8855c0" +checksum = "688c799a4a846f1c0acb9f36bb9c6272d9b3d9457f3633c7753c6057270df13c" [[package]] name = "cxxbridge-macro" -version = "1.0.121" +version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc" +checksum = "928bc249a7e3cd554fd2e8e08a426e9670c50bbfc9a621653cfa9accc9641783" dependencies = [ "proc-macro2", "quote", @@ -1933,7 +1936,7 @@ dependencies = [ "hashbrown 0.14.5", "lock_api", "once_cell", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] @@ -2974,15 +2977,16 @@ dependencies = [ [[package]] name = "generator" -version = "0.7.5" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc16584ff22b460a382b7feec54b23d2908d858152e5739a120b949293bd74e" +checksum = "186014d53bc231d0090ef8d6f03e0920c54d85a5ed22f4f2f74315ec56cf83fb" dependencies = [ "cc", 
+ "cfg-if", "libc", "log", "rustversion", - "windows 0.48.0", + "windows 0.54.0", ] [[package]] @@ -3007,15 +3011,25 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.12" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "190092ea657667030ac6a35e305e62fc4dd69fd98ac98631e5d3a2b1575a12b5" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "libc", "wasi", ] +[[package]] +name = "getrandom_or_panic" +version = "0.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea1015b5a70616b688dc230cfe50c8af89d972cb132d5a622814d29773b10b9" +dependencies = [ + "rand", + "rand_core", +] + [[package]] name = "ghash" version = "0.5.1" @@ -3321,7 +3335,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite 0.2.14", - "socket2 0.4.10", + "socket2 0.5.7", "tokio", "tower-service", "tracing", @@ -3409,7 +3423,7 @@ dependencies = [ "iana-time-zone-haiku", "js-sys", "wasm-bindgen", - "windows-core", + "windows-core 0.51.1", ] [[package]] @@ -3607,6 +3621,12 @@ checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" name = "is-terminal" version = "0.4.10" +[[package]] +name = "is_terminal_polyfill" +version = "1.70.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" + [[package]] name = "itertools" version = "0.10.5" @@ -3631,6 +3651,15 @@ version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +[[package]] +name = "jobserver" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +dependencies = [ + "libc", +] + [[package]] name = "js-sys" version = "0.3.69" @@ -4303,6 +4332,16 @@ dependencies = [ "yamux", ] 
+[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.5.0", + "libc", +] + [[package]] name = "librocksdb-sys" version = "0.16.0+8.10.0" @@ -4387,9 +4426,9 @@ checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "loom" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e045d70ddfbc984eacfa964ded019534e8f6cbf36f6410aee0ed5cefa5a9175" +checksum = "419e0dc8046cb947daa77eb95ae174acfbddb7673b4151f56d1eed8e93fbfaca" dependencies = [ "cfg-if", "generator", @@ -4626,9 +4665,9 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d811f3e15f28568be3407c8e7fdb6514c1cda3cb30683f15b6a1a1dc4ea14a7" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] @@ -5034,24 +5073,29 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "608e7659b5c3d7cba262d894801b9ec9d00de989e8a82bd4bef91d08da45cdc0" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ - "autocfg", "num-integer", "num-traits", ] [[package]] name = "num-complex" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c6602fda94a57c990fe0df199a035d83576b496aa29f4e634a8ac6004e68a6" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" dependencies = [ "num-traits", ] +[[package]] +name = "num-conv" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "51d515d32fb182ee37cda2ccdcb92950d6a3c2893aa280e540671c2cd0f3b1d9" + [[package]] name = "num-format" version = "0.4.4" @@ -5073,11 +5117,10 @@ dependencies = [ [[package]] name = "num-rational" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" +checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "autocfg", "num-bigint", "num-integer", "num-traits", @@ -5388,12 +5431,8 @@ checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" [[package]] name = "parking_lot" version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" dependencies = [ - "instant", - "lock_api", - "parking_lot_core 0.8.6", + "parking_lot 0.12.2", ] [[package]] @@ -5403,34 +5442,27 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7e4af0ca4f6caed20e900d564c242b8e5d4903fdacf31d3daf527b66fe6f42fb" dependencies = [ "lock_api", - "parking_lot_core 0.9.9", + "parking_lot_core 0.9.10", ] [[package]] name = "parking_lot_core" version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" dependencies = [ - "cfg-if", - "instant", - "libc", - "redox_syscall 0.2.16", - "smallvec", - "winapi", + "parking_lot_core 0.9.10", ] [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.4.1", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] 
[[package]] @@ -6088,15 +6120,6 @@ dependencies = [ "yasna", ] -[[package]] -name = "redox_syscall" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" -dependencies = [ - "bitflags 1.3.2", -] - [[package]] name = "redox_syscall" version = "0.4.1" @@ -6107,13 +6130,22 @@ dependencies = [ ] [[package]] -name = "redox_users" -version = "0.4.3" +name = "redox_syscall" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +dependencies = [ + "bitflags 2.5.0", +] + +[[package]] +name = "redox_users" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ "getrandom", - "redox_syscall 0.2.16", + "libredox", "thiserror", ] @@ -7521,15 +7553,16 @@ dependencies = [ [[package]] name = "schnorrkel" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da18ffd9f2f5d01bc0b3050b37ce7728665f926b4dd1157fe3221b05737d924f" +checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ + "aead", "arrayref", "arrayvec", "curve25519-dalek", + "getrandom_or_panic", "merlin", - "rand", "rand_core", "serde_bytes", "sha2", @@ -9531,12 +9564,13 @@ dependencies = [ [[package]] name = "time" -version = "0.3.31" +version = "0.3.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f657ba42c3f86e7680e53c8cd3af8abbe56b5491790b46e22e19c0d57463583e" +checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" dependencies = [ "deranged", "itoa", + "num-conv", "powerfmt", "serde", "time-core", @@ -9551,10 +9585,11 @@ checksum = 
"ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" [[package]] name = "time-macros" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26197e33420244aeb70c3e8c78376ca46571bc4e701e4791c2cd9f57dcb3a43f" +checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" dependencies = [ + "num-conv", "time-core", ] @@ -10637,21 +10672,22 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows" -version = "0.48.0" +version = "0.51.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e686886bc078bc1b0b600cac0147aadb815089b6e4da64016cbd754b6342700f" +checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" dependencies = [ + "windows-core 0.51.1", "windows-targets 0.48.5", ] [[package]] name = "windows" -version = "0.51.1" +version = "0.54.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca229916c5ee38c2f2bc1e9d8f04df975b4bd93f9955dc69fabb5d91270045c9" +checksum = "9252e5725dbed82865af151df558e754e4a3c2c30818359eb17465f1346a1b49" dependencies = [ - "windows-core", - "windows-targets 0.48.5", + "windows-core 0.54.0", + "windows-targets 0.52.5", ] [[package]] @@ -10663,6 +10699,25 @@ dependencies = [ "windows-targets 0.48.5", ] +[[package]] +name = "windows-core" +version = "0.54.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12661b9c89351d684a50a8a643ce5f608e20243b9fb84687800163429f161d65" +dependencies = [ + "windows-result", + "windows-targets 0.52.5", +] + +[[package]] +name = "windows-result" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "749f0da9cc72d82e600d8d2e44cadd0b9eedb9038f71a1c58556ac1c5791813b" +dependencies = [ + "windows-targets 0.52.5", +] + [[package]] name = "windows-sys" version = "0.48.0" @@ -10678,7 +10733,7 @@ version = "0.52.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.4", + "windows-targets 0.52.5", ] [[package]] @@ -10698,17 +10753,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.4", - "windows_aarch64_msvc 0.52.4", - "windows_i686_gnu 0.52.4", - "windows_i686_msvc 0.52.4", - "windows_x86_64_gnu 0.52.4", - "windows_x86_64_gnullvm 0.52.4", - "windows_x86_64_msvc 0.52.4", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", + "windows_i686_gnullvm", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -10719,9 +10775,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -10731,9 +10787,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -10743,9 +10799,15 @@ 
checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -10755,9 +10817,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -10767,9 +10829,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -10779,9 +10841,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -10791,9 +10853,9 @@ 
checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.4" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" diff --git a/Cargo.toml b/Cargo.toml index 94b52ffb..d608cff8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,6 +2,8 @@ resolver = "2" members = [ # Version patches + "patches/parking_lot_core", + "patches/parking_lot", "patches/zstd", "patches/rocksdb", "patches/proc-macro-crate", @@ -112,6 +114,8 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev # Needed due to dockertest's usage of `Rc`s when we need `Arc`s dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" } +parking_lot_core = { path = "patches/parking_lot_core" } +parking_lot = { path = "patches/parking_lot" } # wasmtime pulls in an old version for this zstd = { path = "patches/zstd" } # Needed for WAL compression diff --git a/patches/parking_lot/Cargo.toml b/patches/parking_lot/Cargo.toml new file mode 100644 index 00000000..957b19bf --- /dev/null +++ b/patches/parking_lot/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "parking_lot" +version = "0.11.2" +description = "parking_lot which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lot" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +parking_lot = "0.12" diff --git a/patches/parking_lot/src/lib.rs b/patches/parking_lot/src/lib.rs new file mode 100644 index 00000000..df10a74d --- /dev/null +++ b/patches/parking_lot/src/lib.rs @@ -0,0 +1 @@ +pub 
use parking_lot::*; diff --git a/patches/parking_lot_core/Cargo.toml b/patches/parking_lot_core/Cargo.toml new file mode 100644 index 00000000..37dcc703 --- /dev/null +++ b/patches/parking_lot_core/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "parking_lot_core" +version = "0.8.6" +description = "parking_lot_core which patches to the latest update" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/patches/parking_lot_core" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +rust-version = "1.70" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[dependencies] +parking_lot_core = "0.9" diff --git a/patches/parking_lot_core/src/lib.rs b/patches/parking_lot_core/src/lib.rs new file mode 100644 index 00000000..bfecbfd8 --- /dev/null +++ b/patches/parking_lot_core/src/lib.rs @@ -0,0 +1 @@ +pub use parking_lot_core::*; From ac709b294573e2ba6c0b55ad4adef9af12c3e015 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Tue, 21 May 2024 08:49:57 -0400 Subject: [PATCH 122/126] Correct processor docker tests encoding of Bitcoin addresses in OutInstructions --- tests/processor/src/networks.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tests/processor/src/networks.rs b/tests/processor/src/networks.rs index 3ad2b59a..9af339b7 100644 --- a/tests/processor/src/networks.rs +++ b/tests/processor/src/networks.rs @@ -519,7 +519,12 @@ impl Wallet { match self { Wallet::Bitcoin { public_key, .. } => { use bitcoin_serai::bitcoin::ScriptBuf; - ExternalAddress::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash()).into()).unwrap() + ExternalAddress::new( + networks::bitcoin::Address::new(ScriptBuf::new_p2pkh(&public_key.pubkey_hash())) + .unwrap() + .into(), + ) + .unwrap() } Wallet::Ethereum { key, .. 
} => ExternalAddress::new( ethereum_serai::crypto::address(&(ciphersuite::Secp256k1::generator() * key)).into(), From 1d2beb3ee4e5ce260cdcd9dc77151d70eadc26a3 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Wed, 22 May 2024 18:50:11 -0400 Subject: [PATCH 123/126] Ethereum relayer server Causes send test to pass for the processor. --- Cargo.lock | 11 ++ Cargo.toml | 3 + coins/ethereum/relayer/Cargo.toml | 30 +++++ coins/ethereum/relayer/LICENSE | 15 +++ coins/ethereum/relayer/README.md | 4 + coins/ethereum/relayer/src/main.rs | 100 +++++++++++++++ deny.toml | 1 + .../dev/coins/ethereum-relayer/.folder | 11 ++ orchestration/src/ethereum_relayer.rs | 39 ++++++ orchestration/src/main.rs | 16 +++ orchestration/src/processor.rs | 24 ++-- .../testnet/coins/ethereum-relayer/.folder | 11 ++ processor/src/main.rs | 10 +- processor/src/networks/ethereum.rs | 43 ++++++- processor/src/tests/literal/mod.rs | 2 +- tests/docker/src/lib.rs | 3 +- tests/full-stack/src/tests/mod.rs | 8 +- tests/processor/src/lib.rs | 121 ++++++++++++++---- tests/processor/src/tests/batch.rs | 2 +- tests/processor/src/tests/send.rs | 6 +- 20 files changed, 416 insertions(+), 44 deletions(-) create mode 100644 coins/ethereum/relayer/Cargo.toml create mode 100644 coins/ethereum/relayer/LICENSE create mode 100644 coins/ethereum/relayer/README.md create mode 100644 coins/ethereum/relayer/src/main.rs create mode 100644 orchestration/dev/coins/ethereum-relayer/.folder create mode 100644 orchestration/src/ethereum_relayer.rs create mode 100644 orchestration/testnet/coins/ethereum-relayer/.folder diff --git a/Cargo.lock b/Cargo.lock index 4b7997cd..ba0ab765 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -7884,6 +7884,17 @@ dependencies = [ name = "serai-env" version = "0.1.0" +[[package]] +name = "serai-ethereum-relayer" +version = "0.1.0" +dependencies = [ + "env_logger", + "log", + "serai-db", + "serai-env", + "tokio", +] + [[package]] name = "serai-full-stack-tests" version = "0.1.0" diff --git a/Cargo.toml 
b/Cargo.toml index d608cff8..ce0062f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -38,8 +38,11 @@ members = [ "crypto/schnorrkel", "coins/bitcoin", + "coins/ethereum/alloy-simple-request-transport", "coins/ethereum", + "coins/ethereum/relayer", + "coins/monero/generators", "coins/monero", diff --git a/coins/ethereum/relayer/Cargo.toml b/coins/ethereum/relayer/Cargo.toml new file mode 100644 index 00000000..22c20076 --- /dev/null +++ b/coins/ethereum/relayer/Cargo.toml @@ -0,0 +1,30 @@ +[package] +name = "serai-ethereum-relayer" +version = "0.1.0" +description = "A relayer for Serai's Ethereum transactions" +license = "AGPL-3.0-only" +repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/relayer" +authors = ["Luke Parker "] +keywords = [] +edition = "2021" +publish = false + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +log = { version = "0.4", default-features = false, features = ["std"] } +env_logger = { version = "0.10", default-features = false, features = ["humantime"] } + +tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] } + +serai-env = { path = "../../../common/env" } +serai-db = { path = "../../../common/db" } + +[features] +parity-db = ["serai-db/parity-db"] +rocksdb = ["serai-db/rocksdb"] diff --git a/coins/ethereum/relayer/LICENSE b/coins/ethereum/relayer/LICENSE new file mode 100644 index 00000000..26d57cbb --- /dev/null +++ b/coins/ethereum/relayer/LICENSE @@ -0,0 +1,15 @@ +AGPL-3.0-only license + +Copyright (c) 2023-2024 Luke Parker + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License Version 3 as +published by the Free Software Foundation. 
+ +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . diff --git a/coins/ethereum/relayer/README.md b/coins/ethereum/relayer/README.md new file mode 100644 index 00000000..beed4b72 --- /dev/null +++ b/coins/ethereum/relayer/README.md @@ -0,0 +1,4 @@ +# Ethereum Transaction Relayer + +This server collects Ethereum router commands to be published, offering an RPC +to fetch them. diff --git a/coins/ethereum/relayer/src/main.rs b/coins/ethereum/relayer/src/main.rs new file mode 100644 index 00000000..54593004 --- /dev/null +++ b/coins/ethereum/relayer/src/main.rs @@ -0,0 +1,100 @@ +pub(crate) use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpListener, +}; + +use serai_db::{Get, DbTxn, Db as DbTrait}; + +#[tokio::main(flavor = "current_thread")] +async fn main() { + // Override the panic handler with one which will panic if any tokio task panics + { + let existing = std::panic::take_hook(); + std::panic::set_hook(Box::new(move |panic| { + existing(panic); + const MSG: &str = "exiting the process due to a task panicking"; + println!("{MSG}"); + log::error!("{MSG}"); + std::process::exit(1); + })); + } + + if std::env::var("RUST_LOG").is_err() { + std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string())); + } + env_logger::init(); + + log::info!("Starting Ethereum relayer server..."); + + // Open the DB + #[allow(unused_variables, unreachable_code)] + let db = { + #[cfg(all(feature = "parity-db", feature = "rocksdb"))] + panic!("built with parity-db and rocksdb"); + #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))] + let db = + serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't 
specified")); + #[cfg(feature = "rocksdb")] + let db = + serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified")); + db + }; + + // Start command recipience server + // This should not be publicly exposed + // TODO: Add auth + tokio::spawn({ + let db = db.clone(); + async move { + // 5132 ^ ((b'E' << 8) | b'R') + let server = TcpListener::bind("0.0.0.0:20830").await.unwrap(); + loop { + let (mut socket, _) = server.accept().await.unwrap(); + let db = db.clone(); + tokio::spawn(async move { + let mut db = db.clone(); + loop { + let Ok(msg_len) = socket.read_u32_le().await else { break }; + let mut buf = vec![0; usize::try_from(msg_len).unwrap()]; + let Ok(_) = socket.read_exact(&mut buf).await else { break }; + + if buf.len() < 5 { + break; + } + let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap()); + let mut txn = db.txn(); + txn.put(nonce.to_le_bytes(), &buf[4 ..]); + txn.commit(); + + let Ok(()) = socket.write_all(&[1]).await else { break }; + + log::info!("received signed command #{nonce}"); + } + }); + } + } + }); + + // Start command fetch server + // 5132 ^ ((b'E' << 8) | b'R') + 1 + let server = TcpListener::bind("0.0.0.0:20831").await.unwrap(); + loop { + let (mut socket, _) = server.accept().await.unwrap(); + let db = db.clone(); + tokio::spawn(async move { + let db = db.clone(); + loop { + // Nonce to get the router comamnd for + let mut buf = vec![0; 4]; + let Ok(_) = socket.read_exact(&mut buf).await else { break }; + + let command = db.get(&buf[.. 
4]).unwrap_or(vec![]); + let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await + else { + break; + }; + let Ok(()) = socket.write_all(&command).await else { break }; + } + }); + } +} diff --git a/deny.toml b/deny.toml index d6972d5e..a3e0e3d9 100644 --- a/deny.toml +++ b/deny.toml @@ -44,6 +44,7 @@ exceptions = [ { allow = ["AGPL-3.0"], name = "serai-env" }, { allow = ["AGPL-3.0"], name = "ethereum-serai" }, + { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" }, { allow = ["AGPL-3.0"], name = "serai-message-queue" }, diff --git a/orchestration/dev/coins/ethereum-relayer/.folder b/orchestration/dev/coins/ethereum-relayer/.folder new file mode 100644 index 00000000..675d4438 --- /dev/null +++ b/orchestration/dev/coins/ethereum-relayer/.folder @@ -0,0 +1,11 @@ +#!/bin/sh + +RPC_USER="${RPC_USER:=serai}" +RPC_PASS="${RPC_PASS:=seraidex}" + +# Run Monero +monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ + --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ + --rpc-access-control-origins "*" --disable-rpc-ban \ + --rpc-login=$RPC_USER:$RPC_PASS \ + $1 diff --git a/orchestration/src/ethereum_relayer.rs b/orchestration/src/ethereum_relayer.rs new file mode 100644 index 00000000..523d3c62 --- /dev/null +++ b/orchestration/src/ethereum_relayer.rs @@ -0,0 +1,39 @@ +use std::path::Path; + +use crate::{Network, Os, mimalloc, os, build_serai_service, write_dockerfile}; + +pub fn ethereum_relayer(orchestration_path: &Path, network: Network) { + let setup = mimalloc(Os::Debian).to_string() + + &build_serai_service("", network.release(), network.db(), "serai-ethereum-relayer"); + + let env_vars = [ + ("DB_PATH", "/volume/ethereum-relayer-db".to_string()), + ("RUST_LOG", "info,serai_ethereum_relayer=trace".to_string()), + ]; + let mut env_vars_str = String::new(); + for (env_var, value) in env_vars { + env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); + } + + let 
run_ethereum_relayer = format!( + r#" +# Copy the relayer server binary and relevant license +COPY --from=builder --chown=ethereumrelayer /serai/bin/serai-ethereum-relayer /bin + +# Run ethereum-relayer +EXPOSE 20830 +EXPOSE 20831 +CMD {env_vars_str} serai-ethereum-relayer +"# + ); + + let run = os(Os::Debian, "", "ethereumrelayer") + &run_ethereum_relayer; + let res = setup + &run; + + let mut ethereum_relayer_path = orchestration_path.to_path_buf(); + ethereum_relayer_path.push("coins"); + ethereum_relayer_path.push("ethereum-relayer"); + ethereum_relayer_path.push("Dockerfile"); + + write_dockerfile(ethereum_relayer_path, &res); +} diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 1925b94c..f1f76957 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -32,6 +32,9 @@ use mimalloc::mimalloc; mod coins; use coins::*; +mod ethereum_relayer; +use ethereum_relayer::ethereum_relayer; + mod message_queue; use message_queue::message_queue; @@ -280,6 +283,8 @@ fn dockerfiles(network: Network) { let ethereum_key = infrastructure_keys.remove("ethereum").unwrap(); let monero_key = infrastructure_keys.remove("monero").unwrap(); + ethereum_relayer(&orchestration_path, network); + message_queue( &orchestration_path, network, @@ -363,6 +368,7 @@ fn start(network: Network, services: HashSet) { let name = match service.as_ref() { "serai" => "serai", "coordinator" => "coordinator", + "ethereum-relayer" => "ethereum-relayer", "message-queue" => "message-queue", "bitcoin-daemon" => "bitcoin", "bitcoin-processor" => "bitcoin-processor", @@ -495,6 +501,10 @@ fn start(network: Network, services: HashSet) { command } } + "ethereum-relayer" => { + // Expose the router command fetch server + command.arg("-p").arg("20831:20831") + } "monero" => { // Expose the RPC for tests if network == Network::Dev { @@ -561,6 +571,9 @@ Commands: - `message-queue` - `bitcoin-daemon` - `bitcoin-processor` + - `ethereum-daemon` + - `ethereum-processor` + - 
`ethereum-relayer` - `monero-daemon` - `monero-processor` - `monero-wallet-rpc` (if "dev") @@ -593,6 +606,9 @@ Commands: Some("start") => { let mut services = HashSet::new(); for arg in args { + if arg == "ethereum-processor" { + services.insert("ethereum-relayer".to_string()); + } if let Some(ext_network) = arg.strip_suffix("-processor") { services.insert(ext_network.to_string() + "-daemon"); } diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 85f7ec5f..cefe6455 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -41,24 +41,32 @@ RUN apt install -y ca-certificates const RPC_PASS: &str = "seraidex"; // TODO: Isolate networks let hostname = format!("serai-{}-{coin}", network.label()); - let port = match coin { - "bitcoin" => 8332, - "ethereum" => 8545, - "monero" => 18081, - _ => panic!("unrecognized external network"), - }; + let port = format!( + "{}", + match coin { + "bitcoin" => 8332, + "ethereum" => 8545, + "monero" => 18081, + _ => panic!("unrecognized external network"), + } + ); - let env_vars = [ + let mut env_vars = vec![ ("MESSAGE_QUEUE_RPC", format!("serai-{}-message-queue", network.label())), ("MESSAGE_QUEUE_KEY", hex::encode(coin_key.to_repr())), ("ENTROPY", hex::encode(entropy.as_ref())), ("NETWORK", coin.to_string()), ("NETWORK_RPC_LOGIN", format!("{RPC_USER}:{RPC_PASS}")), ("NETWORK_RPC_HOSTNAME", hostname), - ("NETWORK_RPC_PORT", format!("{port}")), + ("NETWORK_RPC_PORT", port), ("DB_PATH", "/volume/processor-db".to_string()), ("RUST_LOG", "info,serai_processor=debug".to_string()), ]; + if coin == "ethereum" { + env_vars + .push(("ETHEREUM_RELAYER_HOSTNAME", format!("serai-{}-ethereum-relayer", network.label()))); + env_vars.push(("ETHEREUM_RELAYER_PORT", "20830".to_string())); + } let mut env_vars_str = String::new(); for (env_var, value) in env_vars { env_vars_str += &format!(r#"{env_var}=${{{env_var}:="{value}"}} "#); diff --git 
a/orchestration/testnet/coins/ethereum-relayer/.folder b/orchestration/testnet/coins/ethereum-relayer/.folder new file mode 100644 index 00000000..675d4438 --- /dev/null +++ b/orchestration/testnet/coins/ethereum-relayer/.folder @@ -0,0 +1,11 @@ +#!/bin/sh + +RPC_USER="${RPC_USER:=serai}" +RPC_PASS="${RPC_PASS:=seraidex}" + +# Run Monero +monerod --non-interactive --regtest --offline --fixed-difficulty=1 \ + --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \ + --rpc-access-control-origins "*" --disable-rpc-ban \ + --rpc-login=$RPC_USER:$RPC_PASS \ + $1 diff --git a/processor/src/main.rs b/processor/src/main.rs index 1a50effa..e0d97aa6 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -748,7 +748,15 @@ async fn main() { #[cfg(feature = "bitcoin")] NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, #[cfg(feature = "ethereum")] - NetworkId::Ethereum => run(db.clone(), Ethereum::new(db, url).await, coordinator).await, + NetworkId::Ethereum => { + let relayer_hostname = env::var("ETHEREUM_RELAYER_HOSTNAME") + .expect("ethereum relayer hostname wasn't specified") + .to_string(); + let relayer_port = + env::var("ETHEREUM_RELAYER_PORT").expect("ethereum relayer port wasn't specified"); + let relayer_url = relayer_hostname + ":" + &relayer_port; + run(db.clone(), Ethereum::new(db, url, relayer_url).await, coordinator).await + } #[cfg(feature = "monero")] NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, _ => panic!("spawning a processor for an unsupported network"), diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs index 802ea68b..b1965bae 100644 --- a/processor/src/networks/ethereum.rs +++ b/processor/src/networks/ethereum.rs @@ -31,6 +31,11 @@ use tokio::{ time::sleep, sync::{RwLock, RwLockReadGuard}, }; +#[cfg(not(test))] +use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, +}; use serai_client::{ primitives::{Coin, Amount, 
Balance, NetworkId}, @@ -290,6 +295,8 @@ pub struct Ethereum { // address. Accordingly, all methods present are consistent to a Serai chain with a finalized // first key (regardless of local state), and this is safe. db: D, + #[cfg_attr(test, allow(unused))] + relayer_url: String, provider: Arc>, deployer: Deployer, router: Arc>>, @@ -309,9 +316,9 @@ impl fmt::Debug for Ethereum { } } impl Ethereum { - pub async fn new(db: D, url: String) -> Self { + pub async fn new(db: D, daemon_url: String, relayer_url: String) -> Self { let provider = Arc::new(RootProvider::new( - ClientBuilder::default().transport(SimpleRequest::new(url), true), + ClientBuilder::default().transport(SimpleRequest::new(daemon_url), true), )); let mut deployer = Deployer::new(provider.clone()).await; @@ -322,7 +329,9 @@ impl Ethereum { } let deployer = deployer.unwrap().unwrap(); - Ethereum { db, provider, deployer, router: Arc::new(RwLock::new(None)) } + dbg!(&relayer_url); + dbg!(relayer_url.len()); + Ethereum { db, relayer_url, provider, deployer, router: Arc::new(RwLock::new(None)) } } // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. @@ -714,8 +723,32 @@ impl Network for Ethereum { // Publish this to the dedicated TX server for a solver to actually publish #[cfg(not(test))] { - let _ = completion; - todo!("TODO"); + let mut msg = vec![]; + match completion.command() { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + msg.extend(&u32::try_from(nonce).unwrap().to_le_bytes()); + } + } + completion.write(&mut msg).unwrap(); + + let Ok(mut socket) = TcpStream::connect(&self.relayer_url).await else { + log::warn!("couldn't connect to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else { + log::warn!("couldn't send the message's len to the relayer server"); + Err(NetworkError::ConnectionError)? 
+ }; + let Ok(()) = socket.write_all(&msg).await else { + log::warn!("couldn't write the message to the relayer server"); + Err(NetworkError::ConnectionError)? + }; + if socket.read_u8().await.ok() != Some(1) { + log::warn!("didn't get the ack from the relayer server"); + Err(NetworkError::ConnectionError)?; + } + + Ok(()) } // Publish this using a dummy account we fund with magic RPC commands diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 15c27b8c..d45649d5 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -423,7 +423,7 @@ mod ethereum { }); } - Ethereum::new(db, url.clone()).await + Ethereum::new(db, url.clone(), String::new()).await }) } } diff --git a/tests/docker/src/lib.rs b/tests/docker/src/lib.rs index 3493d502..986a1793 100644 --- a/tests/docker/src/lib.rs +++ b/tests/docker/src/lib.rs @@ -85,7 +85,7 @@ pub fn build(name: String) { } let mut dockerfile_path = orchestration_path.clone(); - if HashSet::from(["bitcoin", "ethereum", "monero"]).contains(name.as_str()) { + if HashSet::from(["bitcoin", "ethereum", "ethereum-relayer", "monero"]).contains(name.as_str()) { dockerfile_path = dockerfile_path.join("coins"); } if name.contains("-processor") { @@ -125,6 +125,7 @@ pub fn build(name: String) { let meta = |path: PathBuf| (path.clone(), fs::metadata(path)); let mut metadatas = match name.as_str() { "bitcoin" | "ethereum" | "monero" => vec![], + "ethereum-relayer" => vec![meta(repo_path.join("common")), meta(repo_path.join("coins"))], "message-queue" => vec![ meta(repo_path.join("common")), meta(repo_path.join("crypto")), diff --git a/tests/full-stack/src/tests/mod.rs b/tests/full-stack/src/tests/mod.rs index 1fae8c48..7aaad832 100644 --- a/tests/full-stack/src/tests/mod.rs +++ b/tests/full-stack/src/tests/mod.rs @@ -57,12 +57,16 @@ pub(crate) async fn new_test(test_body: impl TestBody) { let (coord_key, message_queue_keys, message_queue_composition) = 
message_queue_instance(); let (bitcoin_composition, bitcoin_port) = network_instance(NetworkId::Bitcoin); - let bitcoin_processor_composition = + let mut bitcoin_processor_composition = processor_instance(NetworkId::Bitcoin, bitcoin_port, message_queue_keys[&NetworkId::Bitcoin]); + assert_eq!(bitcoin_processor_composition.len(), 1); + let bitcoin_processor_composition = bitcoin_processor_composition.swap_remove(0); let (monero_composition, monero_port) = network_instance(NetworkId::Monero); - let monero_processor_composition = + let mut monero_processor_composition = processor_instance(NetworkId::Monero, monero_port, message_queue_keys[&NetworkId::Monero]); + assert_eq!(monero_processor_composition.len(), 1); + let monero_processor_composition = monero_processor_composition.swap_remove(0); let coordinator_composition = coordinator_instance(name, coord_key); let serai_composition = serai_composition(name); diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 6e78e397..1964e641 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -28,7 +28,7 @@ pub fn processor_instance( network: NetworkId, port: u32, message_queue_key: ::F, -) -> TestBodySpecification { +) -> Vec { let mut entropy = [0; 32]; OsRng.fill_bytes(&mut entropy); @@ -41,7 +41,7 @@ pub fn processor_instance( let image = format!("{network_str}-processor"); serai_docker_tests::build(image.clone()); - TestBodySpecification::with_image( + let mut res = vec![TestBodySpecification::with_image( Image::with_repository(format!("serai-dev-{image}")).pull_policy(PullPolicy::Never), ) .replace_env( @@ -55,10 +55,30 @@ pub fn processor_instance( ("RUST_LOG".to_string(), "serai_processor=trace,".to_string()), ] .into(), - ) + )]; + + if network == NetworkId::Ethereum { + serai_docker_tests::build("ethereum-relayer".to_string()); + res.push( + TestBodySpecification::with_image( + Image::with_repository("serai-dev-ethereum-relayer".to_string()) + .pull_policy(PullPolicy::Never), 
+ ) + .replace_env( + [ + ("DB_PATH".to_string(), "./ethereum-relayer-db".to_string()), + ("RUST_LOG".to_string(), "serai_ethereum_relayer=trace,".to_string()), + ] + .into(), + ) + .set_publish_all_ports(true), + ); + } + + res } -pub type Handles = (String, String, String); +pub type Handles = (String, String, String, String); pub fn processor_stack( network: NetworkId, network_hostname_override: Option, @@ -68,7 +88,7 @@ pub fn processor_stack( let (coord_key, message_queue_keys, message_queue_composition) = serai_message_queue_tests::instance(); - let processor_composition = + let mut processor_compositions = processor_instance(network, network_rpc_port, message_queue_keys[&network]); // Give every item in this stack a unique ID @@ -84,7 +104,7 @@ pub fn processor_stack( let mut compositions = vec![]; let mut handles = vec![]; for (name, composition) in [ - ( + Some(( match network { NetworkId::Serai => unreachable!(), NetworkId::Bitcoin => "bitcoin", @@ -92,10 +112,14 @@ pub fn processor_stack( NetworkId::Monero => "monero", }, network_composition, - ), - ("message_queue", message_queue_composition), - ("processor", processor_composition), - ] { + )), + Some(("message_queue", message_queue_composition)), + Some(("processor", processor_compositions.remove(0))), + processor_compositions.pop().map(|composition| ("relayer", composition)), + ] + .into_iter() + .flatten() + { let handle = format!("processor-{name}-{unique_id}"); compositions.push( composition.set_start_policy(StartPolicy::Strict).set_handle(handle.clone()).set_log_options( @@ -113,14 +137,27 @@ pub fn processor_stack( handles.push(handle); } - let processor_composition = compositions.last_mut().unwrap(); + let processor_composition = compositions.get_mut(2).unwrap(); processor_composition.inject_container_name( network_hostname_override.unwrap_or_else(|| handles[0].clone()), "NETWORK_RPC_HOSTNAME", ); + if let Some(hostname) = handles.get(3) { + processor_composition.inject_container_name(hostname, 
"ETHEREUM_RELAYER_HOSTNAME"); + processor_composition.modify_env("ETHEREUM_RELAYER_PORT", "20830"); + } processor_composition.inject_container_name(handles[1].clone(), "MESSAGE_QUEUE_RPC"); - ((handles[0].clone(), handles[1].clone(), handles[2].clone()), coord_key, compositions) + ( + ( + handles[0].clone(), + handles[1].clone(), + handles[2].clone(), + handles.get(3).cloned().unwrap_or(String::new()), + ), + coord_key, + compositions, + ) } #[derive(serde::Deserialize, Debug)] @@ -134,6 +171,7 @@ pub struct Coordinator { message_queue_handle: String, #[allow(unused)] processor_handle: String, + relayer_handle: String, next_send_id: u64, next_recv_id: u64, @@ -144,7 +182,7 @@ impl Coordinator { pub fn new( network: NetworkId, ops: &DockerOperations, - handles: (String, String, String), + handles: Handles, coord_key: ::F, ) -> Coordinator { let rpc = ops.handle(&handles.1).host_port(2287).unwrap(); @@ -156,6 +194,7 @@ impl Coordinator { network_handle: handles.0, message_queue_handle: handles.1, processor_handle: handles.2, + relayer_handle: handles.3, next_send_id: 0, next_recv_id: 0, @@ -508,7 +547,7 @@ impl Coordinator { } } - pub async fn publish_transacton(&self, ops: &DockerOperations, tx: &[u8]) { + pub async fn publish_transaction(&self, ops: &DockerOperations, tx: &[u8]) { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { NetworkId::Bitcoin => { @@ -545,6 +584,14 @@ impl Coordinator { } } + pub async fn publish_eventuality_completion(&self, ops: &DockerOperations, tx: &[u8]) { + match self.network { + NetworkId::Bitcoin | NetworkId::Monero => self.publish_transaction(ops, tx).await, + NetworkId::Ethereum => (), + NetworkId::Serai => panic!("processor tests broadcasting block to Serai"), + } + } + pub async fn get_published_transaction( &self, ops: &DockerOperations, @@ -575,14 +622,7 @@ impl Coordinator { } } NetworkId::Ethereum => { - use ethereum_serai::alloy::{ - consensus::{TxLegacy, Signed}, - 
simple_request_transport::SimpleRequest, - rpc_client::ClientBuilder, - provider::{Provider, RootProvider}, - network::Ethereum, - }; - + /* let provider = RootProvider::<_, Ethereum>::new( ClientBuilder::default().transport(SimpleRequest::new(rpc_url.clone()), true), ); @@ -593,6 +633,43 @@ impl Coordinator { let mut bytes = vec![]; tx.encode_with_signature_fields(&sig, &mut bytes); Some(bytes) + */ + + // This is being passed a signature. We need to check the relayer has a TX with this + // signature + + use tokio::{ + io::{AsyncReadExt, AsyncWriteExt}, + net::TcpStream, + }; + + let (ip, port) = ops.handle(&self.relayer_handle).host_port(20831).unwrap(); + let relayer_url = format!("{ip}:{port}"); + + let mut socket = TcpStream::connect(&relayer_url).await.unwrap(); + // Iterate over every published command + for i in 1 .. u32::MAX { + socket.write_all(&i.to_le_bytes()).await.unwrap(); + + let mut recvd_len = [0; 4]; + socket.read_exact(&mut recvd_len).await.unwrap(); + if recvd_len == [0; 4] { + break; + } + + let mut msg = vec![0; usize::try_from(u32::from_le_bytes(recvd_len)).unwrap()]; + socket.read_exact(&mut msg).await.unwrap(); + for start_pos in 0 .. msg.len() { + if (start_pos + tx.len()) > msg.len() { + break; + } + if &msg[start_pos .. 
(start_pos + tx.len())] == tx { + return Some(msg); + } + } + } + + None } NetworkId::Monero => { use monero_serai::rpc::HttpRpc; diff --git a/tests/processor/src/tests/batch.rs b/tests/processor/src/tests/batch.rs index 5397ad2d..6170270a 100644 --- a/tests/processor/src/tests/batch.rs +++ b/tests/processor/src/tests/batch.rs @@ -229,7 +229,7 @@ fn batch_test() { let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, instruction.clone()).await; for coordinator in &mut coordinators { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index b764f306..62e80c09 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -147,7 +147,7 @@ pub(crate) async fn sign_tx( #[test] fn send_test() { - for network in [NetworkId::Bitcoin, /* TODO NetworkId::Ethereum, */ NetworkId::Monero] { + for network in [NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero] { let (coordinators, test) = new_test(network); test.run(|ops| async move { @@ -182,7 +182,7 @@ fn send_test() { let (tx, balance_sent) = wallet.send_to_address(&ops, &key_pair.1, Some(instruction.clone())).await; for coordinator in &mut coordinators { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_transaction(&ops, &tx).await; } // Put the TX past the confirmation depth @@ -295,7 +295,7 @@ fn send_test() { .unwrap(); for (i, coordinator) in coordinators.iter_mut().enumerate() { if !participating.contains(&i) { - coordinator.publish_transacton(&ops, &tx).await; + coordinator.publish_eventuality_completion(&ops, &tx).await; // Tell them of it as a completion of the relevant signing nodes coordinator .send_message(messages::sign::CoordinatorMessage::Completed { From cd69f3b9d68b86772d533c09dcbd0c33fc013ec5 Mon Sep 17 00:00:00 2001 From: rlking Date: Sun, 26 May 2024 02:33:23 
+0200 Subject: [PATCH 124/126] Check if wasm was built by container exit code and state instead of local mountpoint (#570) * Check if the serai wasm was built successfully by verifying the build container's status code and state, instead of checking the volume mountpoint locally * Use a log statement for which wasm is used * Minor typo fix --------- Co-authored-by: Luke Parker --- Cargo.lock | 1 + orchestration/src/main.rs | 18 ++++++------------ substrate/node/Cargo.toml | 3 ++- substrate/node/src/chain_spec.rs | 5 ++++- 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ba0ab765..2450c190 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8018,6 +8018,7 @@ dependencies = [ "hex", "jsonrpsee", "libp2p", + "log", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index f1f76957..e8ea7654 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -382,23 +382,17 @@ fn start(network: Network, services: HashSet) { let serai_runtime_volume = format!("serai-{}-runtime-volume", network.label()); if name == "serai" { // Check if it's built by checking if the volume has the expected runtime file + let wasm_build_container_name = format!("serai-{}-runtime", network.label()); let built = || { - if let Ok(path) = Command::new("docker") - .arg("volume") + if let Ok(state_and_status) = Command::new("docker") .arg("inspect") .arg("-f") - .arg("{{ .Mountpoint }}") - .arg(&serai_runtime_volume) + .arg("{{.State.Status}}:{{.State.ExitCode}}") + .arg(&wasm_build_container_name) .output() { - if let Ok(path) = String::from_utf8(path.stdout) { - if let Ok(iter) = std::fs::read_dir(PathBuf::from(path.trim())) { - for item in iter.flatten() { - if item.file_name() == "serai.wasm" { - return true; - } - } - } + if let Ok(state_and_status) = String::from_utf8(state_and_status.stdout) { + return state_and_status.trim() == "exited:0"; } } false diff 
--git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 60f7dc0f..0e551c72 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -20,10 +20,11 @@ workspace = true name = "serai-node" [dependencies] +rand_core = "0.6" zeroize = "1" hex = "0.4" +log = "0.4" -rand_core = "0.6" schnorrkel = "0.11" libp2p = "0.52" diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 6fa8d6c3..e66ee4a6 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -18,9 +18,12 @@ fn account_from_name(name: &'static str) -> PublicKey { fn wasm_binary() -> Vec { // TODO: Accept a config of runtime path - if let Ok(binary) = std::fs::read("/runtime/serai.wasm") { + const WASM_PATH: &str = "/runtime/serai.wasm"; + if let Ok(binary) = std::fs::read(WASM_PATH) { + log::info!("using {WASM_PATH}"); return binary; } + log::info!("using built-in wasm"); WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() } From f4147c39b264c3a8432a0a4ae8051990b1778ec4 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Fri, 31 May 2024 00:35:53 -0400 Subject: [PATCH 125/126] bitcoin 0.32.1 --- Cargo.lock | 4 ++-- substrate/client/src/networks/bitcoin.rs | 26 ++++++++---------------- 2 files changed, 11 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2450c190..00d51c9d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -973,9 +973,9 @@ checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" [[package]] name = "bitcoin" -version = "0.32.0" +version = "0.32.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170e7750a20974246f17ece04311b4205a6155f1db564c5b224af817663c3ea" +checksum = "4bf33434c870e98ecc8608588ccc990c5daba9ba9ad39733dc85fba22c211504" dependencies = [ "base58ck", "bech32", diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 9f5ff1dd..502bfb44 100644 --- 
a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -83,12 +83,6 @@ impl TryFrom> for Address { } fn try_to_vec(addr: &Address) -> Result, ()> { - let witness_program = |addr: &Address| { - let program_push = addr.0.as_script().instructions().last().ok_or(())?.map_err(|_| ())?; - let program = program_push.push_bytes().ok_or(())?.as_bytes(); - Ok::<_, ()>(program.to_vec()) - }; - let parsed_addr = BAddress::::from_script(&addr.0, Network::Bitcoin).map_err(|_| ())?; Ok( @@ -100,21 +94,19 @@ fn try_to_vec(addr: &Address) -> Result, ()> { EncodedAddress::P2SH(*parsed_addr.script_hash().unwrap().as_raw_hash().as_byte_array()) } Some(AddressType::P2wpkh) => { - let program = witness_program(addr)?; - let mut buf = [0; 20]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WPKH(buf) + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2WPKH(program.try_into().map_err(|_| ())?) } Some(AddressType::P2wsh) => { - let program = witness_program(addr)?; - let mut buf = [0; 32]; - buf.copy_from_slice(program.as_ref()); - EncodedAddress::P2WSH(buf) + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2WSH(program.try_into().map_err(|_| ())?) } Some(AddressType::P2tr) => { - let program = witness_program(addr)?; - let program_ref: &[u8] = program.as_ref(); - EncodedAddress::P2TR(program_ref.try_into().map_err(|_| ())?) + let program = parsed_addr.witness_program().ok_or(())?; + let program = program.program().as_bytes(); + EncodedAddress::P2TR(program.try_into().map_err(|_| ())?) } _ => Err(())?, }) From 2a05cf32253bdda31cb012d1df8cceb9ed0c8be7 Mon Sep 17 00:00:00 2001 From: Luke Parker Date: Sat, 1 Jun 2024 21:46:47 -0400 Subject: [PATCH 126/126] June 2024 nightly update Replaces #571. 
--- .github/nightly-version | 2 +- Cargo.lock | 1 + coins/monero/src/wallet/scan.rs | 10 +++---- coins/monero/src/wallet/send/mod.rs | 4 +-- common/zalloc/Cargo.toml | 8 +++--- common/zalloc/build.rs | 10 +++++++ common/zalloc/src/lib.rs | 6 ++--- coordinator/src/db.rs | 2 +- coordinator/tributary/tendermint/src/block.rs | 8 +++--- crypto/ed448/src/backend.rs | 26 +++++++++---------- crypto/frost/README.md | 2 +- crypto/frost/src/sign.rs | 4 +-- crypto/schnorr/src/tests/rfc8032.rs | 2 +- crypto/transcript/README.md | 4 +-- processor/src/multisigs/db.rs | 2 +- substrate/coins/pallet/Cargo.toml | 3 +++ substrate/in-instructions/pallet/Cargo.toml | 3 +++ substrate/signals/pallet/Cargo.toml | 3 +++ substrate/validator-sets/pallet/Cargo.toml | 3 +++ 19 files changed, 63 insertions(+), 40 deletions(-) create mode 100644 common/zalloc/build.rs diff --git a/.github/nightly-version b/.github/nightly-version index 514aef61..1852d9b5 100644 --- a/.github/nightly-version +++ b/.github/nightly-version @@ -1 +1 @@ -nightly-2024-05-01 +nightly-2024-06-01 diff --git a/Cargo.lock b/Cargo.lock index 00d51c9d..0fc29547 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10975,6 +10975,7 @@ dependencies = [ name = "zalloc" version = "0.1.0" dependencies = [ + "rustversion", "zeroize", ] diff --git a/coins/monero/src/wallet/scan.rs b/coins/monero/src/wallet/scan.rs index 0c2cebbd..45bae04d 100644 --- a/coins/monero/src/wallet/scan.rs +++ b/coins/monero/src/wallet/scan.rs @@ -105,13 +105,13 @@ pub struct Metadata { /// but the payment ID will be returned here anyway: /// /// 1) If the payment ID is tied to an output received by a subaddress account - /// that spent Monero in the transaction (the received output is considered - /// "change" and is not considered a "payment" in this case). If there are multiple - /// spending subaddress accounts in a transaction, the highest index spent key image - /// is used to determine the spending subaddress account. 
+ /// that spent Monero in the transaction (the received output is considered + /// "change" and is not considered a "payment" in this case). If there are multiple + /// spending subaddress accounts in a transaction, the highest index spent key image + /// is used to determine the spending subaddress account. /// /// 2) If the payment ID is the unencrypted variant and the block's hf version is - /// v12 or higher (https://github.com/serai-dex/serai/issues/512) + /// v12 or higher (https://github.com/serai-dex/serai/issues/512) pub payment_id: Option, /// Arbitrary data encoded in TX extra. pub arbitrary_data: Vec>, diff --git a/coins/monero/src/wallet/send/mod.rs b/coins/monero/src/wallet/send/mod.rs index f4ac208e..153e6b6c 100644 --- a/coins/monero/src/wallet/send/mod.rs +++ b/coins/monero/src/wallet/send/mod.rs @@ -364,8 +364,8 @@ impl Change { /// 1) The change in the tx is shunted to the fee (fingerprintable fee). /// /// 2) If there are 2 outputs in the tx, there would be no payment ID as is the case when the - /// reference wallet creates 2 output txs, since monero-serai doesn't know which output - /// to tie the dummy payment ID to. + /// reference wallet creates 2 output txs, since monero-serai doesn't know which output + /// to tie the dummy payment ID to. 
pub fn fingerprintable(address: Option) -> Change { Change { address, view: None } } diff --git a/common/zalloc/Cargo.toml b/common/zalloc/Cargo.toml index 1a4a6b45..af4e7c1c 100644 --- a/common/zalloc/Cargo.toml +++ b/common/zalloc/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc" authors = ["Luke Parker "] keywords = [] edition = "2021" -rust-version = "1.60" +rust-version = "1.77.0" [package.metadata.docs.rs] all-features = true @@ -19,8 +19,10 @@ workspace = true [dependencies] zeroize = { version = "^1.5", default-features = false } +[build-dependencies] +rustversion = { version = "1", default-features = false } + [features] std = ["zeroize/std"] default = ["std"] -# Commented for now as it requires nightly and we don't use nightly -# allocator = [] +allocator = [] diff --git a/common/zalloc/build.rs b/common/zalloc/build.rs new file mode 100644 index 00000000..f3351e22 --- /dev/null +++ b/common/zalloc/build.rs @@ -0,0 +1,10 @@ +#[rustversion::nightly] +fn main() { + println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); + println!("cargo::rustc-cfg=zalloc_rustc_nightly"); +} + +#[rustversion::not(nightly)] +fn main() { + println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)"); +} diff --git a/common/zalloc/src/lib.rs b/common/zalloc/src/lib.rs index 0e4c1f75..cc5562a0 100644 --- a/common/zalloc/src/lib.rs +++ b/common/zalloc/src/lib.rs @@ -1,6 +1,6 @@ #![cfg_attr(docsrs, feature(doc_cfg))] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -#![cfg_attr(feature = "allocator", feature(allocator_api))] +#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))] //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation. //! This can either be used with Box (requires nightly and the "allocator" feature) to provide the @@ -17,12 +17,12 @@ use zeroize::Zeroize; /// An allocator wrapper which zeroizes its memory on dealloc. 
pub struct ZeroizingAlloc(pub T); -#[cfg(feature = "allocator")] +#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] use core::{ ptr::NonNull, alloc::{AllocError, Allocator}, }; -#[cfg(feature = "allocator")] +#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))] unsafe impl Allocator for ZeroizingAlloc { fn allocate(&self, layout: Layout) -> Result, AllocError> { self.0.allocate(layout) diff --git a/coordinator/src/db.rs b/coordinator/src/db.rs index 09eab173..04ee9d35 100644 --- a/coordinator/src/db.rs +++ b/coordinator/src/db.rs @@ -122,7 +122,7 @@ impl QueuedBatchesDb { pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec { let batches_vec = Self::get(txn, set).unwrap_or_default(); - txn.del(&Self::key(set)); + txn.del(Self::key(set)); let mut batches: &[u8] = &batches_vec; let mut res = vec![]; diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 6dfacfdb..8fc79018 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -177,14 +177,14 @@ impl BlockData { let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?; if new_block { // Delete the latest round key - txn.del(&key(LATEST_ROUND_KEY)); + txn.del(key(LATEST_ROUND_KEY)); } let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?; if new_block || new_round { // Delete the messages for the old round - txn.del(&key(PROPOSE_KEY)); - txn.del(&key(PEVOTE_KEY)); - txn.del(&key(PRECOMMIT_KEY)); + txn.del(key(PROPOSE_KEY)); + txn.del(key(PEVOTE_KEY)); + txn.del(key(PRECOMMIT_KEY)); } // Check we haven't sent this message within this round diff --git a/crypto/ed448/src/backend.rs b/crypto/ed448/src/backend.rs index 83dc3fca..db41e811 100644 --- a/crypto/ed448/src/backend.rs +++ b/crypto/ed448/src/backend.rs @@ -34,7 +34,7 @@ macro_rules! 
math_op { impl $Op<$Other> for $Value { type Output = $Value; fn $op_fn(self, other: $Other) -> Self::Output { - Self($function(self.0, other.0)) + $Value($function(self.0, other.0)) } } impl $Assign<$Other> for $Value { @@ -45,7 +45,7 @@ macro_rules! math_op { impl<'a> $Op<&'a $Other> for $Value { type Output = $Value; fn $op_fn(self, other: &'a $Other) -> Self::Output { - Self($function(self.0, other.0)) + $Value($function(self.0, other.0)) } } impl<'a> $Assign<&'a $Other> for $Value { @@ -60,7 +60,7 @@ macro_rules! from_wrapper { ($wrapper: ident, $inner: ident, $uint: ident) => { impl From<$uint> for $wrapper { fn from(a: $uint) -> $wrapper { - Self(Residue::new(&$inner::from(a))) + $wrapper(Residue::new(&$inner::from(a))) } } }; @@ -127,7 +127,7 @@ macro_rules! field { impl Neg for $FieldName { type Output = $FieldName; fn neg(self) -> $FieldName { - Self(self.0.neg()) + $FieldName(self.0.neg()) } } @@ -141,13 +141,13 @@ macro_rules! field { impl $FieldName { /// Perform an exponentiation. pub fn pow(&self, other: $FieldName) -> $FieldName { - let mut table = [Self(Residue::ONE); 16]; + let mut table = [$FieldName(Residue::ONE); 16]; table[1] = *self; for i in 2 .. 16 { table[i] = table[i - 1] * self; } - let mut res = Self(Residue::ONE); + let mut res = $FieldName(Residue::ONE); let mut bits = 0; for (i, mut bit) in other.to_le_bits().iter_mut().rev().enumerate() { bits <<= 1; @@ -170,8 +170,8 @@ macro_rules! field { } impl Field for $FieldName { - const ZERO: Self = Self(Residue::ZERO); - const ONE: Self = Self(Residue::ONE); + const ZERO: Self = $FieldName(Residue::ZERO); + const ONE: Self = $FieldName(Residue::ONE); fn random(mut rng: impl RngCore) -> Self { let mut bytes = [0; 112]; @@ -188,12 +188,12 @@ macro_rules! 
field { fn invert(&self) -> CtOption { const NEG_2: $FieldName = - Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2)))); + $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::from_u8(2)))); CtOption::new(self.pow(NEG_2), !self.is_zero()) } fn sqrt(&self) -> CtOption { - const MOD_1_4: $FieldName = Self($ResidueType::new( + const MOD_1_4: $FieldName = $FieldName($ResidueType::new( &$MODULUS.saturating_add(&U448::ONE).wrapping_div(&U448::from_u8(4)), )); @@ -217,14 +217,14 @@ macro_rules! field { const TWO_INV: Self = $FieldName($ResidueType::new(&U448::from_u8(2)).invert().0); const MULTIPLICATIVE_GENERATOR: Self = - Self(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR))); + $FieldName(Residue::new(&U448::from_u8($MULTIPLICATIVE_GENERATOR))); // True for both the Ed448 Scalar field and FieldElement field const S: u32 = 1; // Both fields have their root of unity as -1 const ROOT_OF_UNITY: Self = - Self($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE))); - const ROOT_OF_UNITY_INV: Self = Self(Self::ROOT_OF_UNITY.0.invert().0); + $FieldName($ResidueType::sub(&$ResidueType::ZERO, &$ResidueType::new(&U448::ONE))); + const ROOT_OF_UNITY_INV: Self = $FieldName(Self::ROOT_OF_UNITY.0.invert().0); const DELTA: Self = $FieldName(Residue::new(&U448::from_le_hex($DELTA))); diff --git a/crypto/frost/README.md b/crypto/frost/README.md index 27845844..e6ed2b0a 100644 --- a/crypto/frost/README.md +++ b/crypto/frost/README.md @@ -10,7 +10,7 @@ integrating with existing systems. This library offers ciphersuites compatible with the [IETF draft](https://github.com/cfrg/draft-irtf-cfrg-frost). Currently, version -11 is supported. +15 is supported. 
This library was [audited by Cypher Stack in March 2023](https://github.com/serai-dex/serai/raw/e1bb2c191b7123fd260d008e31656d090d559d21/audits/Cypher%20Stack%20crypto%20March%202023/Audit.pdf), diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index 73ea0a7d..5115244f 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -362,9 +362,7 @@ impl> SignMachine for AlgorithmSignMachi rho_transcript.append_message(b"message", C::hash_msg(msg)); rho_transcript.append_message( b"preprocesses", - &C::hash_commitments( - self.params.algorithm.transcript().challenge(b"preprocesses").as_ref(), - ), + C::hash_commitments(self.params.algorithm.transcript().challenge(b"preprocesses").as_ref()), ); // Generate the per-signer binding factors diff --git a/crypto/schnorr/src/tests/rfc8032.rs b/crypto/schnorr/src/tests/rfc8032.rs index 991cf450..418f4c0e 100644 --- a/crypto/schnorr/src/tests/rfc8032.rs +++ b/crypto/schnorr/src/tests/rfc8032.rs @@ -52,7 +52,7 @@ fn test_rfc8032() { SchnorrSignature::::read::<&[u8]>(&mut hex::decode(vector.2).unwrap().as_ref()) .unwrap(); let hram = Sha512::new_with_prefix( - &[sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(), + [sig.R.to_bytes().as_ref(), &key.to_bytes(), &hex::decode(vector.1).unwrap()].concat(), ); assert!(sig.verify(key, Scalar::from_hash(hram))); } diff --git a/crypto/transcript/README.md b/crypto/transcript/README.md index a8772a0a..17124693 100644 --- a/crypto/transcript/README.md +++ b/crypto/transcript/README.md @@ -3,9 +3,9 @@ Flexible Transcript is a crate offering: - `Transcript`, a trait offering functions transcripts should implement. - `DigestTranscript`, a competent transcript format instantiated against a -provided hash function. + provided hash function. - `MerlinTranscript`, a wrapper of `merlin` into the trait (available via the -`merlin` feature). + `merlin` feature). - `RecommendedTranscript`, a transcript recommended for usage in applications. 
Currently, this is `DigestTranscript` (available via the `recommended` feature). diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index 339b7bdc..3d1d13bd 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -231,7 +231,7 @@ impl ForwardedOutputDb { let res = InInstructionWithBalance::decode(&mut outputs_ref).unwrap(); assert!(outputs_ref.len() < outputs.len()); if outputs_ref.is_empty() { - txn.del(&Self::key(balance)); + txn.del(Self::key(balance)); } else { Self::set(txn, balance, &outputs); } diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index da9a27f6..2aba1fbd 100644 --- a/substrate/coins/pallet/Cargo.toml +++ b/substrate/coins/pallet/Cargo.toml @@ -49,6 +49,9 @@ std = [ "coins-primitives/std", ] +# TODO +try-runtime = [] + runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks", diff --git a/substrate/in-instructions/pallet/Cargo.toml b/substrate/in-instructions/pallet/Cargo.toml index f313a22a..676d11f5 100644 --- a/substrate/in-instructions/pallet/Cargo.toml +++ b/substrate/in-instructions/pallet/Cargo.toml @@ -60,3 +60,6 @@ std = [ "validator-sets-pallet/std", ] default = ["std"] + +# TODO +try-runtime = [] diff --git a/substrate/signals/pallet/Cargo.toml b/substrate/signals/pallet/Cargo.toml index 582a3e09..e06b5e6b 100644 --- a/substrate/signals/pallet/Cargo.toml +++ b/substrate/signals/pallet/Cargo.toml @@ -57,4 +57,7 @@ runtime-benchmarks = [ "frame-support/runtime-benchmarks", ] +# TODO +try-runtime = [] + default = ["std"] diff --git a/substrate/validator-sets/pallet/Cargo.toml b/substrate/validator-sets/pallet/Cargo.toml index 3b553788..dd67d1bc 100644 --- a/substrate/validator-sets/pallet/Cargo.toml +++ b/substrate/validator-sets/pallet/Cargo.toml @@ -70,6 +70,9 @@ std = [ "dex-pallet/std", ] +# TODO +try-runtime = [] + runtime-benchmarks = [ "frame-system/runtime-benchmarks", "frame-support/runtime-benchmarks",