diff --git a/.github/actions/bitcoin/action.yml b/.github/actions/bitcoin/action.yml index 90310868..2a6dbce3 100644 --- a/.github/actions/bitcoin/action.yml +++ b/.github/actions/bitcoin/action.yml @@ -5,7 +5,7 @@ inputs: version: description: "Version to download and run" required: false - default: 24.0.1 + default: "27.0" runs: using: "composite" diff --git a/.github/actions/test-dependencies/action.yml b/.github/actions/test-dependencies/action.yml index a19e1704..49c2fa64 100644 --- a/.github/actions/test-dependencies/action.yml +++ b/.github/actions/test-dependencies/action.yml @@ -10,7 +10,7 @@ inputs: bitcoin-version: description: "Bitcoin version to download and run as a regtest node" required: false - default: 24.0.1 + default: "27.0" runs: using: "composite" @@ -19,9 +19,9 @@ runs: uses: ./.github/actions/build-dependencies - name: Install Foundry - uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 with: - version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2 + version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9 cache: false - name: Run a Monero Regtest Node diff --git a/.github/nightly-version b/.github/nightly-version index 4a8f1e33..514aef61 100644 --- a/.github/nightly-version +++ b/.github/nightly-version @@ -1 +1 @@ -nightly-2024-02-07 +nightly-2024-05-01 diff --git a/.github/workflows/coins-tests.yml b/.github/workflows/coins-tests.yml index a0437c61..f94e9fd5 100644 --- a/.github/workflows/coins-tests.yml +++ b/.github/workflows/coins-tests.yml @@ -30,6 +30,7 @@ jobs: run: | GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \ -p bitcoin-serai \ + -p alloy-simple-request-transport \ -p ethereum-serai \ -p monero-generators \ -p monero-serai diff --git a/.github/workflows/common-tests.yml b/.github/workflows/common-tests.yml index 0135fcaf..f0545f0b 100644 --- a/.github/workflows/common-tests.yml +++ 
b/.github/workflows/common-tests.yml @@ -28,4 +28,5 @@ jobs: -p std-shims \ -p zalloc \ -p serai-db \ - -p serai-env + -p serai-env \ + -p simple-request diff --git a/.github/workflows/coordinator-tests.yml b/.github/workflows/coordinator-tests.yml index 7cc4d7b3..138fd106 100644 --- a/.github/workflows/coordinator-tests.yml +++ b/.github/workflows/coordinator-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run coordinator Docker tests - run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/full-stack-tests.yml b/.github/workflows/full-stack-tests.yml index 3d1c86a1..baacf774 100644 --- a/.github/workflows/full-stack-tests.yml +++ b/.github/workflows/full-stack-tests.yml @@ -19,4 +19,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Full Stack Docker tests - run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/message-queue-tests.yml b/.github/workflows/message-queue-tests.yml index 273af237..7894549c 100644 --- a/.github/workflows/message-queue-tests.yml +++ b/.github/workflows/message-queue-tests.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run message-queue Docker tests - run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/processor-tests.yml b/.github/workflows/processor-tests.yml index 88f4429c..0b5ecbbe 100644 --- a/.github/workflows/processor-tests.yml +++ b/.github/workflows/processor-tests.yml @@ -37,4 +37,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run processor Docker tests - run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo 
test + run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/reproducible-runtime.yml b/.github/workflows/reproducible-runtime.yml index 16256ab6..d34e5ca5 100644 --- a/.github/workflows/reproducible-runtime.yml +++ b/.github/workflows/reproducible-runtime.yml @@ -33,4 +33,4 @@ jobs: uses: ./.github/actions/build-dependencies - name: Run Reproducible Runtime tests - run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test + run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 257c1dd5..e32d2119 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -43,6 +43,7 @@ jobs: -p tendermint-machine \ -p tributary-chain \ -p serai-coordinator \ + -p serai-orchestrator \ -p serai-docker-tests test-substrate: @@ -64,7 +65,9 @@ jobs: -p serai-validator-sets-pallet \ -p serai-in-instructions-primitives \ -p serai-in-instructions-pallet \ + -p serai-signals-primitives \ -p serai-signals-pallet \ + -p serai-abi \ -p serai-runtime \ -p serai-node diff --git a/Cargo.lock b/Cargo.lock index 6d641a93..4ddcfd84 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -95,9 +95,344 @@ dependencies = [ [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" + +[[package]] +name = "alloy-consensus" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-eips", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "serde", + "sha2", +] + +[[package]] +name = "alloy-core" +version = "0.7.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcbd9ee412dfb4e81d23cd1ae816d828c494a77d1eb00358035043695d4c5808" +dependencies = [ + "alloy-primitives", +] + +[[package]] +name = "alloy-eips" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "c-kzg", + "once_cell", + "serde", +] + +[[package]] +name = "alloy-genesis" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-serde", + "serde", +] + +[[package]] +name = "alloy-json-abi" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83a35ddfd27576474322a5869e4c123e5f3e7b2177297c18e4e82ea501cb125b" +dependencies = [ + "alloy-primitives", + "alloy-sol-type-parser", + "serde", +] + +[[package]] +name = "alloy-json-rpc" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", + "thiserror", + "tracing", +] + +[[package]] +name = "alloy-network" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-json-rpc", + "alloy-primitives", + "alloy-rpc-types", + "alloy-signer", + "async-trait", + "futures-utils-wasm", + "thiserror", +] + +[[package]] +name = "alloy-node-bindings" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-genesis", + "alloy-primitives", + "k256", + "serde_json", + 
"tempfile", + "thiserror", + "tracing", + "url", +] + +[[package]] +name = "alloy-primitives" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99bbad0a6b588ef4aec1b5ddbbfdacd9ef04e00b979617765b03174318ee1f3a" +dependencies = [ + "alloy-rlp", + "bytes", + "cfg-if", + "const-hex", + "derive_more", + "hex-literal", + "itoa", + "k256", + "keccak-asm", + "proptest", + "rand", + "ruint", + "serde", + "tiny-keccak", +] + +[[package]] +name = "alloy-provider" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-eips", + "alloy-json-rpc", + "alloy-network", + "alloy-primitives", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-rpc-types-trace", + "alloy-transport", + "async-stream", + "async-trait", + "auto_impl", + "dashmap", + "futures", + "futures-utils-wasm", + "lru", + "serde_json", + "tokio", + "tracing", +] + +[[package]] +name = "alloy-rlp" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d58d9f5da7b40e9bfff0b7e7816700be4019db97d4b6359fe7f94a9e22e42ac" +dependencies = [ + "alloy-rlp-derive", + "arrayvec", + "bytes", +] + +[[package]] +name = "alloy-rlp-derive" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1a047897373be4bbb0224c1afdabca92648dc57a9c9ef6e7b0be3aff7a859c83" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", +] + +[[package]] +name = "alloy-rpc-client" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "alloy-transport-http", + "futures", + "pin-project", + "serde", + "serde_json", + "tokio", + "tokio-stream", + "tower", + "tracing", +] + +[[package]] +name = "alloy-rpc-types" +version = "0.1.0" +source 
= "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-consensus", + "alloy-eips", + "alloy-genesis", + "alloy-primitives", + "alloy-rlp", + "alloy-serde", + "alloy-sol-types", + "itertools 0.12.1", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "alloy-rpc-types-trace" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "alloy-rpc-types", + "alloy-serde", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-serde" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "serde", + "serde_json", +] + +[[package]] +name = "alloy-signer" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-primitives", + "async-trait", + "auto_impl", + "elliptic-curve", + "k256", + "thiserror", +] + +[[package]] +name = "alloy-simple-request-transport" +version = "0.1.0" +dependencies = [ + "alloy-json-rpc", + "alloy-transport", + "serde_json", + "simple-request", + "tower", +] + +[[package]] +name = "alloy-sol-macro" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "452d929748ac948a10481fff4123affead32c553cf362841c5103dd508bdfc16" +dependencies = [ + "alloy-json-abi", + "alloy-sol-macro-input", + "const-hex", + "heck 0.4.1", + "indexmap 2.2.6", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 2.0.60", + "syn-solidity", + "tiny-keccak", +] + +[[package]] +name = "alloy-sol-macro-input" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"df64e094f6d2099339f9e82b5b38440b159757b6920878f28316243f8166c8d1" +dependencies = [ + "alloy-json-abi", + "const-hex", + "dunce", + "heck 0.5.0", + "proc-macro2", + "quote", + "serde_json", + "syn 2.0.60", + "syn-solidity", +] + +[[package]] +name = "alloy-sol-type-parser" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "715f4d09a330cc181fc7c361b5c5c2766408fa59a0bac60349dcb7baabd404cc" +dependencies = [ + "winnow 0.6.6", +] + +[[package]] +name = "alloy-sol-types" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43bc2d6dfc2a19fd56644494479510f98b1ee929e04cf0d4aa45e98baa3e545b" +dependencies = [ + "alloy-json-abi", + "alloy-primitives", + "alloy-sol-macro", + "const-hex", +] + +[[package]] +name = "alloy-transport" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-json-rpc", + "base64 0.22.0", + "futures-util", + "futures-utils-wasm", + "serde", + "serde_json", + "thiserror", + "tokio", + "tower", + "url", + "wasm-bindgen-futures", +] + +[[package]] +name = "alloy-transport-http" +version = "0.1.0" +source = "git+https://github.com/alloy-rs/alloy?rev=037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6#037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6" +dependencies = [ + "alloy-transport", + "url", +] [[package]] name = "android-tzdata" @@ -173,9 +508,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.81" +version = "1.0.82" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" +checksum = "f538837af36e6f6a9be0faa67f9a314f8119e4e4b5867c6ab40ed60360142519" [[package]] name = "approx" @@ -192,6 +527,130 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +[[package]] +name = "ark-ff" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6b3235cc41ee7a12aaaf2c575a2ad7b46713a8a50bda2fc3b003a04845c05dd6" +dependencies = [ + "ark-ff-asm 0.3.0", + "ark-ff-macros 0.3.0", + "ark-serialize 0.3.0", + "ark-std 0.3.0", + "derivative", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.3.3", + "zeroize", +] + +[[package]] +name = "ark-ff" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ec847af850f44ad29048935519032c33da8aa03340876d351dfab5660d2966ba" +dependencies = [ + "ark-ff-asm 0.4.2", + "ark-ff-macros 0.4.2", + "ark-serialize 0.4.2", + "ark-std 0.4.0", + "derivative", + "digest 0.10.7", + "itertools 0.10.5", + "num-bigint", + "num-traits", + "paste", + "rustc_version 0.4.0", + "zeroize", +] + +[[package]] +name = "ark-ff-asm" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db02d390bf6643fb404d3d22d31aee1c4bc4459600aef9113833d17e786c6e44" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-asm" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ed4aa4fe255d0bc6d79373f7e31d2ea147bcf486cba1be5ba7ea85abdb92348" +dependencies = [ + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db2fd794a08ccb318058009eefdf15bcaaaaf6f8161eb3345f907222bac38b20" +dependencies = [ + "num-bigint", + "num-traits", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "ark-ff-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7abe79b0e4288889c4574159ab790824d0033b9fdcb2a112a3182fac2e514565" +dependencies = [ + "num-bigint", + "num-traits", + "proc-macro2", + "quote", + "syn 1.0.109", +] 
+ +[[package]] +name = "ark-serialize" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d6c2b318ee6e10f8c2853e73a83adc0ccb88995aa978d8a3408d492ab2ee671" +dependencies = [ + "ark-std 0.3.0", + "digest 0.9.0", +] + +[[package]] +name = "ark-serialize" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb7b85a02b83d2f22f89bd5cac66c9c89474240cb6207cb1efc16d098e822a5" +dependencies = [ + "ark-std 0.4.0", + "digest 0.10.7", + "num-bigint", +] + +[[package]] +name = "ark-std" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1df2c09229cbc5a028b1d70e00fdb2acee28b1055dfb5ca73eea49c5a25c4e7c" +dependencies = [ + "num-traits", + "rand", +] + +[[package]] +name = "ark-std" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94893f1e0c6eeab764ade8dc4c0db24caf4fe7cbbaafc0eba0a9030f447b5185" +dependencies = [ + "num-traits", + "rand", +] + [[package]] name = "array-bytes" version = "6.2.2" @@ -287,29 +746,40 @@ checksum = "d034b430882f8381900d3fe6f0aaa3ad94f2cb4ac519b429692a1bc2dda4ae7b" dependencies = [ "event-listener 4.0.3", "event-listener-strategy", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", +] + +[[package]] +name = "async-stream" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +dependencies = [ + "async-stream-impl", + "futures-core", + "pin-project-lite 0.2.14", +] + +[[package]] +name = "async-stream-impl" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.60", ] [[package]] name = "async-trait" -version = "0.1.79" +version = "0.1.80" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", -] - -[[package]] -name = "async_io_stream" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6d7b9decdf35d8908a7e3ef02f64c5e9b1695e230154c0e8de3969142d9b94c" -dependencies = [ - "futures", - "pharos", - "rustc_version", + "syn 2.0.60", ] [[package]] @@ -322,7 +792,7 @@ dependencies = [ "futures-sink", "futures-util", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -344,7 +814,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -402,6 +872,12 @@ version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" +[[package]] +name = "base64" +version = "0.22.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9475866fec1451be56a3c2400fd081ff546538961565ccb5b7142cbd22bc7a51" + [[package]] name = "base64ct" version = "1.6.0" @@ -441,7 +917,7 @@ dependencies = [ "bitflags 2.5.0", "cexpr", "clang-sys", - "itertools", + "itertools 0.12.1", "lazy_static", "lazycell", "proc-macro2", @@ -449,14 +925,29 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] -name = "bitcoin" -version = "0.31.1" +name = "bit-set" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd00f3c09b5f21fb357abe32d29946eb8bb7a0862bae62c0b5e4a692acbbe73c" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + +[[package]] +name = "bit-vec" +version = "0.6.3" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "349f9b6a179ed607305526ca489b34ad0a41aed5f7980fa90eb03160b69598fb" + +[[package]] +name = "bitcoin" +version = "0.31.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c85783c2fe40083ea54a33aa2f0ba58831d90fcd190f5bdc47e74e84d2a96ae" dependencies = [ "bech32", "bitcoin-internals", @@ -617,6 +1108,18 @@ dependencies = [ "subtle", ] +[[package]] +name = "blst" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c94087b935a822949d3291a9989ad2b2051ea141eda0fd4e478a75f6aa3e604b" +dependencies = [ + "cc", + "glob", + "threadpool", + "zeroize", +] + [[package]] name = "bollard" version = "0.15.0" @@ -633,7 +1136,7 @@ dependencies = [ "hyper 0.14.28", "hyperlocal", "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "serde", "serde_derive", "serde_json", @@ -677,7 +1180,7 @@ dependencies = [ "proc-macro-crate 3.1.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", "syn_derive", ] @@ -723,9 +1226,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.4" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -765,6 +1268,20 @@ dependencies = [ "pkg-config", ] +[[package]] +name = "c-kzg" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3130f3d8717cc02e668a896af24984d5d5d4e8bf12e278e982e0f1bd88a0f9af" +dependencies = [ + "blst", + "cc", + "glob", + "hex", + "libc", + "serde", +] + [[package]] name = "camino" version = "1.1.6" @@ -817,9 +1334,9 @@ dependencies = [ [[package]] name = "cfg-expr" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"fa50868b64a9a6fda9d593ce778849ea8715cd2a3d2cc17ffdb4a2f2f2f1961d" +checksum = "d067ad48b8650848b989a59a86c6c36a995d02d2bf778d45c3c5d57bc2718f02" dependencies = [ "smallvec", ] @@ -862,9 +1379,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.35" +version = "0.4.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8eaf5903dcbc0a39312feb77df2ff4c76387d591b9fc7b04a238dcf8bb62639a" +checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401" dependencies = [ "android-tzdata", "iana-time-zone", @@ -935,9 +1452,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.2" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b230ab84b0ffdf890d5a10abdbc8b83ae1c4918275daea1ab8801f71536b2651" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -957,14 +1474,14 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.0" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "307bc0538d5f0f83b8248db3087aa92fe504e4691294d0c96c0eabc33f47ba47" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -1000,13 +1517,14 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.9.1" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c37be52ef5e3b394db27a2341010685ad5103c72ac15ce2e9420a7e8f93f342c" +checksum = "5ba00838774b4ab0233e355d26710fbfc8327a05c017f6dc4873f876d1f79f78" dependencies = [ "cfg-if", "cpufeatures", "hex", + "proptest", "serde", ] @@ -1042,6 +1560,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2" +[[package]] +name = 
"convert_case" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" + [[package]] name = "core-foundation" version = "0.9.4" @@ -1195,7 +1719,7 @@ dependencies = [ "cranelift-codegen", "cranelift-entity", "cranelift-frontend", - "itertools", + "itertools 0.10.5", "log", "smallvec", "wasmparser", @@ -1288,7 +1812,7 @@ dependencies = [ "group", "platforms", "rand_core", - "rustc_version", + "rustc_version 0.4.0", "subtle", "zeroize", ] @@ -1301,14 +1825,14 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] name = "cxx" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff4dc7287237dd438b926a81a1a5605dad33d286870e5eee2db17bf2bcd9e92a" +checksum = "21db378d04296a84d8b7d047c36bb3954f0b46529db725d7e62fb02f9ba53ccc" dependencies = [ "cc", "cxxbridge-flags", @@ -1318,9 +1842,9 @@ dependencies = [ [[package]] name = "cxx-build" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f47c6c8ad7c1a10d3ef0fe3ff6733f4db0d78f08ef0b13121543163ef327058b" +checksum = "3e5262a7fa3f0bae2a55b767c223ba98032d7c328f5c13fa5cdc980b77fc0658" dependencies = [ "cc", "codespan-reporting", @@ -1328,24 +1852,24 @@ dependencies = [ "proc-macro2", "quote", "scratch", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] name = "cxxbridge-flags" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "701a1ac7a697e249cdd8dc026d7a7dafbfd0dbcd8bd24ec55889f2bc13dd6287" +checksum = "be8dcadd2e2fb4a501e1d9e93d6e88e6ea494306d8272069c92d5a9edf8855c0" [[package]] name = "cxxbridge-macro" -version = "1.0.120" +version = "1.0.121" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "b404f596046b0bb2d903a9c786b875a126261b52b7c3a64bbb66382c41c771df" +checksum = "ad08a837629ad949b73d032c637653d069e909cffe4ee7870b02301939ce39cc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -1364,6 +1888,19 @@ dependencies = [ "zeroize", ] +[[package]] +name = "dashmap" +version = "5.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" +dependencies = [ + "cfg-if", + "hashbrown 0.14.3", + "lock_api", + "once_cell", + "parking_lot_core 0.9.9", +] + [[package]] name = "data-encoding" version = "2.5.0" @@ -1401,9 +1938,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "zeroize", @@ -1433,6 +1970,17 @@ dependencies = [ "serde", ] +[[package]] +name = "derivative" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "derive-syn-parse" version = "0.1.5" @@ -1450,8 +1998,10 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ + "convert_case", "proc-macro2", "quote", + "rustc_version 0.4.0", "syn 1.0.109", ] @@ -1531,7 +2081,7 @@ checksum = "487585f4d0c6655fe74905e2504d8ad6908e4db67f744eb140876906c2f3175d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -1692,9 +2242,9 @@ dependencies = [ [[package]] name = "either" -version = "1.10.0" +version = "1.11.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "11157ac094ffbdde99aa67b23417ebdd801842852b500e395a45a9c0aac03e4a" +checksum = "a47c1c47d2f5964e29c61246e81db715514cd532db6b5116a25ea3c03d6780a2" [[package]] name = "elliptic-curve" @@ -1716,40 +2266,13 @@ dependencies = [ "zeroize", ] -[[package]] -name = "encoding_rs" -version = "0.8.33" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" -dependencies = [ - "cfg-if", -] - -[[package]] -name = "enr" -version = "0.9.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe81b5c06ecfdbc71dd845216f225f53b62a10cb8a16c946836a3467f701d05b" -dependencies = [ - "base64 0.21.7", - "bytes", - "hex", - "k256", - "log", - "rand", - "rlp", - "serde", - "sha3", - "zeroize", -] - [[package]] name = "enum-as-inner" version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "syn 1.0.109", @@ -1761,10 +2284,10 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffccbb6966c05b32ef8fbac435df276c4ae4d3dc55a8cd0eb9745e6c12f546a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -1802,198 +2325,27 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "ethabi" -version = "18.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7413c5f74cc903ea37386a8965a936cbeb334bd270862fdece542c1b2dcbc898" -dependencies = [ - "ethereum-types", - "hex", - "once_cell", - "regex", - "serde", - "serde_json", - "sha3", - "thiserror", - "uint", -] - -[[package]] -name = "ethbloom" -version = "0.13.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" -dependencies = [ - "crunchy", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "scale-info", - "tiny-keccak", -] - [[package]] name = "ethereum-serai" version = "0.1.0" dependencies = [ - "ethers-contract", - "ethers-core", - "ethers-providers", - "eyre", + "alloy-consensus", + "alloy-core", + "alloy-node-bindings", + "alloy-provider", + "alloy-rpc-client", + "alloy-rpc-types", + "alloy-simple-request-transport", + "alloy-sol-types", + "flexible-transcript", "group", - "hex", "k256", "modular-frost", "rand_core", - "serde", - "serde_json", - "sha2", - "sha3", "thiserror", "tokio", ] -[[package]] -name = "ethereum-types" -version = "0.14.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" -dependencies = [ - "ethbloom", - "fixed-hash", - "impl-codec", - "impl-rlp", - "impl-serde", - "primitive-types", - "scale-info", - "uint", -] - -[[package]] -name = "ethers-contract" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d79269278125006bb0552349c03593ffa9702112ca88bc7046cc669f148fb47c" -dependencies = [ - "const-hex", - "ethers-contract-abigen", - "ethers-contract-derive", - "ethers-core", - "ethers-providers", - "futures-util", - "once_cell", - "pin-project", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ethers-contract-abigen" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce95a43c939b2e4e2f3191c5ad4a1f279780b8a39139c9905b43a7433531e2ab" -dependencies = [ - "Inflector", - "const-hex", - "dunce", - "ethers-core", - "eyre", - "prettyplease 0.2.16", - "proc-macro2", - "quote", - "regex", - "serde", - "serde_json", - "syn 2.0.55", - "toml 0.7.8", - "walkdir", -] - -[[package]] -name = "ethers-contract-derive" -version = "2.0.10" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9ce44906fc871b3ee8c69a695ca7ec7f70e50cb379c9b9cb5e532269e492f6" -dependencies = [ - "Inflector", - "const-hex", - "ethers-contract-abigen", - "ethers-core", - "proc-macro2", - "quote", - "serde_json", - "syn 2.0.55", -] - -[[package]] -name = "ethers-core" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0a17f0708692024db9956b31d7a20163607d2745953f5ae8125ab368ba280ad" -dependencies = [ - "arrayvec", - "bytes", - "cargo_metadata", - "chrono", - "const-hex", - "elliptic-curve", - "ethabi", - "generic-array 0.14.7", - "k256", - "num_enum", - "once_cell", - "open-fastrlp", - "rand", - "rlp", - "serde", - "serde_json", - "strum 0.25.0", - "syn 2.0.55", - "tempfile", - "thiserror", - "tiny-keccak", - "unicode-xid", -] - -[[package]] -name = "ethers-providers" -version = "2.0.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6838fa110e57d572336178b7c79e94ff88ef976306852d8cb87d9e5b1fc7c0b5" -dependencies = [ - "async-trait", - "auto_impl", - "base64 0.21.7", - "bytes", - "const-hex", - "enr", - "ethers-core", - "futures-core", - "futures-timer", - "futures-util", - "hashers", - "http 0.2.12", - "instant", - "jsonwebtoken", - "once_cell", - "pin-project", - "reqwest", - "serde", - "serde_json", - "thiserror", - "tokio", - "tracing", - "tracing-futures", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "ws_stream_wasm", -] - [[package]] name = "event-listener" version = "2.5.3" @@ -2008,7 +2360,7 @@ checksum = "67b215c49b2b248c855fb73579eb1f4f26c38ffdc12973e20e07b91d78d5646e" dependencies = [ "concurrent-queue", "parking", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2018,7 +2370,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "958e4d70b6d5e81971bebec42271ec641e7ff4e170a6fa605f2b8a8b65cb97d3" dependencies = [ "event-listener 4.0.3", - 
"pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2040,17 +2392,7 @@ dependencies = [ "fs-err", "proc-macro2", "quote", - "syn 2.0.55", -] - -[[package]] -name = "eyre" -version = "0.6.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7cd915d99f24784cdc19fd37ef22b97e3ff0ae756c7e492e9fbfe897d61e2aec" -dependencies = [ - "indenter", - "once_cell", + "syn 2.0.60", ] [[package]] @@ -2065,6 +2407,17 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" +[[package]] +name = "fastrlp" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139834ddba373bbdd213dffe02c8d110508dcf1726c2be27e8d1f7d7e1856418" +dependencies = [ + "arrayvec", + "auto_impl", + "bytes", +] + [[package]] name = "fdlimit" version = "0.2.1" @@ -2309,12 +2662,12 @@ dependencies = [ "derive-syn-parse", "expander", "frame-support-procedural-tools", - "itertools", + "itertools 0.10.5", "macro_magic", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -2326,7 +2679,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -2336,7 +2689,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -2484,7 +2837,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "52527eb5074e35e9339c6b4e8d12600c7128b68fb25dcb9fa9dec18f7c25f3a5" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2495,7 +2848,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -2505,7 +2858,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd3cf68c183738046838e300353e4716c674dc5e56890de4826801a6622a28" dependencies = [ "futures-io", - "rustls 0.21.10", + "rustls 0.21.11", ] [[package]] @@ -2536,10 +2889,6 @@ name = "futures-timer" version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" -dependencies = [ - "gloo-timers", - "send_wrapper 0.4.0", -] [[package]] name = "futures-util" @@ -2554,11 +2903,17 @@ dependencies = [ "futures-sink", "futures-task", "memchr", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "pin-utils", "slab", ] +[[package]] +name = "futures-utils-wasm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42012b0f064e01aa58b545fe3727f90f7dd4020f4a3ea735b50344965f5a57e9" + [[package]] name = "fxhash" version = "0.2.1" @@ -2671,18 +3026,6 @@ dependencies = [ "regex-syntax 0.8.3", ] -[[package]] -name = "gloo-timers" -version = "0.2.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b995a66bb87bebce9a0f4a95aed01daca4872c050bfcb21653361c03bc35e5c" -dependencies = [ - "futures-channel", - "futures-core", - "js-sys", - "wasm-bindgen", -] - [[package]] name = "group" version = "0.13.0" @@ -2696,9 +3039,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -2753,21 +3096,18 @@ dependencies = [ "allocator-api2", ] -[[package]] -name = "hashers" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2bca93b15ea5a746f220e56587f71e73c6165eab783df9e26590069953e3c30" -dependencies = [ - "fxhash", -] - 
[[package]] name = "heck" version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + [[package]] name = "hermit-abi" version = "0.3.9" @@ -2779,6 +3119,9 @@ name = "hex" version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" +dependencies = [ + "serde", +] [[package]] name = "hex-conservative" @@ -2875,7 +3218,7 @@ checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", "http 0.2.12", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2898,7 +3241,7 @@ dependencies = [ "futures-core", "http 1.1.0", "http-body 1.0.0", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", ] [[package]] @@ -2941,7 +3284,7 @@ dependencies = [ "httparse", "httpdate", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "socket2 0.4.10", "tokio", "tower-service", @@ -2951,9 +3294,9 @@ dependencies = [ [[package]] name = "hyper" -version = "1.2.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "186548d73ac615b32a73aafe38fb4f56c0d340e110e5a200bcadbaf2e199263a" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", @@ -2962,7 +3305,7 @@ dependencies = [ "http-body 1.0.0", "httparse", "itoa", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "smallvec", "tokio", "want", @@ -2970,15 +3313,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.0" +version = "0.27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"736f15a50e749d033164c56c09783b6102c4ff8da79ad77dbddbbaea0f8567f7" +checksum = "908bb38696d7a037a01ebcc68a00634112ac2bbf8ca74e30a2c3d2f4f021302b" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-util", - "rustls 0.23.4", + "rustls 0.23.5", "rustls-native-certs", "rustls-pki-types", "tokio", @@ -2997,8 +3340,8 @@ dependencies = [ "futures-util", "http 1.1.0", "http-body 1.0.0", - "hyper 1.2.0", - "pin-project-lite 0.2.13", + "hyper 1.3.1", + "pin-project-lite 0.2.14", "socket2 0.5.6", "tokio", "tower", @@ -3063,6 +3406,16 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "if-addrs" version = "0.10.2" @@ -3120,15 +3473,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] -name = "impl-rlp" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" -dependencies = [ - "rlp", -] - [[package]] name = "impl-serde" version = "0.4.0" @@ -3149,12 +3493,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "indenter" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" - [[package]] name = "indexmap" version = "1.9.3" @@ -3242,6 +3580,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -3302,7 +3649,7 @@ version = "0.16.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "44e8ab85614a08792b9bff6c8feee23be78c98d0182d4c622c05256ab553892a" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-crate 1.3.1", "proc-macro2", "quote", @@ -3345,20 +3692,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "jsonwebtoken" -version = "8.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6971da4d9c3aa03c3d8f3ff0f4155b534aad021292003895a469716b2a230378" -dependencies = [ - "base64 0.21.7", - "pem", - "ring 0.16.20", - "serde", - "serde_json", - "simple_asn1", -] - [[package]] name = "k256" version = "0.13.3" @@ -3370,7 +3703,6 @@ dependencies = [ "elliptic-curve", "once_cell", "sha2", - "signature", ] [[package]] @@ -3382,6 +3714,16 @@ dependencies = [ "cpufeatures", ] +[[package]] +name = "keccak-asm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb8515fff80ed850aea4a1595f2e519c003e2a00a82fe168ebf5269196caf444" +dependencies = [ + "digest 0.10.7", + "sha3-asm", +] + [[package]] name = "kvdb" version = "0.13.0" @@ -3448,7 +3790,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.52.4", + "windows-targets 0.48.5", ] [[package]] @@ -3765,7 +4107,7 @@ dependencies = [ "quinn", "rand", "ring 0.16.20", - "rustls 0.21.10", + "rustls 0.21.11", "socket2 0.5.6", "thiserror", "tokio", @@ -3818,11 +4160,11 @@ version = "0.33.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4d5ec2a3df00c7836d7696c136274c9c59705bac69133253696a6c932cd1d74" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro-warning", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -3854,7 +4196,7 @@ dependencies = [ "libp2p-identity", "rcgen", "ring 0.16.20", - "rustls 0.21.10", + "rustls 0.21.11", "rustls-webpki 0.101.7", "thiserror", 
"x509-parser", @@ -3886,7 +4228,7 @@ dependencies = [ "futures", "js-sys", "libp2p-core", - "send_wrapper 0.6.0", + "send_wrapper", "wasm-bindgen", "wasm-bindgen-futures", ] @@ -4075,7 +4417,7 @@ dependencies = [ "macro_magic_core", "macro_magic_macros", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -4089,7 +4431,7 @@ dependencies = [ "macro_magic_core_macros", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -4100,7 +4442,7 @@ checksum = "d710e1214dffbab3b5dacb21475dde7d6ed84c69ff722b3a47a782668d44fbac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -4111,7 +4453,7 @@ checksum = "b8fb85ec1620619edf2984a7693497d4ec88a9665d8b87e942856884c92dbf2a" dependencies = [ "macro_magic_core", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -4164,9 +4506,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "memfd" @@ -4216,12 +4558,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "mini-serai" version = "0.1.0" @@ -4306,7 +4642,6 @@ dependencies = [ "dalek-ff-group", "digest 0.10.7", "dkg", - "dleq", "flexible-transcript", "hex", "minimal-ed448", @@ -4343,7 +4678,6 @@ dependencies = [ "curve25519-dalek", "dalek-ff-group", "digest_auth", - "dleq", "flexible-transcript", "group", "hex", @@ -4518,9 +4852,9 @@ dependencies = [ [[package]] name = "nalgebra" -version = "0.32.4" +version = "0.32.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4541eb06dce09c0241ebbaab7102f0a01a0c8994afed2e5d0d66775016e25ac2" +checksum = "3ea4908d4f23254adda3daa60ffef0f1ac7b8c3e9a864cf3cc154b251908a2ef" dependencies = [ "approx", "matrixmultiply", @@ -4607,9 +4941,9 @@ dependencies = [ [[package]] name = "netlink-sys" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6471bf08e7ac0135876a9581bf3217ef0333c191c128d34878079f42ee150411" +checksum = "416060d346fbaf1f23f9512963e3e878f1a78e707cb699ba9215761754244307" dependencies = [ "bytes", "futures", @@ -4732,27 +5066,6 @@ dependencies = [ "libc", ] -[[package]] -name = "num_enum" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" -dependencies = [ - "num_enum_derive", -] - -[[package]] -name = "num_enum_derive" -version = "0.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" -dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2", - "quote", - "syn 2.0.55", -] - [[package]] name = "object" version = "0.31.1" @@ -4795,31 +5108,6 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "open-fastrlp" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "786393f80485445794f6043fd3138854dd109cc6c4bd1a6383db304c9ce9b9ce" -dependencies = [ - "arrayvec", - "auto_impl", - "bytes", - "ethereum-types", - "open-fastrlp-derive", -] - -[[package]] -name = "open-fastrlp-derive" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "003b2be5c6c53c1cfeb0a238b8a1c3915cd410feb684457a36c10038f764bb1c" -dependencies = [ - "bytes", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = 
"openssl-probe" version = "0.1.5" @@ -5182,6 +5470,17 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + [[package]] name = "petgraph" version = "0.6.4" @@ -5192,16 +5491,6 @@ dependencies = [ "indexmap 2.2.6", ] -[[package]] -name = "pharos" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9567389417feee6ce15dd6527a8a1ecac205ef62c2932bcf3d9f6fc5b78b414" -dependencies = [ - "futures", - "rustc_version", -] - [[package]] name = "pin-project" version = "1.1.5" @@ -5219,7 +5508,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -5230,9 +5519,9 @@ checksum = "257b64915a082f7811703966789728173279bdebb956b143dbcd23f6f970a777" [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -5271,7 +5560,7 @@ dependencies = [ "cfg-if", "concurrent-queue", "hermit-abi", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "rustix", "tracing", "windows-sys 0.52.0", @@ -5320,7 +5609,7 @@ checksum = "59230a63c37f3e18569bdb90e4a89cbf5bf8b06fea0b84e65ea10cc4df47addd" dependencies = [ "difflib", "float-cmp", - "itertools", + "itertools 0.10.5", "normalize-line-endings", "predicates-core", "regex", @@ -5352,16 +5641,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = 
"prettyplease" -version = "0.2.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a41cf62165e97c7f814d2221421dbb9afcbcdb0a88068e5ea206e19951c2cbb5" -dependencies = [ - "proc-macro2", - "syn 2.0.55", -] - [[package]] name = "primeorder" version = "0.13.6" @@ -5379,7 +5658,6 @@ checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", - "impl-rlp", "impl-serde", "scale-info", "uint", @@ -5443,14 +5721,14 @@ checksum = "3d1eaa7fa0aa1929ffdf7eeb6eac234dde6268914a14ad44d23521ab6a9b258e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] name = "proc-macro2" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" +checksum = "3d1597b0c024618f09a9c3b8655b7e430397a36d23fdafec26d6965e9eec3eba" dependencies = [ "unicode-ident", ] @@ -5489,7 +5767,27 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", +] + +[[package]] +name = "proptest" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" +dependencies = [ + "bit-set", + "bit-vec", + "bitflags 2.5.0", + "lazy_static", + "num-traits", + "rand", + "rand_chacha", + "rand_xorshift", + "regex-syntax 0.8.3", + "rusty-fork", + "tempfile", + "unarray", ] [[package]] @@ -5509,13 +5807,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" dependencies = [ "bytes", - "heck", - "itertools", + "heck 0.4.1", + "itertools 0.10.5", "lazy_static", "log", "multimap", "petgraph", - "prettyplease 0.1.25", + "prettyplease", "prost", "prost-types", "regex", @@ -5531,7 +5829,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" dependencies = [ "anyhow", - "itertools", + "itertools 0.10.5", "proc-macro2", "quote", "syn 1.0.109", @@ -5602,11 +5900,11 @@ checksum = "8cc2c5017e4b43d5995dcea317bc46c1e09404c0a9664d2908f7f02dfe943d75" dependencies = [ "bytes", "futures-io", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "quinn-proto", "quinn-udp", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.11", "thiserror", "tokio", "tracing", @@ -5622,7 +5920,7 @@ dependencies = [ "rand", "ring 0.16.20", "rustc-hash", - "rustls 0.21.10", + "rustls 0.21.11", "slab", "thiserror", "tinyvec", @@ -5644,9 +5942,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.35" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "291ec9ab5efd934aaf503a6466c5d5251535d108ee747472c3977cc5acc868ef" +checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ "proc-macro2", ] @@ -5706,6 +6004,15 @@ dependencies = [ "rand_core", ] +[[package]] +name = "rand_xorshift" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" +dependencies = [ + "rand_core", +] + [[package]] name = "rawpointer" version = "0.2.1" @@ -5790,7 +6097,7 @@ checksum = "5fddb4f8d99b0a2ebafc65a87a69a7b9875e4b1ae1f00db265d300ef7f28bccc" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -5850,41 +6157,6 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" -[[package]] -name = "reqwest" -version = "0.11.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37b1ae8d9ac08420c66222fb9096fc5de435c3c48542bc5336c51892cffafb41" -dependencies = [ - 
"base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "ipnet", - "js-sys", - "log", - "mime", - "once_cell", - "percent-encoding", - "pin-project-lite 0.2.13", - "serde", - "serde_json", - "serde_urlencoded", - "system-configuration", - "tokio", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "resolv-conf" version = "0.7.0" @@ -5951,21 +6223,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb919243f34364b6bd2fc10ef797edbfa75f33c252e7998527479c6d6b47e1ec" dependencies = [ "bytes", - "rlp-derive", "rustc-hex", ] -[[package]] -name = "rlp-derive" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e33d7b2abe0c340d8797fe2907d3f20d3b5ea5908683618bfe80df7f621f672a" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "rocksdb" version = "0.21.0" @@ -6019,6 +6279,36 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "ruint" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f308135fef9fc398342da5472ce7c484529df23743fb7c734e0f3d472971e62" +dependencies = [ + "alloy-rlp", + "ark-ff 0.3.0", + "ark-ff 0.4.2", + "bytes", + "fastrlp", + "num-bigint", + "num-traits", + "parity-scale-codec", + "primitive-types", + "proptest", + "rand", + "rlp", + "ruint-macro", + "serde", + "valuable", + "zeroize", +] + +[[package]] +name = "ruint-macro" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f86854cf50259291520509879a5c294c3c9a4c334e9ff65071c51e42ef1e2343" + [[package]] name = "rustc-demangle" version = "0.1.23" @@ -6037,6 +6327,15 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" 
+[[package]] +name = "rustc_version" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0dfe2087c51c460008730de8b57e6a320782fbfb312e1f4d520e6c6fae155ee" +dependencies = [ + "semver 0.11.0", +] + [[package]] name = "rustc_version" version = "0.4.0" @@ -6070,9 +6369,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.21.10" +version = "0.21.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" +checksum = "7fecbfb7b1444f477b345853b1fce097a2c6fb637b2bfb87e6bc5db0f043fae4" dependencies = [ "log", "ring 0.17.8", @@ -6082,9 +6381,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.4" +version = "0.23.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c4d6d8ad9f2492485e13453acbb291dd08f64441b6609c491f1c2cd2c6b4fe1" +checksum = "afabcee0551bd1aa3e18e5adbf2c0544722014b899adb31bd186ec638d3da97e" dependencies = [ "once_cell", "ring 0.17.8", @@ -6109,19 +6408,19 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.1" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f48172685e6ff52a556baa527774f61fcaa884f59daf3375c62a3f1cd2549dab" +checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" dependencies = [ - "base64 0.21.7", + "base64 0.22.0", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "868e20fada228fefaf6b652e00cc73623d54f8171e7352c18bb281571f2d92da" +checksum = "ecd36cc4259e3e4514335c4a138c6b43171a8d61d8f5c9348f9fc7529416f247" [[package]] name = "rustls-webpki" @@ -6146,9 +6445,21 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "80af6f9131f277a45a3fba6ce8e2258037bb0477a67e610d3c1fe046ab31de47" + +[[package]] +name = "rusty-fork" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" +dependencies = [ + "fnv", + "quick-error", + "tempfile", + "wait-timeout", +] [[package]] name = "rw-stream-sink" @@ -6289,7 +6600,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -7050,7 +7361,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -7112,9 +7423,9 @@ dependencies = [ [[package]] name = "scale-info" -version = "2.11.1" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "788745a868b0e751750388f4e6546eb921ef714a4317fa6954f7cde114eb2eb7" +checksum = "7c453e59a955f81fb62ee5d596b450383d699f152d350e9d23a0db2adb78e4c0" dependencies = [ "bitvec", "cfg-if", @@ -7126,9 +7437,9 @@ dependencies = [ [[package]] name = "scale-info-derive" -version = "2.11.1" +version = "2.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7dc2f4e8bc344b9fc3d5f74f72c2e55bfc38d28dc2ebc69c194a3df424e4d9ac" +checksum = "18cf6c6447f813ef19eb450e985bcce6705f9ce7660db221b59093d15c79c4b7" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", @@ -7263,9 +7574,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7276,9 +7587,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" 
+version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7290,7 +7601,16 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7a3186ec9e65071a2095434b1f5bb24838d4e8e130f584c790f6033c79943537" dependencies = [ - "semver-parser", + "semver-parser 0.7.0", +] + +[[package]] +name = "semver" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f301af10236f6df4160f7c3f04eec6dbc70ace82d23326abad5edee88801c6b6" +dependencies = [ + "semver-parser 0.10.2", ] [[package]] @@ -7309,10 +7629,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] -name = "send_wrapper" -version = "0.4.0" +name = "semver-parser" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" +checksum = "00b0bef5b7f9e0df16536d3961cfb6e84331c065b4066afb39768d0e319411f7" +dependencies = [ + "pest", +] [[package]] name = "send_wrapper" @@ -7329,6 +7652,7 @@ dependencies = [ "parity-scale-codec", "scale-info", "serai-coins-primitives", + "serai-genesis-liquidity-primitives", "serai-in-instructions-primitives", "serai-primitives", "serai-signals-primitives", @@ -7551,6 +7875,9 @@ dependencies = [ "serai-dex-pallet", "serai-genesis-liquidity-primitives", "serai-primitives", + "serai-validator-sets-primitives", + "sp-application-crypto", + "sp-core", "sp-std", ] @@ -7558,7 +7885,14 @@ dependencies = [ name = "serai-genesis-liquidity-primitives" version = "0.1.0" dependencies = [ + "borsh", + "parity-scale-codec", + "scale-info", "serai-primitives", + 
"serai-validator-sets-primitives", + "serde", + "sp-std", + "zeroize", ] [[package]] @@ -7660,6 +7994,7 @@ dependencies = [ "futures-util", "hex", "jsonrpsee", + "libp2p", "pallet-transaction-payment-rpc", "rand_core", "sc-authority-discovery", @@ -7734,9 +8069,11 @@ dependencies = [ "bitcoin-serai", "borsh", "ciphersuite", + "const-hex", "dalek-ff-group", "dockertest", "env_logger", + "ethereum-serai", "flexible-transcript", "frost-schnorrkel", "hex", @@ -7925,9 +8262,9 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb1c873e1b9b056a4dc4c0c198b24c3ffa059243875552b2bd0933b1aee4ce2" +checksum = "9846a40c979031340571da2545a4e5b7c4163bdae79b301d5f86d03979451fcc" dependencies = [ "serde_derive", ] @@ -7943,20 +8280,20 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.197" +version = "1.0.198" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" +checksum = "e88edab869b01783ba905e7d0153f9fc1a6505a96e4ad3018011eedb838566d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] name = "serde_json" -version = "1.0.115" +version = "1.0.116" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" +checksum = "3e17db7126d17feb94eb3fad46bf1a96b034e8aacbc2e775fe81505f8b0b2813" dependencies = [ "itoa", "ryu", @@ -7965,13 +8302,13 @@ dependencies = [ [[package]] name = "serde_repr" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" +checksum = "6c64451ba24fc7a6a2d60fc75dd9c83c90903b19028d4eff35e88fc1e86564e9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ 
-8057,6 +8394,16 @@ dependencies = [ "keccak", ] +[[package]] +name = "sha3-asm" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bac61da6b35ad76b195eb4771210f947734321a8d81d7738e1580d953bc7a15e" +dependencies = [ + "cc", + "cfg-if", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -8110,7 +8457,7 @@ version = "0.1.0" dependencies = [ "base64ct", "http-body-util", - "hyper 1.2.0", + "hyper 1.3.1", "hyper-rustls", "hyper-util", "tokio", @@ -8118,18 +8465,6 @@ dependencies = [ "zeroize", ] -[[package]] -name = "simple_asn1" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" -dependencies = [ - "num-bigint", - "num-traits", - "thiserror", - "time", -] - [[package]] name = "siphasher" version = "0.3.11" @@ -8175,7 +8510,7 @@ dependencies = [ "curve25519-dalek", "rand_core", "ring 0.17.8", - "rustc_version", + "rustc_version 0.4.0", "sha2", "subtle", ] @@ -8248,7 +8583,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -8444,7 +8779,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "quote", "sp-core-hashing", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -8463,7 +8798,7 @@ source = "git+https://github.com/serai-dex/substrate#6e3f07bf5c98a6a3ec15f2b1a46 dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -8635,7 +8970,7 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -8788,7 +9123,7 @@ dependencies = [ "parity-scale-codec", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -8925,9 +9260,9 @@ dependencies = [ [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -8950,7 +9285,7 @@ version = "0.24.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e385be0d24f186b4ce2f9982191e7101bb737312ad61c1f2f984f34bcf85d59" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", @@ -8963,11 +9298,11 @@ version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" dependencies = [ - "heck", + "heck 0.4.1", "proc-macro2", "quote", "rustversion", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -9055,15 +9390,27 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.55" +version = "2.0.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +checksum = "909518bc7b1c9b779f1bbf07f2929d35af9f0f37e47c6e9ef7f9dddc1e1821f3" dependencies = [ "proc-macro2", "quote", "unicode-ident", ] +[[package]] +name = "syn-solidity" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4497156948bd342b52038035a6fa514a89626e37af9d2c52a5e8d8ebcc7ee479" +dependencies = [ + "paste", + "proc-macro2", + "quote", + "syn 2.0.60", +] + [[package]] name = "syn_derive" version = "0.1.8" @@ -9073,7 +9420,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -9165,22 +9512,22 @@ checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" [[package]] name = "thiserror" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" +checksum = "f0126ad08bff79f29fc3ae6a55cc72352056dfff61e3ff8bb7129476d44b23aa" 
dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.58" +version = "1.0.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" +checksum = "d1cd413b5d558b4c5bf3680e324a6fa5014e7b7c067a51e69dbdf47eb7148b66" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -9276,9 +9623,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", @@ -9286,7 +9633,7 @@ dependencies = [ "mio", "num_cpus", "parking_lot 0.12.1", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "signal-hook-registry", "socket2 0.5.6", "tokio-macros", @@ -9301,7 +9648,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -9310,7 +9657,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.4", + "rustls 0.23.5", "rustls-pki-types", "tokio", ] @@ -9322,7 +9669,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tokio-util", ] @@ -9337,7 +9684,7 @@ dependencies = [ "futures-core", "futures-io", "futures-sink", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tracing", ] @@ -9382,7 +9729,7 @@ dependencies = [ "serde", 
"serde_spanned", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9393,7 +9740,7 @@ checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" dependencies = [ "indexmap 2.2.6", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] @@ -9405,7 +9752,7 @@ dependencies = [ "futures-core", "futures-util", "pin-project", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tokio", "tower-layer", "tower-service", @@ -9425,7 +9772,7 @@ dependencies = [ "http 0.2.12", "http-body 0.4.6", "http-range-header", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tower-layer", "tower-service", ] @@ -9449,7 +9796,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ "log", - "pin-project-lite 0.2.13", + "pin-project-lite 0.2.14", "tracing-attributes", "tracing-core", ] @@ -9462,7 +9809,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -9695,6 +10042,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -9707,6 +10060,12 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "unarray" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -9776,12 +10135,12 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version 
= "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "143b538f18257fac9cad154828a57c6bf5157e1aa604d4816b5995bf6de87ae5" +checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" dependencies = [ "form_urlencoded", - "idna 0.4.0", + "idna 0.5.0", "percent-encoding", ] @@ -9821,6 +10180,15 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" +[[package]] +name = "wait-timeout" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9f200f5b12eb75f8c1ed65abd4b2db8a6e1b138a20de009dacee265a2498f3f6" +dependencies = [ + "libc", +] + [[package]] name = "walkdir" version = "2.5.0" @@ -9867,7 +10235,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", "wasm-bindgen-shared", ] @@ -9901,7 +10269,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -10210,7 +10578,7 @@ checksum = "ca7af9bb3ee875c4907835e607a275d10b04d15623d3aebe01afe8fbd3f85050" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -10243,9 +10611,9 @@ dependencies = [ [[package]] name = "wide" -version = "0.7.15" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89beec544f246e679fc25490e3f8e08003bc4bf612068f325120dad4cea02c1c" +checksum = "81a1851a719f11d1d2fea40e15c72f6c00de8c142d7ac47c1441cc7e4d0d5bc6" dependencies = [ "bytemuck", "safe_arch", @@ -10253,9 +10621,9 @@ dependencies = [ [[package]] name = "widestring" -version = "1.0.2" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "653f141f39ec16bba3c5abe400a0c60da7468261cc2cbf36805022876bc721a8" +checksum = 
"7219d36b6eac893fa81e84ebe06485e7dcbb616177469b142df14f1f4deb1311" [[package]] name = "winapi" @@ -10457,6 +10825,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winnow" +version = "0.6.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0c976aaaa0e1f90dbb21e9587cdaf1d9679a1cde8875c0d6bd83ab96a208352" + [[package]] name = "winreg" version = "0.50.0" @@ -10467,25 +10841,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "ws_stream_wasm" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7999f5f4217fe3818726b66257a4475f71e74ffd190776ad053fa159e50737f5" -dependencies = [ - "async_io_stream", - "futures", - "js-sys", - "log", - "pharos", - "rustc_version", - "send_wrapper 0.6.0", - "thiserror", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", -] - [[package]] name = "wyz" version = "0.5.1" @@ -10526,9 +10881,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "xmltree" @@ -10587,7 +10942,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] @@ -10607,14 +10962,14 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.60", ] [[package]] name = "zstd" version = "0.11.2+zstd.1.5.2" dependencies = [ - "zstd 0.13.0", + "zstd 0.13.1", ] [[package]] @@ -10628,11 +10983,11 @@ dependencies = [ [[package]] name = "zstd" -version = "0.13.0" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" dependencies = [ - "zstd-safe 7.0.0", + "zstd-safe 7.1.0", ] [[package]] @@ -10647,18 +11002,18 @@ dependencies = [ [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.10+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "c253a4914af5bafc8fa8c86ee400827e83cf6ec01195ec1f1ed8441bf00d65aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index bcc344ed..8a19d159 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,6 +36,7 @@ members = [ "crypto/schnorrkel", "coins/bitcoin", + "coins/ethereum/alloy-simple-request-transport", "coins/ethereum", "coins/monero/generators", "coins/monero", diff --git a/coins/bitcoin/src/wallet/send.rs b/coins/bitcoin/src/wallet/send.rs index f4cfa3b5..24594ab4 100644 --- a/coins/bitcoin/src/wallet/send.rs +++ b/coins/bitcoin/src/wallet/send.rs @@ -375,7 +375,7 @@ impl SignMachine for TransactionSignMachine { msg: &[u8], ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> { if !msg.is_empty() { - panic!("message was passed to the TransactionMachine when it generates its own"); + panic!("message was passed to the TransactionSignMachine when it generates its own"); } let commitments = (0 .. 
self.sigs.len()) diff --git a/coins/ethereum/.gitignore b/coins/ethereum/.gitignore index 46365e03..2dccdce9 100644 --- a/coins/ethereum/.gitignore +++ b/coins/ethereum/.gitignore @@ -1,7 +1,3 @@ # Solidity build outputs cache artifacts - -# Auto-generated ABI files -src/abi/schnorr.rs -src/abi/router.rs diff --git a/coins/ethereum/Cargo.toml b/coins/ethereum/Cargo.toml index bc60d3a4..4bb92fe4 100644 --- a/coins/ethereum/Cargo.toml +++ b/coins/ethereum/Cargo.toml @@ -18,28 +18,29 @@ workspace = true [dependencies] thiserror = { version = "1", default-features = false } -eyre = { version = "0.6", default-features = false } -sha3 = { version = "0.10", default-features = false, features = ["std"] } - -group = { version = "0.13", default-features = false } -k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] } -frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] } - -ethers-core = { version = "2", default-features = false } -ethers-providers = { version = "2", default-features = false } -ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } - -[build-dependencies] -ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] } - -[dev-dependencies] rand_core = { version = "0.6", default-features = false, features = ["std"] } -hex = { version = "0.4", default-features = false, features = ["std"] } -serde = { version = "1", default-features = false, features = ["std"] } -serde_json = { version = "1", default-features = false, features = ["std"] } +transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] } -sha2 = { version = "0.10", default-features = false, features = ["std"] } +group = { version = "0.13", default-features = false } +k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] } 
+frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] } + +alloy-core = { version = "0.7", default-features = false } +alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] } +alloy-consensus = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false, features = ["k256"] } +alloy-rpc-types = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-rpc-client = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false } +alloy-provider = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[dev-dependencies] +frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] } tokio = { version = "1", features = ["macros"] } + +alloy-node-bindings = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[features] +tests = [] diff --git a/coins/ethereum/README.md b/coins/ethereum/README.md index 13f1f2db..0090b26b 100644 --- a/coins/ethereum/README.md +++ b/coins/ethereum/README.md @@ -3,6 +3,12 @@ This package contains Ethereum-related functionality, specifically deploying and interacting with Serai contracts. +While `monero-serai` and `bitcoin-serai` are general purpose libraries, +`ethereum-serai` is Serai specific. If any of the utilities are generally +desired, please fork and maintain your own copy to ensure the desired +functionality is preserved, or open an issue to request we make this library +general purpose. 
+ ### Dependencies - solc diff --git a/coins/ethereum/alloy-simple-request-transport/Cargo.toml b/coins/ethereum/alloy-simple-request-transport/Cargo.toml new file mode 100644 index 00000000..115998e4 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "alloy-simple-request-transport" +version = "0.1.0" +description = "A transport for alloy based off simple-request" +license = "MIT" +repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum/alloy-simple-request-transport" +authors = ["Luke Parker "] +edition = "2021" +rust-version = "1.74" + +[package.metadata.docs.rs] +all-features = true +rustdoc-args = ["--cfg", "docsrs"] + +[lints] +workspace = true + +[dependencies] +tower = "0.4" + +serde_json = { version = "1", default-features = false } +simple-request = { path = "../../../common/request", default-features = false } + +alloy-json-rpc = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } +alloy-transport = { git = "https://github.com/alloy-rs/alloy", rev = "037dd4b20ec8533d6b6d5cf5e9489bbb182c18c6", default-features = false } + +[features] +default = ["tls"] +tls = ["simple-request/tls"] diff --git a/coins/ethereum/alloy-simple-request-transport/LICENSE b/coins/ethereum/alloy-simple-request-transport/LICENSE new file mode 100644 index 00000000..659881f1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2024 Luke Parker + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following 
conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/coins/ethereum/alloy-simple-request-transport/README.md b/coins/ethereum/alloy-simple-request-transport/README.md new file mode 100644 index 00000000..372540d1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/README.md @@ -0,0 +1,4 @@ +# Alloy Simple Request Transport + +A transport for alloy based on simple-request, a small HTTP client built around +hyper. 
diff --git a/coins/ethereum/alloy-simple-request-transport/src/lib.rs b/coins/ethereum/alloy-simple-request-transport/src/lib.rs new file mode 100644 index 00000000..93b35bc1 --- /dev/null +++ b/coins/ethereum/alloy-simple-request-transport/src/lib.rs @@ -0,0 +1,60 @@ +#![cfg_attr(docsrs, feature(doc_auto_cfg))] +#![doc = include_str!("../README.md")] + +use core::task; +use std::io; + +use alloy_json_rpc::{RequestPacket, ResponsePacket}; +use alloy_transport::{TransportError, TransportErrorKind, TransportFut}; + +use simple_request::{hyper, Request, Client}; + +use tower::Service; + +#[derive(Clone, Debug)] +pub struct SimpleRequest { + client: Client, + url: String, +} + +impl SimpleRequest { + pub fn new(url: String) -> Self { + Self { client: Client::with_connection_pool(), url } + } +} + +impl Service for SimpleRequest { + type Response = ResponsePacket; + type Error = TransportError; + type Future = TransportFut<'static>; + + #[inline] + fn poll_ready(&mut self, _cx: &mut task::Context<'_>) -> task::Poll> { + task::Poll::Ready(Ok(())) + } + + #[inline] + fn call(&mut self, req: RequestPacket) -> Self::Future { + let inner = self.clone(); + Box::pin(async move { + let packet = req.serialize().map_err(TransportError::SerError)?; + let request = Request::from( + hyper::Request::post(&inner.url) + .header("Content-Type", "application/json") + .body(serde_json::to_vec(&packet).map_err(TransportError::SerError)?.into()) + .unwrap(), + ); + + let mut res = inner + .client + .request(request) + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))? 
+ .body() + .await + .map_err(|e| TransportErrorKind::custom(io::Error::other(format!("{e:?}"))))?; + + serde_json::from_reader(&mut res).map_err(|e| TransportError::deser_err(e, "")) + }) + } +} diff --git a/coins/ethereum/build.rs b/coins/ethereum/build.rs index 3590b12f..38fcfe00 100644 --- a/coins/ethereum/build.rs +++ b/coins/ethereum/build.rs @@ -1,7 +1,5 @@ use std::process::Command; -use ethers_contract::Abigen; - fn main() { println!("cargo:rerun-if-changed=contracts/*"); println!("cargo:rerun-if-changed=artifacts/*"); @@ -21,22 +19,23 @@ fn main() { "--base-path", ".", "-o", "./artifacts", "--overwrite", "--bin", "--abi", - "--optimize", - "./contracts/Schnorr.sol", "./contracts/Router.sol", + "--via-ir", "--optimize", + + "./contracts/IERC20.sol", + + "./contracts/Schnorr.sol", + "./contracts/Deployer.sol", + "./contracts/Sandbox.sol", + "./contracts/Router.sol", + + "./src/tests/contracts/Schnorr.sol", + "./src/tests/contracts/ERC20.sol", + + "--no-color", ]; - assert!(Command::new("solc").args(args).status().unwrap().success()); - - Abigen::new("Schnorr", "./artifacts/Schnorr.abi") - .unwrap() - .generate() - .unwrap() - .write_to_file("./src/abi/schnorr.rs") - .unwrap(); - - Abigen::new("Router", "./artifacts/Router.abi") - .unwrap() - .generate() - .unwrap() - .write_to_file("./src/abi/router.rs") - .unwrap(); + let solc = Command::new("solc").args(args).output().unwrap(); + assert!(solc.status.success()); + for line in String::from_utf8(solc.stderr).unwrap().lines() { + assert!(!line.starts_with("Error:")); + } } diff --git a/coins/ethereum/contracts/Deployer.sol b/coins/ethereum/contracts/Deployer.sol new file mode 100644 index 00000000..475be4c1 --- /dev/null +++ b/coins/ethereum/contracts/Deployer.sol @@ -0,0 +1,52 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +/* +The expected deployment process of the Router is as follows: + +1) A transaction deploying Deployer is made. 
Then, a deterministic signature is + created such that an account with an unknown private key is the creator of + the contract. Anyone can fund this address, and once anyone does, the + transaction deploying Deployer can be published by anyone. No other + transaction may be made from that account. + +2) Anyone deploys the Router through the Deployer. This uses a sequential nonce + such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible. + While such attacks would still be feasible if the Deployer's address was + controllable, the usage of a deterministic signature with a NUMS method + prevents that. + +This doesn't have any denial-of-service risks and will resolve once anyone steps +forward as deployer. This does fail to guarantee an identical address across +every chain, though it enables letting anyone efficiently ask the Deployer for +the address (with the Deployer having an identical address on every chain). + +Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the +Deployer contract to use a consistent salt for the Router, yet the Router must +be deployed with a specific public key for Serai. Since Ethereum isn't able to +determine a valid public key (one the result of a Serai DKG) from a dishonest +public key, we have to allow multiple deployments with Serai being the one to +determine which to use. + +The alternative would be to have a council publish the Serai key on-Ethereum, +with Serai verifying the published result. This would introduce a DoS risk in +the council not publishing the correct key/not publishing any key. 
+*/ + +contract Deployer { + event Deployment(bytes32 indexed init_code_hash, address created); + + error DeploymentFailed(); + + function deploy(bytes memory init_code) external { + address created; + assembly { + created := create(0, add(init_code, 0x20), mload(init_code)) + } + if (created == address(0)) { + revert DeploymentFailed(); + } + // These may be emitted out of order upon re-entrancy + emit Deployment(keccak256(init_code), created); + } +} diff --git a/coins/ethereum/contracts/IERC20.sol b/coins/ethereum/contracts/IERC20.sol new file mode 100644 index 00000000..70f1f93c --- /dev/null +++ b/coins/ethereum/contracts/IERC20.sol @@ -0,0 +1,20 @@ +// SPDX-License-Identifier: CC0 +pragma solidity ^0.8.0; + +interface IERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() external view returns (string memory); + function symbol() external view returns (string memory); + function decimals() external view returns (uint8); + + function totalSupply() external view returns (uint256); + + function balanceOf(address owner) external view returns (uint256); + function transfer(address to, uint256 value) external returns (bool); + function transferFrom(address from, address to, uint256 value) external returns (bool); + + function approve(address spender, uint256 value) external returns (bool); + function allowance(address owner, address spender) external view returns (uint256); +} diff --git a/coins/ethereum/contracts/Router.sol b/coins/ethereum/contracts/Router.sol index 25775ec5..c5e1efa2 100644 --- a/coins/ethereum/contracts/Router.sol +++ b/coins/ethereum/contracts/Router.sol @@ -1,27 +1,24 @@ // SPDX-License-Identifier: AGPLv3 pragma solidity ^0.8.0; +import "./IERC20.sol"; + import "./Schnorr.sol"; +import "./Sandbox.sol"; -contract Router is Schnorr { - // Contract initializer - // TODO: Replace with a MuSig of the genesis validators 
- address public initializer; - - // Nonce is incremented for each batch of transactions executed +contract Router { + // Nonce is incremented for each batch of transactions executed/key update uint256 public nonce; - // fixed parity for the public keys used in this contract - uint8 constant public KEY_PARITY = 27; - - // current public key's x-coordinate - // note: this key must always use the fixed parity defined above + // Current public key's x-coordinate + // This key must always have the parity defined within the Schnorr contract bytes32 public seraiKey; struct OutInstruction { address to; + Call[] calls; + uint256 value; - bytes data; } struct Signature { @@ -29,62 +26,197 @@ contract Router is Schnorr { bytes32 s; } + event SeraiKeyUpdated( + uint256 indexed nonce, + bytes32 indexed key, + Signature signature + ); + event InInstruction( + address indexed from, + address indexed coin, + uint256 amount, + bytes instruction + ); // success is a uint256 representing a bitfield of transaction successes - event Executed(uint256 nonce, bytes32 batch, uint256 success); + event Executed( + uint256 indexed nonce, + bytes32 indexed batch, + uint256 success, + Signature signature + ); // error types - error NotInitializer(); - error AlreadyInitialized(); error InvalidKey(); + error InvalidSignature(); + error InvalidAmount(); + error FailedTransfer(); error TooManyTransactions(); - constructor() { - initializer = msg.sender; + modifier _updateSeraiKeyAtEndOfFn( + uint256 _nonce, + bytes32 key, + Signature memory sig + ) { + if ( + (key == bytes32(0)) || + ((bytes32(uint256(key) % Schnorr.Q)) != key) + ) { + revert InvalidKey(); + } + + _; + + seraiKey = key; + emit SeraiKeyUpdated(_nonce, key, sig); } - // initSeraiKey can be called by the contract initializer to set the first - // public key, only if the public key has yet to be set. 
- function initSeraiKey(bytes32 _seraiKey) external { - if (msg.sender != initializer) revert NotInitializer(); - if (seraiKey != 0) revert AlreadyInitialized(); - if (_seraiKey == bytes32(0)) revert InvalidKey(); - seraiKey = _seraiKey; + constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn( + 0, + _seraiKey, + Signature({ c: bytes32(0), s: bytes32(0) }) + ) { + nonce = 1; } - // updateSeraiKey validates the given Schnorr signature against the current public key, - // and if successful, updates the contract's public key to the given one. + // updateSeraiKey validates the given Schnorr signature against the current + // public key, and if successful, updates the contract's public key to the + // given one. function updateSeraiKey( bytes32 _seraiKey, - Signature memory sig - ) public { - if (_seraiKey == bytes32(0)) revert InvalidKey(); - bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey)); - if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); - seraiKey = _seraiKey; + Signature calldata sig + ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) { + bytes memory message = + abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey); + nonce++; + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } } - // execute accepts a list of transactions to execute as well as a Schnorr signature. 
+ function inInstruction( + address coin, + uint256 amount, + bytes memory instruction + ) external payable { + if (coin == address(0)) { + if (amount != msg.value) { + revert InvalidAmount(); + } + } else { + (bool success, bytes memory res) = + address(coin).call( + abi.encodeWithSelector( + IERC20.transferFrom.selector, + msg.sender, + address(this), + amount + ) + ); + + // Require there was nothing returned, which is done by some non-standard + // tokens, or that the ERC20 contract did in fact return true + bool nonStandardResOrTrue = + (res.length == 0) || abi.decode(res, (bool)); + if (!(success && nonStandardResOrTrue)) { + revert FailedTransfer(); + } + } + + /* + Due to fee-on-transfer tokens, emitting the amount directly is frowned upon. + The amount instructed to transfer may not actually be the amount + transferred. + + If we add nonReentrant to every single function which can effect the + balance, we can check the amount exactly matches. This prevents transfers of + less value than expected occurring, at least, not without an additional + transfer to top up the difference (which isn't routed through this contract + and accordingly isn't trying to artificially create events). + + If we don't add nonReentrant, a transfer can be started, and then a new + transfer for the difference can follow it up (again and again until a + rounding error is reached). This contract would believe all transfers were + done in full, despite each only being done in part (except for the last + one). + + Given fee-on-transfer tokens aren't intended to be supported, the only + token planned to be supported is Dai and it doesn't have any fee-on-transfer + logic, fee-on-transfer tokens aren't even able to be supported at this time, + we simply classify this entire class of tokens as non-standard + implementations which induce undefined behavior. It is the Serai network's + role not to add support for any non-standard implementations. 
+ */ + emit InInstruction(msg.sender, coin, amount, instruction); + } + + // execute accepts a list of transactions to execute as well as a signature. // if signature verification passes, the given transactions are executed. // if signature verification fails, this function will revert. function execute( OutInstruction[] calldata transactions, - Signature memory sig - ) public { - if (transactions.length > 256) revert TooManyTransactions(); + Signature calldata sig + ) external { + if (transactions.length > 256) { + revert TooManyTransactions(); + } - bytes32 message = keccak256(abi.encode("execute", nonce, transactions)); + bytes memory message = + abi.encode("execute", block.chainid, nonce, transactions); + uint256 executed_with_nonce = nonce; // This prevents re-entrancy from causing double spends yet does allow // out-of-order execution via re-entrancy nonce++; - if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature(); + + if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) { + revert InvalidSignature(); + } uint256 successes; - for(uint256 i = 0; i < transactions.length; i++) { - (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data); + for (uint256 i = 0; i < transactions.length; i++) { + bool success; + + // If there are no calls, send to `to` the value + if (transactions[i].calls.length == 0) { + (success, ) = transactions[i].to.call{ + value: transactions[i].value, + gas: 5_000 + }(""); + } else { + // If there are calls, ignore `to`. 
Deploy a new Sandbox and proxy the + // calls through that + // + // We could use a single sandbox in order to reduce gas costs, yet that + // risks one person creating an approval that's hooked before another + // user's intended action executes, in order to drain their coins + // + // While technically, that would be a flaw in the sandboxed flow, this + // is robust and prevents such flaws from being possible + // + // We also don't want people to set state via the Sandbox and expect it + // future available when anyone else could set a distinct value + Sandbox sandbox = new Sandbox(); + (success, ) = address(sandbox).call{ + value: transactions[i].value, + // TODO: Have the Call specify the gas up front + gas: 350_000 + }( + abi.encodeWithSelector( + Sandbox.sandbox.selector, + transactions[i].calls + ) + ); + } + assembly { successes := or(successes, shl(i, success)) } } - emit Executed(nonce, message, successes); + emit Executed( + executed_with_nonce, + keccak256(message), + successes, + sig + ); } } diff --git a/coins/ethereum/contracts/Sandbox.sol b/coins/ethereum/contracts/Sandbox.sol new file mode 100644 index 00000000..a82a3afd --- /dev/null +++ b/coins/ethereum/contracts/Sandbox.sol @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.24; + +struct Call { + address to; + uint256 value; + bytes data; +} + +// A minimal sandbox focused on gas efficiency. +// +// The first call is executed if any of the calls fail, making it a fallback. +// All other calls are executed sequentially. 
+contract Sandbox { + error AlreadyCalled(); + error CallsFailed(); + + function sandbox(Call[] calldata calls) external payable { + // Prevent re-entrancy due to this executing arbitrary calls from anyone + // and anywhere + bool called; + assembly { called := tload(0) } + if (called) { + revert AlreadyCalled(); + } + assembly { tstore(0, 1) } + + // Execute the calls, starting from 1 + for (uint256 i = 1; i < calls.length; i++) { + (bool success, ) = + calls[i].to.call{ value: calls[i].value }(calls[i].data); + + // If this call failed, execute the fallback (call 0) + if (!success) { + (success, ) = + calls[0].to.call{ value: address(this).balance }(calls[0].data); + // If this call also failed, revert entirely + if (!success) { + revert CallsFailed(); + } + return; + } + } + + // We don't clear the re-entrancy guard as this contract should never be + // called again, so there's no reason to spend the effort + } +} diff --git a/coins/ethereum/contracts/Schnorr.sol b/coins/ethereum/contracts/Schnorr.sol index 47263e66..8edcdffd 100644 --- a/coins/ethereum/contracts/Schnorr.sol +++ b/coins/ethereum/contracts/Schnorr.sol @@ -2,38 +2,43 @@ pragma solidity ^0.8.0; // see https://github.com/noot/schnorr-verify for implementation details -contract Schnorr { +library Schnorr { // secp256k1 group order uint256 constant public Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141; - error InvalidSOrA(); - error InvalidSignature(); + // Fixed parity for the public keys used in this contract + // This avoids spending a word passing the parity in a similar style to + // Bitcoin's Taproot + uint8 constant public KEY_PARITY = 27; - // parity := public key y-coord parity (27 or 28) - // px := public key x-coord + error InvalidSOrA(); + error MalformedSignature(); + + // px := public key x-coord, where the public key has a parity of KEY_PARITY // message := 32-byte hash of the message // c := schnorr signature challenge // s := schnorr signature function verify( 
- uint8 parity, bytes32 px, - bytes32 message, + bytes memory message, bytes32 c, bytes32 s - ) public view returns (bool) { - // ecrecover = (m, v, r, s); + ) internal pure returns (bool) { + // ecrecover = (m, v, r, s) -> key + // We instead pass the following to obtain the nonce (not the key) + // Then we hash it and verify it matches the challenge bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q)); bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q)); + // For safety, we want each input to ecrecover to be 0 (sa, px, ca) + // The ecreover precomple checks `r` and `s` (`px` and `ca`) are non-zero + // That leaves us to check `sa` are non-zero if (sa == 0) revert InvalidSOrA(); - // the ecrecover precompile implementation checks that the `r` and `s` - // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to - // check if they're zero. - address R = ecrecover(sa, parity, px, ca); - if (R == address(0)) revert InvalidSignature(); - return c == keccak256( - abi.encodePacked(R, uint8(parity), px, block.chainid, message) - ); + address R = ecrecover(sa, KEY_PARITY, px, ca); + if (R == address(0)) revert MalformedSignature(); + + // Check the signature is correct by rebuilding the challenge + return c == keccak256(abi.encodePacked(R, px, message)); } } diff --git a/coins/ethereum/src/abi/mod.rs b/coins/ethereum/src/abi/mod.rs index 2d7dd47c..1ae23374 100644 --- a/coins/ethereum/src/abi/mod.rs +++ b/coins/ethereum/src/abi/mod.rs @@ -1,6 +1,37 @@ +use alloy_sol_types::sol; + #[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] #[allow(clippy::all)] -pub(crate) mod schnorr; +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod erc20_container { + use super::*; + sol!("contracts/IERC20.sol"); +} +pub use erc20_container::IERC20 as erc20; + #[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] #[allow(clippy::all)] -pub(crate) mod router; 
+#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod deployer_container { + use super::*; + sol!("contracts/Deployer.sol"); +} +pub use deployer_container::Deployer as deployer; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod router_container { + use super::*; + sol!(Router, "artifacts/Router.abi"); +} +pub use router_container::Router as router; diff --git a/coins/ethereum/src/abi/router.rs b/coins/ethereum/src/abi/router.rs new file mode 100644 index 00000000..3b7e6f98 --- /dev/null +++ b/coins/ethereum/src/abi/router.rs @@ -0,0 +1,1164 @@ +pub use router::*; +/// This module was auto-generated with ethers-rs Abigen. +/// More information at: +#[allow( + clippy::enum_variant_names, + clippy::too_many_arguments, + clippy::upper_case_acronyms, + clippy::type_complexity, + dead_code, + non_camel_case_types, +)] +pub mod router { + #[allow(deprecated)] + fn __abi() -> ::ethers_core::abi::Abi { + ::ethers_core::abi::ethabi::Contract { + constructor: ::core::option::Option::Some(::ethers_core::abi::ethabi::Constructor { + inputs: ::std::vec![], + }), + functions: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("KEY_PARITY"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("KEY_PARITY"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(8usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint8"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("Q"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: 
::std::borrow::ToOwned::to_owned("Q"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint256"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("execute"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("execute"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("transactions"), + kind: ::ethers_core::abi::ethabi::ParamType::Array( + ::std::boxed::Box::new( + ::ethers_core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers_core::abi::ethabi::ParamType::Address, + ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + ::ethers_core::abi::ethabi::ParamType::Bytes, + ], + ), + ), + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned( + "struct Router.OutInstruction[]", + ), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("sig"), + kind: ::ethers_core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers_core::abi::ethabi::ParamType::FixedBytes(32usize), + ::ethers_core::abi::ethabi::ParamType::FixedBytes(32usize), + ], + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct Router.Signature"), + ), + }, + ], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::NonPayable, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("initSeraiKey"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("initSeraiKey"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param 
{ + name: ::std::borrow::ToOwned::to_owned("_seraiKey"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::NonPayable, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("initializer"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("initializer"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Address, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("address"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("nonce"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("nonce"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint256"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("seraiKey"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("seraiKey"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + 
::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("updateSeraiKey"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("updateSeraiKey"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("_seraiKey"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("sig"), + kind: ::ethers_core::abi::ethabi::ParamType::Tuple( + ::std::vec![ + ::ethers_core::abi::ethabi::ParamType::FixedBytes(32usize), + ::ethers_core::abi::ethabi::ParamType::FixedBytes(32usize), + ], + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("struct Router.Signature"), + ), + }, + ], + outputs: ::std::vec![], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::NonPayable, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("verify"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("verify"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("parity"), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(8usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint8"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("px"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: 
::std::borrow::ToOwned::to_owned("message"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("c"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("s"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Bool, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bool"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ]), + events: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("Executed"), + ::std::vec![ + ::ethers_core::abi::ethabi::Event { + name: ::std::borrow::ToOwned::to_owned("Executed"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("nonce"), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + indexed: false, + }, + ::ethers_core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("batch"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + indexed: false, + }, + ::ethers_core::abi::ethabi::EventParam { + name: ::std::borrow::ToOwned::to_owned("success"), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + indexed: false, + }, + ], + anonymous: false, + }, + ], + ), + ]), + errors: 
::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("AlreadyInitialized"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("AlreadyInitialized"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("InvalidKey"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("InvalidKey"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("InvalidSOrA"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("InvalidSOrA"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("InvalidSignature"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("InvalidSignature"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("NotInitializer"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("NotInitializer"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("TooManyTransactions"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned( + "TooManyTransactions", + ), + inputs: ::std::vec![], + }, + ], + ), + ]), + receive: false, + fallback: false, + } + } + ///The parsed JSON ABI of the contract. 
+ pub static ROUTER_ABI: ::ethers_contract::Lazy<::ethers_core::abi::Abi> = ::ethers_contract::Lazy::new( + __abi, + ); + pub struct Router(::ethers_contract::Contract); + impl ::core::clone::Clone for Router { + fn clone(&self) -> Self { + Self(::core::clone::Clone::clone(&self.0)) + } + } + impl ::core::ops::Deref for Router { + type Target = ::ethers_contract::Contract; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::core::ops::DerefMut for Router { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + impl ::core::fmt::Debug for Router { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_tuple(::core::stringify!(Router)).field(&self.address()).finish() + } + } + impl Router { + /// Creates a new contract instance with the specified `ethers` client at + /// `address`. The contract derefs to a `ethers::Contract` object. + pub fn new>( + address: T, + client: ::std::sync::Arc, + ) -> Self { + Self( + ::ethers_contract::Contract::new( + address.into(), + ROUTER_ABI.clone(), + client, + ), + ) + } + ///Calls the contract's `KEY_PARITY` (0x7e7777a7) function + pub fn key_parity(&self) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([126, 119, 119, 167], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `Q` (0xe493ef8c) function + pub fn q( + &self, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([228, 147, 239, 140], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `execute` (0xb839b1a1) function + pub fn execute( + &self, + transactions: ::std::vec::Vec, + sig: Signature, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([184, 57, 177, 161], (transactions, sig)) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `initSeraiKey` (0x3d54f51e) function + pub fn init_serai_key( + &self, + serai_key: [u8; 32], + 
) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([61, 84, 245, 30], serai_key) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `initializer` (0x9ce110d7) function + pub fn initializer( + &self, + ) -> ::ethers_contract::builders::ContractCall< + M, + ::ethers_core::types::Address, + > { + self.0 + .method_hash([156, 225, 16, 215], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `nonce` (0xaffed0e0) function + pub fn nonce( + &self, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([175, 254, 208, 224], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `seraiKey` (0x9d6eea0a) function + pub fn serai_key( + &self, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([157, 110, 234, 10], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `updateSeraiKey` (0xb5071c6a) function + pub fn update_serai_key( + &self, + serai_key: [u8; 32], + sig: Signature, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([181, 7, 28, 106], (serai_key, sig)) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `verify` (0x9186da4c) function + pub fn verify( + &self, + parity: u8, + px: [u8; 32], + message: [u8; 32], + c: [u8; 32], + s: [u8; 32], + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([145, 134, 218, 76], (parity, px, message, c, s)) + .expect("method not found (this should never happen)") + } + ///Gets the contract's `Executed` event + pub fn executed_filter( + &self, + ) -> ::ethers_contract::builders::Event<::std::sync::Arc, M, ExecutedFilter> { + self.0.event() + } + /// Returns an `Event` builder for all the events of this contract. 
+ pub fn events( + &self, + ) -> ::ethers_contract::builders::Event<::std::sync::Arc, M, ExecutedFilter> { + self.0.event_with_filter(::core::default::Default::default()) + } + } + impl From<::ethers_contract::Contract> + for Router { + fn from(contract: ::ethers_contract::Contract) -> Self { + Self::new(contract.address(), contract.client()) + } + } + ///Custom Error type `AlreadyInitialized` with signature `AlreadyInitialized()` and selector `0x0dc149f0` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "AlreadyInitialized", abi = "AlreadyInitialized()")] + pub struct AlreadyInitialized; + ///Custom Error type `InvalidKey` with signature `InvalidKey()` and selector `0x76d4e1e8` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "InvalidKey", abi = "InvalidKey()")] + pub struct InvalidKey; + ///Custom Error type `InvalidSOrA` with signature `InvalidSOrA()` and selector `0x4e99a12e` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "InvalidSOrA", abi = "InvalidSOrA()")] + pub struct InvalidSOrA; + ///Custom Error type `InvalidSignature` with signature `InvalidSignature()` and selector `0x8baa579f` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "InvalidSignature", abi = "InvalidSignature()")] + pub struct InvalidSignature; + ///Custom Error type `NotInitializer` with signature `NotInitializer()` and selector `0xceeb95b3` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "NotInitializer", abi = "NotInitializer()")] + pub struct NotInitializer; + 
///Custom Error type `TooManyTransactions` with signature `TooManyTransactions()` and selector `0xfb4593ba` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "TooManyTransactions", abi = "TooManyTransactions()")] + pub struct TooManyTransactions; + ///Container type for all of the contract's custom errors + #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)] + pub enum RouterErrors { + AlreadyInitialized(AlreadyInitialized), + InvalidKey(InvalidKey), + InvalidSOrA(InvalidSOrA), + InvalidSignature(InvalidSignature), + NotInitializer(NotInitializer), + TooManyTransactions(TooManyTransactions), + /// The standard solidity revert string, with selector + /// Error(string) -- 0x08c379a0 + RevertString(::std::string::String), + } + impl ::ethers_core::abi::AbiDecode for RouterErrors { + fn decode( + data: impl AsRef<[u8]>, + ) -> ::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = <::std::string::String as ::ethers_core::abi::AbiDecode>::decode( + data, + ) { + return Ok(Self::RevertString(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::AlreadyInitialized(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InvalidKey(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InvalidSOrA(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InvalidSignature(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::NotInitializer(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::TooManyTransactions(decoded)); + } + Err(::ethers_core::abi::Error::InvalidData.into()) + } + } + impl ::ethers_core::abi::AbiEncode for RouterErrors { + fn encode(self) -> ::std::vec::Vec { + match self { + Self::AlreadyInitialized(element) => { + 
::ethers_core::abi::AbiEncode::encode(element) + } + Self::InvalidKey(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::InvalidSOrA(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::InvalidSignature(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::NotInitializer(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::TooManyTransactions(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::RevertString(s) => ::ethers_core::abi::AbiEncode::encode(s), + } + } + } + impl ::ethers_contract::ContractRevert for RouterErrors { + fn valid_selector(selector: [u8; 4]) -> bool { + match selector { + [0x08, 0xc3, 0x79, 0xa0] => true, + _ if selector + == ::selector() => { + true + } + _ if selector + == ::selector() => true, + _ if selector + == ::selector() => true, + _ if selector + == ::selector() => { + true + } + _ if selector + == ::selector() => { + true + } + _ if selector + == ::selector() => { + true + } + _ => false, + } + } + } + impl ::core::fmt::Display for RouterErrors { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::AlreadyInitialized(element) => { + ::core::fmt::Display::fmt(element, f) + } + Self::InvalidKey(element) => ::core::fmt::Display::fmt(element, f), + Self::InvalidSOrA(element) => ::core::fmt::Display::fmt(element, f), + Self::InvalidSignature(element) => ::core::fmt::Display::fmt(element, f), + Self::NotInitializer(element) => ::core::fmt::Display::fmt(element, f), + Self::TooManyTransactions(element) => { + ::core::fmt::Display::fmt(element, f) + } + Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), + } + } + } + impl ::core::convert::From<::std::string::String> for RouterErrors { + fn from(value: String) -> Self { + Self::RevertString(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: AlreadyInitialized) -> Self { + 
Self::AlreadyInitialized(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: InvalidKey) -> Self { + Self::InvalidKey(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: InvalidSOrA) -> Self { + Self::InvalidSOrA(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: InvalidSignature) -> Self { + Self::InvalidSignature(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: NotInitializer) -> Self { + Self::NotInitializer(value) + } + } + impl ::core::convert::From for RouterErrors { + fn from(value: TooManyTransactions) -> Self { + Self::TooManyTransactions(value) + } + } + #[derive( + Clone, + ::ethers_contract::EthEvent, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethevent(name = "Executed", abi = "Executed(uint256,bytes32,uint256)")] + pub struct ExecutedFilter { + pub nonce: ::ethers_core::types::U256, + pub batch: [u8; 32], + pub success: ::ethers_core::types::U256, + } + ///Container type for all input parameters for the `KEY_PARITY` function with signature `KEY_PARITY()` and selector `0x7e7777a7` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "KEY_PARITY", abi = "KEY_PARITY()")] + pub struct KeyParityCall; + ///Container type for all input parameters for the `Q` function with signature `Q()` and selector `0xe493ef8c` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "Q", abi = "Q()")] + pub struct QCall; + ///Container type for all input parameters for the `execute` function with signature `execute((address,uint256,bytes)[],(bytes32,bytes32))` and selector `0xb839b1a1` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + 
#[ethcall( + name = "execute", + abi = "execute((address,uint256,bytes)[],(bytes32,bytes32))" + )] + pub struct ExecuteCall { + pub transactions: ::std::vec::Vec, + pub sig: Signature, + } + ///Container type for all input parameters for the `initSeraiKey` function with signature `initSeraiKey(bytes32)` and selector `0x3d54f51e` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "initSeraiKey", abi = "initSeraiKey(bytes32)")] + pub struct InitSeraiKeyCall { + pub serai_key: [u8; 32], + } + ///Container type for all input parameters for the `initializer` function with signature `initializer()` and selector `0x9ce110d7` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "initializer", abi = "initializer()")] + pub struct InitializerCall; + ///Container type for all input parameters for the `nonce` function with signature `nonce()` and selector `0xaffed0e0` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "nonce", abi = "nonce()")] + pub struct NonceCall; + ///Container type for all input parameters for the `seraiKey` function with signature `seraiKey()` and selector `0x9d6eea0a` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "seraiKey", abi = "seraiKey()")] + pub struct SeraiKeyCall; + ///Container type for all input parameters for the `updateSeraiKey` function with signature `updateSeraiKey(bytes32,(bytes32,bytes32))` and selector `0xb5071c6a` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall( + name = "updateSeraiKey", + abi = 
"updateSeraiKey(bytes32,(bytes32,bytes32))" + )] + pub struct UpdateSeraiKeyCall { + pub serai_key: [u8; 32], + pub sig: Signature, + } + ///Container type for all input parameters for the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "verify", abi = "verify(uint8,bytes32,bytes32,bytes32,bytes32)")] + pub struct VerifyCall { + pub parity: u8, + pub px: [u8; 32], + pub message: [u8; 32], + pub c: [u8; 32], + pub s: [u8; 32], + } + ///Container type for all of the contract's call + #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)] + pub enum RouterCalls { + KeyParity(KeyParityCall), + Q(QCall), + Execute(ExecuteCall), + InitSeraiKey(InitSeraiKeyCall), + Initializer(InitializerCall), + Nonce(NonceCall), + SeraiKey(SeraiKeyCall), + UpdateSeraiKey(UpdateSeraiKeyCall), + Verify(VerifyCall), + } + impl ::ethers_core::abi::AbiDecode for RouterCalls { + fn decode( + data: impl AsRef<[u8]>, + ) -> ::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::KeyParity(decoded)); + } + if let Ok(decoded) = ::decode(data) { + return Ok(Self::Q(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::Execute(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InitSeraiKey(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::Initializer(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::Nonce(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::SeraiKey(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::UpdateSeraiKey(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::Verify(decoded)); + } + 
Err(::ethers_core::abi::Error::InvalidData.into()) + } + } + impl ::ethers_core::abi::AbiEncode for RouterCalls { + fn encode(self) -> Vec { + match self { + Self::KeyParity(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::Q(element) => ::ethers_core::abi::AbiEncode::encode(element), + Self::Execute(element) => ::ethers_core::abi::AbiEncode::encode(element), + Self::InitSeraiKey(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::Initializer(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::Nonce(element) => ::ethers_core::abi::AbiEncode::encode(element), + Self::SeraiKey(element) => ::ethers_core::abi::AbiEncode::encode(element), + Self::UpdateSeraiKey(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::Verify(element) => ::ethers_core::abi::AbiEncode::encode(element), + } + } + } + impl ::core::fmt::Display for RouterCalls { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::KeyParity(element) => ::core::fmt::Display::fmt(element, f), + Self::Q(element) => ::core::fmt::Display::fmt(element, f), + Self::Execute(element) => ::core::fmt::Display::fmt(element, f), + Self::InitSeraiKey(element) => ::core::fmt::Display::fmt(element, f), + Self::Initializer(element) => ::core::fmt::Display::fmt(element, f), + Self::Nonce(element) => ::core::fmt::Display::fmt(element, f), + Self::SeraiKey(element) => ::core::fmt::Display::fmt(element, f), + Self::UpdateSeraiKey(element) => ::core::fmt::Display::fmt(element, f), + Self::Verify(element) => ::core::fmt::Display::fmt(element, f), + } + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: KeyParityCall) -> Self { + Self::KeyParity(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: QCall) -> Self { + Self::Q(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: ExecuteCall) -> Self { + Self::Execute(value) + } + } 
+ impl ::core::convert::From for RouterCalls { + fn from(value: InitSeraiKeyCall) -> Self { + Self::InitSeraiKey(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: InitializerCall) -> Self { + Self::Initializer(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: NonceCall) -> Self { + Self::Nonce(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: SeraiKeyCall) -> Self { + Self::SeraiKey(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: UpdateSeraiKeyCall) -> Self { + Self::UpdateSeraiKey(value) + } + } + impl ::core::convert::From for RouterCalls { + fn from(value: VerifyCall) -> Self { + Self::Verify(value) + } + } + ///Container type for all return fields from the `KEY_PARITY` function with signature `KEY_PARITY()` and selector `0x7e7777a7` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct KeyParityReturn(pub u8); + ///Container type for all return fields from the `Q` function with signature `Q()` and selector `0xe493ef8c` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct QReturn(pub ::ethers_core::types::U256); + ///Container type for all return fields from the `initializer` function with signature `initializer()` and selector `0x9ce110d7` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct InitializerReturn(pub ::ethers_core::types::Address); + ///Container type for all return fields from the `nonce` function with signature `nonce()` and selector `0xaffed0e0` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct NonceReturn(pub ::ethers_core::types::U256); 
+ ///Container type for all return fields from the `seraiKey` function with signature `seraiKey()` and selector `0x9d6eea0a` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct SeraiKeyReturn(pub [u8; 32]); + ///Container type for all return fields from the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct VerifyReturn(pub bool); + ///`OutInstruction(address,uint256,bytes)` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct OutInstruction { + pub to: ::ethers_core::types::Address, + pub value: ::ethers_core::types::U256, + pub data: ::ethers_core::types::Bytes, + } + ///`Signature(bytes32,bytes32)` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct Signature { + pub c: [u8; 32], + pub s: [u8; 32], + } +} diff --git a/coins/ethereum/src/abi/schnorr.rs b/coins/ethereum/src/abi/schnorr.rs new file mode 100644 index 00000000..f0c7ee4e --- /dev/null +++ b/coins/ethereum/src/abi/schnorr.rs @@ -0,0 +1,410 @@ +pub use schnorr::*; +/// This module was auto-generated with ethers-rs Abigen. 
+/// More information at: +#[allow( + clippy::enum_variant_names, + clippy::too_many_arguments, + clippy::upper_case_acronyms, + clippy::type_complexity, + dead_code, + non_camel_case_types, +)] +pub mod schnorr { + #[allow(deprecated)] + fn __abi() -> ::ethers_core::abi::Abi { + ::ethers_core::abi::ethabi::Contract { + constructor: ::core::option::Option::None, + functions: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("Q"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("Q"), + inputs: ::std::vec![], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(256usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint256"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("verify"), + ::std::vec![ + ::ethers_core::abi::ethabi::Function { + name: ::std::borrow::ToOwned::to_owned("verify"), + inputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("parity"), + kind: ::ethers_core::abi::ethabi::ParamType::Uint(8usize), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("uint8"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("px"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("message"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: 
::std::borrow::ToOwned::to_owned("c"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ::ethers_core::abi::ethabi::Param { + name: ::std::borrow::ToOwned::to_owned("s"), + kind: ::ethers_core::abi::ethabi::ParamType::FixedBytes( + 32usize, + ), + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bytes32"), + ), + }, + ], + outputs: ::std::vec![ + ::ethers_core::abi::ethabi::Param { + name: ::std::string::String::new(), + kind: ::ethers_core::abi::ethabi::ParamType::Bool, + internal_type: ::core::option::Option::Some( + ::std::borrow::ToOwned::to_owned("bool"), + ), + }, + ], + constant: ::core::option::Option::None, + state_mutability: ::ethers_core::abi::ethabi::StateMutability::View, + }, + ], + ), + ]), + events: ::std::collections::BTreeMap::new(), + errors: ::core::convert::From::from([ + ( + ::std::borrow::ToOwned::to_owned("InvalidSOrA"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("InvalidSOrA"), + inputs: ::std::vec![], + }, + ], + ), + ( + ::std::borrow::ToOwned::to_owned("InvalidSignature"), + ::std::vec![ + ::ethers_core::abi::ethabi::AbiError { + name: ::std::borrow::ToOwned::to_owned("InvalidSignature"), + inputs: ::std::vec![], + }, + ], + ), + ]), + receive: false, + fallback: false, + } + } + ///The parsed JSON ABI of the contract. 
+ pub static SCHNORR_ABI: ::ethers_contract::Lazy<::ethers_core::abi::Abi> = ::ethers_contract::Lazy::new( + __abi, + ); + pub struct Schnorr(::ethers_contract::Contract); + impl ::core::clone::Clone for Schnorr { + fn clone(&self) -> Self { + Self(::core::clone::Clone::clone(&self.0)) + } + } + impl ::core::ops::Deref for Schnorr { + type Target = ::ethers_contract::Contract; + fn deref(&self) -> &Self::Target { + &self.0 + } + } + impl ::core::ops::DerefMut for Schnorr { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } + } + impl ::core::fmt::Debug for Schnorr { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + f.debug_tuple(::core::stringify!(Schnorr)).field(&self.address()).finish() + } + } + impl Schnorr { + /// Creates a new contract instance with the specified `ethers` client at + /// `address`. The contract derefs to a `ethers::Contract` object. + pub fn new>( + address: T, + client: ::std::sync::Arc, + ) -> Self { + Self( + ::ethers_contract::Contract::new( + address.into(), + SCHNORR_ABI.clone(), + client, + ), + ) + } + ///Calls the contract's `Q` (0xe493ef8c) function + pub fn q( + &self, + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([228, 147, 239, 140], ()) + .expect("method not found (this should never happen)") + } + ///Calls the contract's `verify` (0x9186da4c) function + pub fn verify( + &self, + parity: u8, + px: [u8; 32], + message: [u8; 32], + c: [u8; 32], + s: [u8; 32], + ) -> ::ethers_contract::builders::ContractCall { + self.0 + .method_hash([145, 134, 218, 76], (parity, px, message, c, s)) + .expect("method not found (this should never happen)") + } + } + impl From<::ethers_contract::Contract> + for Schnorr { + fn from(contract: ::ethers_contract::Contract) -> Self { + Self::new(contract.address(), contract.client()) + } + } + ///Custom Error type `InvalidSOrA` with signature `InvalidSOrA()` and selector `0x4e99a12e` + #[derive( + Clone, + 
::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "InvalidSOrA", abi = "InvalidSOrA()")] + pub struct InvalidSOrA; + ///Custom Error type `InvalidSignature` with signature `InvalidSignature()` and selector `0x8baa579f` + #[derive( + Clone, + ::ethers_contract::EthError, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[etherror(name = "InvalidSignature", abi = "InvalidSignature()")] + pub struct InvalidSignature; + ///Container type for all of the contract's custom errors + #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)] + pub enum SchnorrErrors { + InvalidSOrA(InvalidSOrA), + InvalidSignature(InvalidSignature), + /// The standard solidity revert string, with selector + /// Error(string) -- 0x08c379a0 + RevertString(::std::string::String), + } + impl ::ethers_core::abi::AbiDecode for SchnorrErrors { + fn decode( + data: impl AsRef<[u8]>, + ) -> ::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = <::std::string::String as ::ethers_core::abi::AbiDecode>::decode( + data, + ) { + return Ok(Self::RevertString(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InvalidSOrA(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::InvalidSignature(decoded)); + } + Err(::ethers_core::abi::Error::InvalidData.into()) + } + } + impl ::ethers_core::abi::AbiEncode for SchnorrErrors { + fn encode(self) -> ::std::vec::Vec { + match self { + Self::InvalidSOrA(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::InvalidSignature(element) => { + ::ethers_core::abi::AbiEncode::encode(element) + } + Self::RevertString(s) => ::ethers_core::abi::AbiEncode::encode(s), + } + } + } + impl ::ethers_contract::ContractRevert for SchnorrErrors { + fn valid_selector(selector: [u8; 4]) -> bool { + match selector { + [0x08, 0xc3, 0x79, 0xa0] => true, + _ 
if selector + == ::selector() => true, + _ if selector + == ::selector() => { + true + } + _ => false, + } + } + } + impl ::core::fmt::Display for SchnorrErrors { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::InvalidSOrA(element) => ::core::fmt::Display::fmt(element, f), + Self::InvalidSignature(element) => ::core::fmt::Display::fmt(element, f), + Self::RevertString(s) => ::core::fmt::Display::fmt(s, f), + } + } + } + impl ::core::convert::From<::std::string::String> for SchnorrErrors { + fn from(value: String) -> Self { + Self::RevertString(value) + } + } + impl ::core::convert::From for SchnorrErrors { + fn from(value: InvalidSOrA) -> Self { + Self::InvalidSOrA(value) + } + } + impl ::core::convert::From for SchnorrErrors { + fn from(value: InvalidSignature) -> Self { + Self::InvalidSignature(value) + } + } + ///Container type for all input parameters for the `Q` function with signature `Q()` and selector `0xe493ef8c` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "Q", abi = "Q()")] + pub struct QCall; + ///Container type for all input parameters for the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c` + #[derive( + Clone, + ::ethers_contract::EthCall, + ::ethers_contract::EthDisplay, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + #[ethcall(name = "verify", abi = "verify(uint8,bytes32,bytes32,bytes32,bytes32)")] + pub struct VerifyCall { + pub parity: u8, + pub px: [u8; 32], + pub message: [u8; 32], + pub c: [u8; 32], + pub s: [u8; 32], + } + ///Container type for all of the contract's call + #[derive(Clone, ::ethers_contract::EthAbiType, Debug, PartialEq, Eq, Hash)] + pub enum SchnorrCalls { + Q(QCall), + Verify(VerifyCall), + } + impl ::ethers_core::abi::AbiDecode for SchnorrCalls { + fn decode( + data: impl AsRef<[u8]>, + ) -> 
::core::result::Result { + let data = data.as_ref(); + if let Ok(decoded) = ::decode(data) { + return Ok(Self::Q(decoded)); + } + if let Ok(decoded) = ::decode( + data, + ) { + return Ok(Self::Verify(decoded)); + } + Err(::ethers_core::abi::Error::InvalidData.into()) + } + } + impl ::ethers_core::abi::AbiEncode for SchnorrCalls { + fn encode(self) -> Vec { + match self { + Self::Q(element) => ::ethers_core::abi::AbiEncode::encode(element), + Self::Verify(element) => ::ethers_core::abi::AbiEncode::encode(element), + } + } + } + impl ::core::fmt::Display for SchnorrCalls { + fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result { + match self { + Self::Q(element) => ::core::fmt::Display::fmt(element, f), + Self::Verify(element) => ::core::fmt::Display::fmt(element, f), + } + } + } + impl ::core::convert::From for SchnorrCalls { + fn from(value: QCall) -> Self { + Self::Q(value) + } + } + impl ::core::convert::From for SchnorrCalls { + fn from(value: VerifyCall) -> Self { + Self::Verify(value) + } + } + ///Container type for all return fields from the `Q` function with signature `Q()` and selector `0xe493ef8c` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct QReturn(pub ::ethers_core::types::U256); + ///Container type for all return fields from the `verify` function with signature `verify(uint8,bytes32,bytes32,bytes32,bytes32)` and selector `0x9186da4c` + #[derive( + Clone, + ::ethers_contract::EthAbiType, + ::ethers_contract::EthAbiCodec, + Default, + Debug, + PartialEq, + Eq, + Hash + )] + pub struct VerifyReturn(pub bool); +} diff --git a/coins/ethereum/src/crypto.rs b/coins/ethereum/src/crypto.rs index 5f681cfa..ca228eb5 100644 --- a/coins/ethereum/src/crypto.rs +++ b/coins/ethereum/src/crypto.rs @@ -1,91 +1,185 @@ -use sha3::{Digest, Keccak256}; - use group::ff::PrimeField; use k256::{ - elliptic_curve::{ - bigint::ArrayEncoding, ops::Reduce, 
point::AffineCoordinates, sec1::ToEncodedPoint, - }, - ProjectivePoint, Scalar, U256, + elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint}, + ProjectivePoint, Scalar, U256 as KU256, }; +#[cfg(test)] +use k256::{elliptic_curve::point::DecompressPoint, AffinePoint}; use frost::{ algorithm::{Hram, SchnorrSignature}, - curve::Secp256k1, + curve::{Ciphersuite, Secp256k1}, }; +use alloy_core::primitives::{Parity, Signature as AlloySignature}; +use alloy_consensus::{SignableTransaction, Signed, TxLegacy}; + +use crate::abi::router::{Signature as AbiSignature}; + pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] { - Keccak256::digest(data).into() + alloy_core::primitives::keccak256(data).into() } -pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] { +pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar { + >::reduce_bytes(&keccak256(data).into()) +} + +pub fn address(point: &ProjectivePoint) -> [u8; 20] { let encoded_point = point.to_encoded_point(false); // Last 20 bytes of the hash of the concatenated x and y coordinates // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point keccak256(&encoded_point.as_ref()[1 .. 
65])[12 ..].try_into().unwrap() } +pub(crate) fn deterministically_sign(tx: &TxLegacy) -> Signed { + assert!( + tx.chain_id.is_none(), + "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)" + ); + + let sig_hash = tx.signature_hash().0; + let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat()); + let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat()); + loop { + let r_bytes: [u8; 32] = r.to_repr().into(); + let s_bytes: [u8; 32] = s.to_repr().into(); + let v = Parity::NonEip155(false); + let signature = + AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap(); + let tx = tx.clone().into_signed(signature); + if tx.recover_signer().is_ok() { + return tx; + } + + // Re-hash until valid + r = hash_to_scalar(r_bytes.as_ref()); + s = hash_to_scalar(s_bytes.as_ref()); + } +} + +/// The public key for a Schnorr-signing account. #[allow(non_snake_case)] +#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct PublicKey { - pub A: ProjectivePoint, - pub px: Scalar, - pub parity: u8, + pub(crate) A: ProjectivePoint, + pub(crate) px: Scalar, } impl PublicKey { + /// Construct a new `PublicKey`. + /// + /// This will return None if the provided point isn't eligible to be a public key (due to + /// bounds such as parity). 
#[allow(non_snake_case)] pub fn new(A: ProjectivePoint) -> Option { let affine = A.to_affine(); - let parity = u8::from(bool::from(affine.y_is_odd())) + 27; - if parity != 27 { + // Only allow even keys to save a word within Ethereum + let is_odd = bool::from(affine.y_is_odd()); + if is_odd { None?; } let x_coord = affine.x(); - let x_coord_scalar = >::reduce_bytes(&x_coord); + let x_coord_scalar = >::reduce_bytes(&x_coord); // Return None if a reduction would occur + // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less + // headache/concern to have + // This does ban a trivial amoount of public keys if x_coord_scalar.to_repr() != x_coord { None?; } - Some(PublicKey { A, px: x_coord_scalar, parity }) + Some(PublicKey { A, px: x_coord_scalar }) + } + + pub fn point(&self) -> ProjectivePoint { + self.A + } + + pub(crate) fn eth_repr(&self) -> [u8; 32] { + self.px.to_repr().into() + } + + #[cfg(test)] + pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option { + #[allow(non_snake_case)] + let A = Option::::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into(); + Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px }) } } +/// The HRAm to use for the Schnorr contract. #[derive(Clone, Default)] pub struct EthereumHram {} impl Hram for EthereumHram { #[allow(non_snake_case)] fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar { - let a_encoded_point = A.to_encoded_point(true); - let mut a_encoded = a_encoded_point.as_ref().to_owned(); - a_encoded[0] += 25; // Ethereum uses 27/28 for point parity - assert!((a_encoded[0] == 27) || (a_encoded[0] == 28)); + let x_coord = A.to_affine().x(); + let mut data = address(R).to_vec(); - data.append(&mut a_encoded); + data.extend(x_coord.as_slice()); data.extend(m); - Scalar::reduce(U256::from_be_slice(&keccak256(&data))) + + >::reduce_bytes(&keccak256(&data).into()) } } +/// A signature for the Schnorr contract. 
+#[derive(Clone, Copy, PartialEq, Eq, Debug)] pub struct Signature { pub(crate) c: Scalar, pub(crate) s: Scalar, } impl Signature { + pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool { + #[allow(non_snake_case)] + let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c); + EthereumHram::hram(&R, &public_key.A, message) == self.c + } + + /// Construct a new `Signature`. + /// + /// This will return None if the signature is invalid. pub fn new( public_key: &PublicKey, - chain_id: U256, - m: &[u8], + message: &[u8], signature: SchnorrSignature, ) -> Option { - let c = EthereumHram::hram( - &signature.R, - &public_key.A, - &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), - ); + let c = EthereumHram::hram(&signature.R, &public_key.A, message); if !signature.verify(public_key.A, c) { None?; } - Some(Signature { c, s: signature.s }) + + let res = Signature { c, s: signature.s }; + assert!(res.verify(public_key, message)); + Some(res) + } + + pub fn c(&self) -> Scalar { + self.c + } + pub fn s(&self) -> Scalar { + self.s + } + + pub fn to_bytes(&self) -> [u8; 64] { + let mut res = [0; 64]; + res[.. 
32].copy_from_slice(self.c.to_repr().as_ref()); + res[32 ..].copy_from_slice(self.s.to_repr().as_ref()); + res + } + + pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result { + let mut reader = bytes.as_slice(); + let c = Secp256k1::read_F(&mut reader)?; + let s = Secp256k1::read_F(&mut reader)?; + Ok(Signature { c, s }) + } +} +impl From<&Signature> for AbiSignature { + fn from(sig: &Signature) -> AbiSignature { + let c: [u8; 32] = sig.c.to_repr().into(); + let s: [u8; 32] = sig.s.to_repr().into(); + AbiSignature { c: c.into(), s: s.into() } } } diff --git a/coins/ethereum/src/deployer.rs b/coins/ethereum/src/deployer.rs new file mode 100644 index 00000000..1a16664c --- /dev/null +++ b/coins/ethereum/src/deployer.rs @@ -0,0 +1,120 @@ +use std::sync::Arc; + +use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind}; +use alloy_consensus::{Signed, TxLegacy}; + +use alloy_sol_types::{SolCall, SolEvent}; + +use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::{ + Error, + crypto::{self, keccak256, PublicKey}, + router::Router, +}; +pub use crate::abi::deployer as abi; + +/// The Deployer contract for the Router contract. +/// +/// This Deployer has a deterministic address, letting it be immediately identified on any +/// compatible chain. It then supports retrieving the Router contract's address (which isn't +/// deterministic) using a single log query. +#[derive(Clone, Debug)] +pub struct Deployer; +impl Deployer { + /// Obtain the transaction to deploy this contract, already signed. + /// + /// The account this transaction is sent from (which is populated in `from`) must be sufficiently + /// funded for this transaction to be submitted. This account has no known private key to anyone, + /// so ETH sent can be neither misappropriated nor returned. 
+ pub fn deployment_tx() -> Signed { + let bytecode = include_str!("../artifacts/Deployer.bin"); + let bytecode = + Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex"); + + let tx = TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + // TODO: Use a more accurate gas limit + gas_limit: 1_000_000u128, + to: TxKind::Create, + value: U256::ZERO, + input: bytecode, + }; + + crypto::deterministically_sign(&tx) + } + + /// Obtain the deterministic address for this contract. + pub fn address() -> [u8; 20] { + let deployer_deployer = + Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature"); + **Address::create(&deployer_deployer, 0) + } + + /// Construct a new view of the `Deployer`. + pub async fn new(provider: Arc>) -> Result, Error> { + let address = Self::address(); + #[cfg(not(test))] + let required_block = BlockNumberOrTag::Finalized; + #[cfg(test)] + let required_block = BlockNumberOrTag::Latest; + let code = provider + .get_code_at(address.into(), required_block.into()) + .await + .map_err(|_| Error::ConnectionError)?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self)) + } + + /// Yield the `ContractCall` necessary to deploy the Router. + pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy { + TxLegacy { + to: TxKind::Call(Self::address().into()), + input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(), + gas_limit: 1_000_000, + ..Default::default() + } + } + + /// Find the first Router deployed with the specified key as its first key. + /// + /// This is the Router Serai will use, and is the only way to construct a `Router`. 
+ pub async fn find_router( + &self, + provider: Arc>, + key: &PublicKey, + ) -> Result, Error> { + let init_code = Router::init_code(key); + let init_code_hash = keccak256(&init_code); + + #[cfg(not(test))] + let to_block = BlockNumberOrTag::Finalized; + #[cfg(test)] + let to_block = BlockNumberOrTag::Latest; + + // Find the first log using this init code (where the init code is binding to the key) + // TODO: Make an abstraction for event filtering (de-duplicating common code) + let filter = + Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address())); + let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH); + let filter = filter.topic1(B256::from(init_code_hash)); + let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let Some(first_log) = logs.first() else { return Ok(None) }; + let router = first_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .created; + + Ok(Some(Router::new(provider, router))) + } +} diff --git a/coins/ethereum/src/erc20.rs b/coins/ethereum/src/erc20.rs new file mode 100644 index 00000000..86bd1b2d --- /dev/null +++ b/coins/ethereum/src/erc20.rs @@ -0,0 +1,118 @@ +use std::{sync::Arc, collections::HashSet}; + +use alloy_core::primitives::{Address, B256, U256}; + +use alloy_sol_types::{SolInterface, SolEvent}; + +use alloy_rpc_types::{BlockNumberOrTag, Filter}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::Error; +pub use crate::abi::erc20 as abi; +use abi::{IERC20Calls, Transfer, transferCall, transferFromCall}; + +#[derive(Clone, Debug)] +pub struct TopLevelErc20Transfer { + pub id: [u8; 32], + pub from: [u8; 20], + pub amount: U256, + pub data: Vec, +} + +/// A view for an ERC20 contract. +#[derive(Clone, Debug)] +pub struct Erc20(Arc>, Address); +impl Erc20 { + /// Construct a new view of the specified ERC20 contract. 
+ /// + /// This checks a contract is deployed at that address yet does not check the contract is + /// actually an ERC20. + pub async fn new( + provider: Arc>, + address: [u8; 20], + ) -> Result, Error> { + let code = provider + .get_code_at(address.into(), BlockNumberOrTag::Finalized.into()) + .await + .map_err(|_| Error::ConnectionError)?; + // Contract has yet to be deployed + if code.is_empty() { + return Ok(None); + } + Ok(Some(Self(provider.clone(), Address::from(&address)))) + } + + pub async fn top_level_transfers( + &self, + block: u64, + to: [u8; 20], + ) -> Result, Error> { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(Transfer::SIGNATURE_HASH); + let mut to_topic = [0; 32]; + to_topic[12 ..].copy_from_slice(&to); + let filter = filter.topic2(B256::from(to_topic)); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut handled = HashSet::new(); + + let mut top_level_transfers = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = self.0.get_transaction_by_hash(tx_id).await.map_err(|_| Error::ConnectionError)?; + + // If this is a top-level call... + if tx.to == Some(self.1) { + // And we recognize the call... 
+ // Don't validate the encoding as this can't be re-encoded to an identical bytestring due + // to the InInstruction appended + if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) { + // Extract the top-level call's from/to/value + let (from, call_to, value) = match call { + IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value), + IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => { + (from, call_to, value) + } + // Treat any other function selectors as unrecognized + _ => continue, + }; + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an + // internal transfer + if (log.from != from) || (call_to != to) || (value != log.value) { + continue; + } + + // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's + // the only log we handle + if handled.contains(&tx_id) { + continue; + } + handled.insert(tx_id); + + // Read the data appended after + let encoded = call.abi_encode(); + let data = tx.input.as_ref()[encoded.len() ..].to_vec(); + + // Push the transfer + top_level_transfers.push(TopLevelErc20Transfer { + // Since we'll only handle one log for this TX, set the ID to the TX ID + id: *tx_id, + from: *log.from.0, + amount: log.value, + data, + }); + } + } + } + Ok(top_level_transfers) + } +} diff --git a/coins/ethereum/src/lib.rs b/coins/ethereum/src/lib.rs index 505de38e..8d4a5312 100644 --- a/coins/ethereum/src/lib.rs +++ b/coins/ethereum/src/lib.rs @@ -1,16 +1,30 @@ use thiserror::Error; +pub use alloy_core; +pub use alloy_consensus; + +pub use alloy_rpc_types; +pub use alloy_simple_request_transport; +pub use alloy_rpc_client; +pub use alloy_provider; + pub mod crypto; pub(crate) mod abi; -pub mod schnorr; + +pub mod erc20; +pub mod deployer; pub mod router; +pub mod machine; + #[cfg(test)] mod tests; -#[derive(Error, Debug)] +#[derive(Clone, 
Copy, PartialEq, Eq, Debug, Error)] pub enum Error { #[error("failed to verify Schnorr signature")] InvalidSignature, + #[error("couldn't make call/send TX")] + ConnectionError, } diff --git a/coins/ethereum/src/machine.rs b/coins/ethereum/src/machine.rs new file mode 100644 index 00000000..0d5dc7a5 --- /dev/null +++ b/coins/ethereum/src/machine.rs @@ -0,0 +1,414 @@ +use std::{ + io::{self, Read}, + collections::HashMap, +}; + +use rand_core::{RngCore, CryptoRng}; + +use transcript::{Transcript, RecommendedTranscript}; + +use group::GroupEncoding; +use frost::{ + curve::{Ciphersuite, Secp256k1}, + Participant, ThresholdKeys, FrostError, + algorithm::Schnorr, + sign::*, +}; + +use alloy_core::primitives::U256; + +use crate::{ + crypto::{PublicKey, EthereumHram, Signature}, + router::{ + abi::{Call as AbiCall, OutInstruction as AbiOutInstruction}, + Router, + }, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Call { + pub to: [u8; 20], + pub value: U256, + pub data: Vec, +} +impl Call { + pub fn read(reader: &mut R) -> io::Result { + let mut to = [0; 20]; + reader.read_exact(&mut to)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + let mut data_len = { + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize") + }; + + // A valid DoS would be to claim a 4 GB data is present for only 4 bytes + // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB) + let mut data = vec![]; + while data_len > 0 { + let chunk_len = data_len.min(1024); + let mut chunk = vec![0; chunk_len]; + reader.read_exact(&mut chunk)?; + data.extend(&chunk); + data_len -= chunk_len; + } + + Ok(Call { to, value, data }) + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.to)?; + writer.write_all(&self.value.as_le_bytes())?; + + let data_len = 
u32::try_from(self.data.len()) + .map_err(|_| io::Error::other("call data length exceeded 2**32"))?; + writer.write_all(&data_len.to_le_bytes())?; + writer.write_all(&self.data) + } +} +impl From for AbiCall { + fn from(call: Call) -> AbiCall { + AbiCall { to: call.to.into(), value: call.value, data: call.data.into() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum OutInstructionTarget { + Direct([u8; 20]), + Calls(Vec), +} +impl OutInstructionTarget { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut addr = [0; 20]; + reader.read_exact(&mut addr)?; + Ok(OutInstructionTarget::Direct(addr)) + } + 1 => { + let mut calls_len = [0; 4]; + reader.read_exact(&mut calls_len)?; + let calls_len = u32::from_le_bytes(calls_len); + + let mut calls = vec![]; + for _ in 0 .. calls_len { + calls.push(Call::read(reader)?); + } + Ok(OutInstructionTarget::Calls(calls)) + } + _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?, + } + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + OutInstructionTarget::Direct(addr) => { + writer.write_all(&[0])?; + writer.write_all(addr)?; + } + OutInstructionTarget::Calls(calls) => { + writer.write_all(&[1])?; + let call_len = u32::try_from(calls.len()) + .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?; + writer.write_all(&call_len.to_le_bytes())?; + for call in calls { + call.write(writer)?; + } + } + } + Ok(()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct OutInstruction { + pub target: OutInstructionTarget, + pub value: U256, +} +impl OutInstruction { + fn read(reader: &mut R) -> io::Result { + let target = OutInstructionTarget::read(reader)?; + + let value = { + let mut value_bytes = [0; 32]; + reader.read_exact(&mut value_bytes)?; + U256::from_le_slice(&value_bytes) + }; + + Ok(OutInstruction { target, value }) + } + fn write(&self, writer: &mut W) -> io::Result<()> { + 
self.target.write(writer)?; + writer.write_all(&self.value.as_le_bytes()) + } +} +impl From for AbiOutInstruction { + fn from(instruction: OutInstruction) -> AbiOutInstruction { + match instruction.target { + OutInstructionTarget::Direct(addr) => { + AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value } + } + OutInstructionTarget::Calls(calls) => AbiOutInstruction { + to: [0; 20].into(), + calls: calls.into_iter().map(Into::into).collect(), + value: instruction.value, + }, + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum RouterCommand { + UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey }, + Execute { chain_id: U256, nonce: U256, outs: Vec }, +} + +impl RouterCommand { + pub fn msg(&self) -> Vec { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + Router::update_serai_key_message(*chain_id, *nonce, key) + } + RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message( + *chain_id, + *nonce, + outs.iter().map(|out| out.clone().into()).collect(), + ), + } + } + + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + + match kind[0] { + 0 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + + let key = PublicKey::new(Secp256k1::read_G(reader)?) 
+ .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?; + Ok(RouterCommand::UpdateSeraiKey { + chain_id: U256::from_le_slice(&chain_id), + nonce: U256::from_le_slice(&nonce), + key, + }) + } + 1 => { + let mut chain_id = [0; 32]; + reader.read_exact(&mut chain_id)?; + let chain_id = U256::from_le_slice(&chain_id); + + let mut nonce = [0; 32]; + reader.read_exact(&mut nonce)?; + let nonce = U256::from_le_slice(&nonce); + + let mut outs_len = [0; 4]; + reader.read_exact(&mut outs_len)?; + let outs_len = u32::from_le_bytes(outs_len); + + let mut outs = vec![]; + for _ in 0 .. outs_len { + outs.push(OutInstruction::read(reader)?); + } + + Ok(RouterCommand::Execute { chain_id, nonce, outs }) + } + _ => Err(io::Error::other("reading unknown type of RouterCommand"))?, + } + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => { + writer.write_all(&[0])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&key.A.to_bytes()) + } + RouterCommand::Execute { chain_id, nonce, outs } => { + writer.write_all(&[1])?; + writer.write_all(&chain_id.as_le_bytes())?; + writer.write_all(&nonce.as_le_bytes())?; + writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?; + for out in outs { + out.write(writer)?; + } + Ok(()) + } + } + } + + pub fn serialize(&self) -> Vec { + let mut res = vec![]; + self.write(&mut res).unwrap(); + res + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct SignedRouterCommand { + command: RouterCommand, + signature: Signature, +} + +impl SignedRouterCommand { + pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option { + let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?; + let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?; + let signature = Signature { c, s }; + + if !signature.verify(key, &command.msg()) { + None? 
+ } + Some(SignedRouterCommand { command, signature }) + } + + pub fn command(&self) -> &RouterCommand { + &self.command + } + + pub fn signature(&self) -> &Signature { + &self.signature + } + + pub fn read(reader: &mut R) -> io::Result { + let command = RouterCommand::read(reader)?; + + let mut sig = [0; 64]; + reader.read_exact(&mut sig)?; + let signature = Signature::from_bytes(sig)?; + + Ok(SignedRouterCommand { command, signature }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + self.command.write(writer)?; + writer.write_all(&self.signature.to_bytes()) + } +} + +pub struct RouterCommandMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmMachine>, +} + +impl RouterCommandMachine { + pub fn new(keys: ThresholdKeys, command: RouterCommand) -> Option { + // The Schnorr algorithm should be fine without this, even when using the IETF variant + // If this is better and more comprehensive, we should do it, even if not necessary + let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1"); + let key = keys.group_key(); + transcript.append_message(b"key", key.to_bytes()); + transcript.append_message(b"command", command.serialize()); + + Some(Self { + key: PublicKey::new(key)?, + command, + machine: AlgorithmMachine::new(Schnorr::new(transcript), keys), + }) + } +} + +impl PreprocessMachine for RouterCommandMachine { + type Preprocess = Preprocess; + type Signature = SignedRouterCommand; + type SignMachine = RouterCommandSignMachine; + + fn preprocess( + self, + rng: &mut R, + ) -> (Self::SignMachine, Self::Preprocess) { + let (machine, preprocess) = self.machine.preprocess(rng); + + (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess) + } +} + +pub struct RouterCommandSignMachine { + key: PublicKey, + command: RouterCommand, + machine: AlgorithmSignMachine>, +} + +impl SignMachine for RouterCommandSignMachine { + type Params = (); + type Keys = ThresholdKeys; 
+ type Preprocess = Preprocess; + type SignatureShare = SignatureShare; + type SignatureMachine = RouterCommandSignatureMachine; + + fn cache(self) -> CachedPreprocess { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn from_cache( + (): (), + _: ThresholdKeys, + _: CachedPreprocess, + ) -> (Self, Self::Preprocess) { + unimplemented!( + "RouterCommand machines don't support caching their preprocesses due to {}", + "being already bound to a specific command" + ); + } + + fn read_preprocess(&self, reader: &mut R) -> io::Result { + self.machine.read_preprocess(reader) + } + + fn sign( + self, + commitments: HashMap, + msg: &[u8], + ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> { + if !msg.is_empty() { + panic!("message was passed to a RouterCommand machine when it generates its own"); + } + + let (machine, share) = self.machine.sign(commitments, &self.command.msg())?; + + Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share)) + } +} + +pub struct RouterCommandSignatureMachine { + key: PublicKey, + command: RouterCommand, + machine: + AlgorithmSignatureMachine>, +} + +impl SignatureMachine for RouterCommandSignatureMachine { + type SignatureShare = SignatureShare; + + fn read_share(&self, reader: &mut R) -> io::Result { + self.machine.read_share(reader) + } + + fn complete( + self, + shares: HashMap, + ) -> Result { + let sig = self.machine.complete(shares)?; + let signature = Signature::new(&self.key, &self.command.msg(), sig) + .expect("machine produced an invalid signature"); + Ok(SignedRouterCommand { command: self.command, signature }) + } +} diff --git a/coins/ethereum/src/router.rs b/coins/ethereum/src/router.rs index 3696fd9b..d2750a02 100644 --- a/coins/ethereum/src/router.rs +++ b/coins/ethereum/src/router.rs @@ -1,30 +1,428 @@ -pub use crate::abi::router::*; +use std::{sync::Arc, 
io, collections::HashSet}; -/* -use crate::crypto::{ProcessedSignature, PublicKey}; -use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode}; -use eyre::Result; -use std::{convert::From, fs::File, sync::Arc}; +use k256::{ + elliptic_curve::{group::GroupEncoding, sec1}, + ProjectivePoint, +}; -pub async fn router_update_public_key( - contract: &Router, - public_key: &PublicKey, - signature: &ProcessedSignature, -) -> std::result::Result, eyre::ErrReport> { - let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into()); - let pending_tx = tx.send().await?; - let receipt = pending_tx.await?; - Ok(receipt) +use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind}; +#[cfg(test)] +use alloy_core::primitives::B256; +use alloy_consensus::TxLegacy; + +use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent}; + +use alloy_rpc_types::Filter; +#[cfg(test)] +use alloy_rpc_types::{BlockId, TransactionRequest, TransactionInput}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +pub use crate::{ + Error, + crypto::{PublicKey, Signature}, + abi::{erc20::Transfer, router as abi}, +}; +use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum Coin { + Ether, + Erc20([u8; 20]), } -pub async fn router_execute( - contract: &Router, - txs: Vec, - signature: &ProcessedSignature, -) -> std::result::Result, eyre::ErrReport> { - let tx = contract.execute(txs, signature.into()).send(); - let pending_tx = tx.send().await?; - let receipt = pending_tx.await?; - Ok(receipt) +impl Coin { + pub fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + Ok(match kind[0] { + 0 => Coin::Ether, + 1 => { + let mut address = [0; 20]; + reader.read_exact(&mut address)?; + Coin::Erc20(address) + } + _ => Err(io::Error::other("unrecognized 
Coin type"))?, + }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Coin::Ether => writer.write_all(&[0]), + Coin::Erc20(token) => { + writer.write_all(&[1])?; + writer.write_all(token) + } + } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct InInstruction { + pub id: ([u8; 32], u64), + pub from: [u8; 20], + pub coin: Coin, + pub amount: U256, + pub data: Vec, + pub key_at_end_of_block: ProjectivePoint, +} + +impl InInstruction { + pub fn read(reader: &mut R) -> io::Result { + let id = { + let mut id_hash = [0; 32]; + reader.read_exact(&mut id_hash)?; + let mut id_pos = [0; 8]; + reader.read_exact(&mut id_pos)?; + let id_pos = u64::from_le_bytes(id_pos); + (id_hash, id_pos) + }; + + let mut from = [0; 20]; + reader.read_exact(&mut from)?; + + let coin = Coin::read(reader)?; + let mut amount = [0; 32]; + reader.read_exact(&mut amount)?; + let amount = U256::from_le_slice(&amount); + + let mut data_len = [0; 4]; + reader.read_exact(&mut data_len)?; + let data_len = usize::try_from(u32::from_le_bytes(data_len)) + .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?; + let mut data = vec![0; data_len]; + reader.read_exact(&mut data)?; + + let mut key_at_end_of_block = ::Repr::default(); + reader.read_exact(&mut key_at_end_of_block)?; + let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block)) + .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?; + + Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block }) + } + + pub fn write(&self, writer: &mut W) -> io::Result<()> { + writer.write_all(&self.id.0)?; + writer.write_all(&self.id.1.to_le_bytes())?; + + writer.write_all(&self.from)?; + + self.coin.write(writer)?; + writer.write_all(&self.amount.as_le_bytes())?; + + writer.write_all( + &u32::try_from(self.data.len()) + .map_err(|_| { + io::Error::other("InInstruction being written had data exceeding 2**32 in length") + })? 
+ .to_le_bytes(), + )?; + writer.write_all(&self.data)?; + + writer.write_all(&self.key_at_end_of_block.to_bytes()) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Executed { + pub tx_id: [u8; 32], + pub nonce: u64, + pub signature: [u8; 64], +} + +/// The contract Serai uses to manage its state. +#[derive(Clone, Debug)] +pub struct Router(Arc>, Address); +impl Router { + pub(crate) fn code() -> Vec { + let bytecode = include_str!("../artifacts/Router.bin"); + Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec() + } + + pub(crate) fn init_code(key: &PublicKey) -> Vec { + let mut bytecode = Self::code(); + // Append the constructor arguments + bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode()); + bytecode + } + + // This isn't pub in order to force users to use `Deployer::find_router`. + pub(crate) fn new(provider: Arc>, address: Address) -> Self { + Self(provider, address) + } + + pub fn address(&self) -> [u8; 20] { + **self.1 + } + + /// Get the key for Serai at the specified block. + #[cfg(test)] + pub async fn serai_key(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(Some(self.1)) + .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError) + } + + /// Get the message to be signed in order to update the key for Serai. 
+ pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec { + let mut buffer = b"updateSeraiKey".to_vec(); + buffer.extend(&chain_id.to_be_bytes::<32>()); + buffer.extend(&nonce.to_be_bytes::<32>()); + buffer.extend(&key.eth_repr()); + buffer + } + + /// Update the key representing Serai. + pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy { + // TODO: Set a more accurate gas + TxLegacy { + to: TxKind::Call(self.1), + input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into())) + .abi_encode() + .into(), + gas_limit: 100_000, + ..Default::default() + } + } + + /// Get the current nonce for the published batches. + #[cfg(test)] + pub async fn nonce(&self, at: [u8; 32]) -> Result { + let call = TransactionRequest::default() + .to(Some(self.1)) + .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into())); + let bytes = self + .0 + .call(&call, Some(BlockId::Hash(B256::from(at).into()))) + .await + .map_err(|_| Error::ConnectionError)?; + let res = + abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + Ok(res._0) + } + + /// Get the message to be signed in order to update the key for Serai. + pub(crate) fn execute_message( + chain_id: U256, + nonce: U256, + outs: Vec, + ) -> Vec { + ("execute".to_string(), chain_id, nonce, outs).abi_encode_params() + } + + /// Execute a batch of `OutInstruction`s. 
+ pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy { + TxLegacy { + to: TxKind::Call(self.1), + input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(), + // TODO + gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()), + ..Default::default() + } + } + + pub async fn key_at_end_of_block(&self, block: u64) -> Result { + let filter = Filter::new().from_block(0).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?; + let last_key_x_coordinate = last_key_x_coordinate_log + .log_decode::() + .map_err(|_| Error::ConnectionError)? + .inner + .data + .key; + + let mut compressed_point = ::Repr::default(); + compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY); + compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice()); + + Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError) + } + + pub async fn in_instructions( + &self, + block: u64, + allowed_tokens: &HashSet<[u8; 20]>, + ) -> Result, Error> { + let key_at_end_of_block = self.key_at_end_of_block(block).await?; + + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + let mut transfer_check = HashSet::new(); + let mut in_instructions = vec![]; + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let id = ( + log.block_hash.ok_or(Error::ConnectionError)?.into(), + log.log_index.ok_or(Error::ConnectionError)?, + ); + + let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?; + let tx = 
self.0.get_transaction_by_hash(tx_hash).await.map_err(|_| Error::ConnectionError)?; + + let log = + log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let coin = if log.coin.0 == [0; 20] { + Coin::Ether + } else { + let token = *log.coin.0; + + if !allowed_tokens.contains(&token) { + continue; + } + + // If this also counts as a top-level transfer via the token, drop it + // + // Necessary in order to handle a potential edge case with some theoretical token + // implementations + // + // This will either let it be handled by the top-level transfer hook or will drop it + // entirely on the side of caution + if tx.to == Some(token.into()) { + continue; + } + + // Get all logs for this TX + let receipt = self + .0 + .get_transaction_receipt(tx_hash) + .await + .map_err(|_| Error::ConnectionError)? + .ok_or(Error::ConnectionError)?; + let tx_logs = receipt.inner.logs(); + + // Find a matching transfer log + let mut found_transfer = false; + for tx_log in tx_logs { + let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?; + // Ensure we didn't already use this transfer to check a distinct InInstruction event + if transfer_check.contains(&log_index) { + continue; + } + + // Check if this log is from the token we expected to be transferred + if tx_log.address().0 != token { + continue; + } + // Check if this is a transfer log + // https://github.com/alloy-rs/core/issues/589 + if tx_log.topics()[0] != Transfer::SIGNATURE_HASH { + continue; + } + let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue }; + // Check if this is a transfer to us for the expected amount + if (transfer.to == self.1) && (transfer.value == log.amount) { + transfer_check.insert(log_index); + found_transfer = true; + break; + } + } + if !found_transfer { + // This shouldn't be a ConnectionError + // This is an exploit, a non-conforming ERC20, or an invalid connection + // This should halt the process which is sufficient, yet this is 
sub-optimal + // TODO + Err(Error::ConnectionError)?; + } + + Coin::Erc20(token) + }; + + in_instructions.push(InInstruction { + id, + from: *log.from.0, + coin, + amount: log.amount, + data: log.instruction.as_ref().to_vec(), + key_at_end_of_block, + }); + } + + Ok(in_instructions) + } + + pub async fn executed_commands(&self, block: u64) -> Result, Error> { + let mut res = vec![]; + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = + log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + { + let filter = Filter::new().from_block(block).to_block(block).address(self.1); + let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH); + let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?; + + for log in logs { + // Double check the address which emitted this log + if log.address() != self.1 { + Err(Error::ConnectionError)?; + } + + let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into(); + + let log = log.log_decode::().map_err(|_| Error::ConnectionError)?.inner.data; + + let mut signature = [0; 64]; + signature[.. 
32].copy_from_slice(log.signature.c.as_ref()); + signature[32 ..].copy_from_slice(log.signature.s.as_ref()); + res.push(Executed { + tx_id, + nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?, + signature, + }); + } + } + + Ok(res) + } + + #[cfg(feature = "tests")] + pub fn key_updated_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH) + } + #[cfg(feature = "tests")] + pub fn executed_filter(&self) -> Filter { + Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH) + } } -*/ diff --git a/coins/ethereum/src/schnorr.rs b/coins/ethereum/src/schnorr.rs deleted file mode 100644 index 0e4495ec..00000000 --- a/coins/ethereum/src/schnorr.rs +++ /dev/null @@ -1,34 +0,0 @@ -use eyre::{eyre, Result}; - -use group::ff::PrimeField; - -use ethers_providers::{Provider, Http}; - -use crate::{ - Error, - crypto::{keccak256, PublicKey, Signature}, -}; -pub use crate::abi::schnorr::*; - -pub async fn call_verify( - contract: &Schnorr>, - public_key: &PublicKey, - message: &[u8], - signature: &Signature, -) -> Result<()> { - if contract - .verify( - public_key.parity, - public_key.px.to_repr().into(), - keccak256(message), - signature.c.to_repr().into(), - signature.s.to_repr().into(), - ) - .call() - .await? 
- { - Ok(()) - } else { - Err(eyre!(Error::InvalidSignature)) - } -} diff --git a/coins/ethereum/src/tests/abi/mod.rs b/coins/ethereum/src/tests/abi/mod.rs new file mode 100644 index 00000000..57ea8811 --- /dev/null +++ b/coins/ethereum/src/tests/abi/mod.rs @@ -0,0 +1,13 @@ +use alloy_sol_types::sol; + +#[rustfmt::skip] +#[allow(warnings)] +#[allow(needless_pass_by_value)] +#[allow(clippy::all)] +#[allow(clippy::ignored_unit_patterns)] +#[allow(clippy::redundant_closure_for_method_calls)] +mod schnorr_container { + use super::*; + sol!("src/tests/contracts/Schnorr.sol"); +} +pub(crate) use schnorr_container::TestSchnorr as schnorr; diff --git a/coins/ethereum/src/tests/contracts/ERC20.sol b/coins/ethereum/src/tests/contracts/ERC20.sol new file mode 100644 index 00000000..e157974c --- /dev/null +++ b/coins/ethereum/src/tests/contracts/ERC20.sol @@ -0,0 +1,51 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +contract TestERC20 { + event Transfer(address indexed from, address indexed to, uint256 value); + event Approval(address indexed owner, address indexed spender, uint256 value); + + function name() public pure returns (string memory) { + return "Test ERC20"; + } + function symbol() public pure returns (string memory) { + return "TEST"; + } + function decimals() public pure returns (uint8) { + return 18; + } + + function totalSupply() public pure returns (uint256) { + return 1_000_000 * 10e18; + } + + mapping(address => uint256) balances; + mapping(address => mapping(address => uint256)) allowances; + + constructor() { + balances[msg.sender] = totalSupply(); + } + + function balanceOf(address owner) public view returns (uint256) { + return balances[owner]; + } + function transfer(address to, uint256 value) public returns (bool) { + balances[msg.sender] -= value; + balances[to] += value; + return true; + } + function transferFrom(address from, address to, uint256 value) public returns (bool) { + allowances[from][msg.sender] -= value; + 
balances[from] -= value; + balances[to] += value; + return true; + } + + function approve(address spender, uint256 value) public returns (bool) { + allowances[msg.sender][spender] = value; + return true; + } + function allowance(address owner, address spender) public view returns (uint256) { + return allowances[owner][spender]; + } +} diff --git a/coins/ethereum/src/tests/contracts/Schnorr.sol b/coins/ethereum/src/tests/contracts/Schnorr.sol new file mode 100644 index 00000000..832cd2fe --- /dev/null +++ b/coins/ethereum/src/tests/contracts/Schnorr.sol @@ -0,0 +1,15 @@ +// SPDX-License-Identifier: AGPLv3 +pragma solidity ^0.8.0; + +import "../../../contracts/Schnorr.sol"; + +contract TestSchnorr { + function verify( + bytes32 px, + bytes calldata message, + bytes32 c, + bytes32 s + ) external pure returns (bool) { + return Schnorr.verify(px, message, c, s); + } +} diff --git a/coins/ethereum/src/tests/crypto.rs b/coins/ethereum/src/tests/crypto.rs index 6dced933..a668b2d6 100644 --- a/coins/ethereum/src/tests/crypto.rs +++ b/coins/ethereum/src/tests/crypto.rs @@ -1,49 +1,33 @@ use rand_core::OsRng; -use sha2::Sha256; -use sha3::{Digest, Keccak256}; - -use group::Group; +use group::ff::{Field, PrimeField}; use k256::{ - ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey}, - elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint}, - U256, Scalar, AffinePoint, ProjectivePoint, + ecdsa::{ + self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey, + }, + Scalar, ProjectivePoint, }; use frost::{ - curve::Secp256k1, + curve::{Ciphersuite, Secp256k1}, algorithm::{Hram, IetfSchnorr}, tests::{algorithm_machines, sign}, }; use crate::{crypto::*, tests::key_gen}; -pub fn hash_to_scalar(data: &[u8]) -> Scalar { - Scalar::reduce(U256::from_be_slice(&keccak256(data))) -} - -pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> { - if r.is_zero().into() || 
s.is_zero().into() || !((v == 27) || (v == 28)) { - return None; - } - - #[allow(non_snake_case)] - let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into()); - #[allow(non_snake_case)] - if let Some(R) = Option::::from(R) { - #[allow(non_snake_case)] - let R = ProjectivePoint::from(R); - - let r = r.invert().unwrap(); - let u1 = ProjectivePoint::GENERATOR * (-message * r); - let u2 = R * (s * r); - let key: ProjectivePoint = u1 + u2; - if !bool::from(key.is_identity()) { - return Some(address(&key)); - } - } - - None +// The ecrecover opcode, yet with parity replacing v +pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> { + let sig = ecdsa::Signature::from_scalars(r, s).ok()?; + let message: [u8; 32] = message.to_repr().into(); + alloy_core::primitives::Signature::from_signature_and_parity( + sig, + alloy_core::primitives::Parity::Parity(odd_y), + ) + .ok()? + .recover_address_from_prehash(&alloy_core::primitives::B256::from(message)) + .ok() + .map(Into::into) } #[test] @@ -55,20 +39,23 @@ fn test_ecrecover() { const MESSAGE: &[u8] = b"Hello, World!"; let (sig, recovery_id) = private .as_nonzero_scalar() - .try_sign_prehashed_rfc6979::(&Keccak256::digest(MESSAGE), b"") + .try_sign_prehashed( + ::F::random(&mut OsRng), + &keccak256(MESSAGE).into(), + ) .unwrap(); // Sanity check the signature verifies #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result { - assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ()); + assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ()); } // Perform the ecrecover assert_eq!( ecrecover( hash_to_scalar(MESSAGE), - u8::from(recovery_id.unwrap().is_y_odd()) + 27, + u8::from(recovery_id.unwrap().is_y_odd()) == 1, *sig.r(), *sig.s() ) @@ -93,18 +80,13 @@ fn test_signing() { pub fn preprocess_signature_for_ecrecover( R: ProjectivePoint, public_key: &PublicKey, - chain_id: U256, m: &[u8], s: Scalar, -) -> (u8, 
Scalar, Scalar) { - let c = EthereumHram::hram( - &R, - &public_key.A, - &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(), - ); +) -> (Scalar, Scalar) { + let c = EthereumHram::hram(&R, &public_key.A, m); let sa = -(s * public_key.px); let ca = -(c * public_key.px); - (public_key.parity, sa, ca) + (sa, ca) } #[test] @@ -112,21 +94,12 @@ fn test_ecrecover_hack() { let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let chain_id = U256::ONE; - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); - let (parity, sa, ca) = - preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s); - let q = ecrecover(sa, parity, public_key.px, ca).unwrap(); + let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s); + let q = ecrecover(sa, false, public_key.px, ca).unwrap(); assert_eq!(q, address(&sig.R)); } diff --git a/coins/ethereum/src/tests/mod.rs b/coins/ethereum/src/tests/mod.rs index c468cfb6..3a381d42 100644 --- a/coins/ethereum/src/tests/mod.rs +++ b/coins/ethereum/src/tests/mod.rs @@ -1,21 +1,25 @@ -use std::{sync::Arc, time::Duration, fs::File, collections::HashMap}; +use std::{sync::Arc, collections::HashMap}; use rand_core::OsRng; -use group::ff::PrimeField; use k256::{Scalar, ProjectivePoint}; use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen}; -use ethers_core::{ - types::{H160, Signature as EthersSignature}, - abi::Abi, +use alloy_core::{ + primitives::{Address, U256, Bytes, TxKind}, + hex::FromHex, }; -use ethers_contract::ContractFactory; -use ethers_providers::{Middleware, Provider, Http}; 
+use alloy_consensus::{SignableTransaction, TxLegacy}; -use crate::crypto::PublicKey; +use alloy_rpc_types::TransactionReceipt; +use alloy_simple_request_transport::SimpleRequest; +use alloy_provider::{Provider, RootProvider}; + +use crate::crypto::{address, deterministically_sign, PublicKey}; mod crypto; + +mod abi; mod schnorr; mod router; @@ -36,57 +40,88 @@ pub fn key_gen() -> (HashMap>, PublicKey) (keys, public_key) } -// TODO: Replace with a contract deployment from an unknown account, so the environment solely has -// to fund the deployer, not create/pass a wallet -// TODO: Deterministic deployments across chains +// TODO: Use a proper error here +pub async fn send( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + mut tx: TxLegacy, +) -> Option { + let verifying_key = *wallet.verifying_key().as_affine(); + let address = Address::from(address(&verifying_key.into())); + + // https://github.com/alloy-rs/alloy/issues/539 + // let chain_id = provider.get_chain_id().await.unwrap(); + // tx.chain_id = Some(chain_id); + tx.chain_id = None; + tx.nonce = provider.get_transaction_count(address, None).await.unwrap(); + // 100 gwei + tx.gas_price = 100_000_000_000u128; + + let sig = wallet.sign_prehash_recoverable(tx.signature_hash().as_ref()).unwrap(); + assert_eq!(address, tx.clone().into_signed(sig.into()).recover_signer().unwrap()); + assert!( + provider.get_balance(address, None).await.unwrap() > + ((U256::from(tx.gas_price) * U256::from(tx.gas_limit)) + tx.value) + ); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let pending_tx = provider.send_raw_transaction(&bytes).await.ok()?; + pending_tx.get_receipt().await.ok() +} + +pub async fn fund_account( + provider: &RootProvider, + wallet: &k256::ecdsa::SigningKey, + to_fund: Address, + value: U256, +) -> Option<()> { + let funding_tx = + TxLegacy { to: TxKind::Call(to_fund), gas_limit: 21_000, value, ..Default::default() }; + assert!(send(provider, wallet, 
funding_tx).await.unwrap().status()); + + Some(()) +} + +// TODO: Use a proper error here pub async fn deploy_contract( - chain_id: u32, - client: Arc>, + client: Arc>, wallet: &k256::ecdsa::SigningKey, name: &str, -) -> eyre::Result { - let abi: Abi = - serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap(); - +) -> Option
{ let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap(); let hex_bin = if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf }; - let bin = hex::decode(hex_bin).unwrap(); - let factory = ContractFactory::new(abi, bin.into(), client.clone()); + let bin = Bytes::from_hex(hex_bin).unwrap(); - let mut deployment_tx = factory.deploy(())?.tx; - deployment_tx.set_chain_id(chain_id); - deployment_tx.set_gas(1_000_000); - let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?; - deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas); - deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas); + let deployment_tx = TxLegacy { + chain_id: None, + nonce: 0, + // 100 gwei + gas_price: 100_000_000_000u128, + gas_limit: 1_000_000, + to: TxKind::Create, + value: U256::ZERO, + input: bin, + }; - let sig_hash = deployment_tx.sighash(); - let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap(); + let deployment_tx = deterministically_sign(&deployment_tx); - // EIP-155 v - let mut v = u64::from(rid.to_byte()); - assert!((v == 0) || (v == 1)); - v += u64::from((chain_id * 2) + 35); + // Fund the deployer address + fund_account( + &client, + wallet, + deployment_tx.recover_signer().unwrap(), + U256::from(deployment_tx.tx().gas_limit) * U256::from(deployment_tx.tx().gas_price), + ) + .await?; - let r = sig.r().to_repr(); - let r_ref: &[u8] = r.as_ref(); - let s = sig.s().to_repr(); - let s_ref: &[u8] = s.as_ref(); - let deployment_tx = - deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v }); + let (deployment_tx, sig, _) = deployment_tx.into_parts(); + let mut bytes = vec![]; + deployment_tx.encode_with_signature_fields(&sig, &mut bytes); + let pending_tx = client.send_raw_transaction(&bytes).await.ok()?; + let receipt = pending_tx.get_receipt().await.ok()?; + 
assert!(receipt.status()); - let pending_tx = client.send_raw_transaction(deployment_tx).await?; - - let mut receipt; - while { - receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?; - receipt.is_none() - } { - tokio::time::sleep(Duration::from_secs(6)).await; - } - let receipt = receipt.unwrap(); - assert!(receipt.status == Some(1.into())); - - Ok(receipt.contract_address.unwrap()) + Some(receipt.contract_address.unwrap()) } diff --git a/coins/ethereum/src/tests/router.rs b/coins/ethereum/src/tests/router.rs index c9be93be..39a865bd 100644 --- a/coins/ethereum/src/tests/router.rs +++ b/coins/ethereum/src/tests/router.rs @@ -2,7 +2,8 @@ use std::{convert::TryFrom, sync::Arc, collections::HashMap}; use rand_core::OsRng; -use group::ff::PrimeField; +use group::Group; +use k256::ProjectivePoint; use frost::{ curve::Secp256k1, Participant, ThresholdKeys, @@ -10,100 +11,173 @@ use frost::{ tests::{algorithm_machines, sign}, }; -use ethers_core::{ - types::{H160, U256, Bytes}, - abi::AbiEncode, - utils::{Anvil, AnvilInstance}, -}; -use ethers_providers::{Middleware, Provider, Http}; +use alloy_core::primitives::{Address, U256}; + +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; use crate::{ - crypto::{keccak256, PublicKey, EthereumHram, Signature}, - router::{self, *}, - tests::{key_gen, deploy_contract}, + crypto::*, + deployer::Deployer, + router::{Router, abi as router}, + tests::{key_gen, send, fund_account}, }; async fn setup_test() -> ( - u32, AnvilInstance, - Router>, + Arc>, + u64, + Router, HashMap>, PublicKey, ) { let anvil = Anvil::new().spawn(); - let provider = Provider::::try_from(anvil.endpoint()).unwrap(); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let provider = RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + ); + let 
chain_id = provider.get_chain_id().await.unwrap(); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); - let contract_address = - deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap(); - let contract = Router::new(contract_address, client.clone()); + // Make sure the Deployer constructor returns None, as it doesn't exist yet + assert!(Deployer::new(client.clone()).await.unwrap().is_none()); + + // Deploy the Deployer + let tx = Deployer::deployment_tx(); + fund_account( + &client, + &wallet, + tx.recover_signer().unwrap(), + U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price), + ) + .await + .unwrap(); + + let (tx, sig, _) = tx.into_parts(); + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig, &mut bytes); + + let pending_tx = client.send_raw_transaction(&bytes).await.unwrap(); + let receipt = pending_tx.get_receipt().await.unwrap(); + assert!(receipt.status()); + let deployer = + Deployer::new(client.clone()).await.expect("network error").expect("deployer wasn't deployed"); let (keys, public_key) = key_gen(); - // Set the key to the threshold keys - let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000); - let pending_tx = tx.send().await.unwrap(); - let receipt = pending_tx.await.unwrap().unwrap(); - assert!(receipt.status == Some(1.into())); + // Verify the Router constructor returns None, as it doesn't exist yet + assert!(deployer.find_router(client.clone(), &public_key).await.unwrap().is_none()); - (chain_id, anvil, contract, keys, public_key) + // Deploy the router + let receipt = send(&client, &anvil.keys()[0].clone().into(), deployer.deploy_router(&public_key)) + .await + .unwrap(); + assert!(receipt.status()); + let contract = deployer.find_router(client.clone(), &public_key).await.unwrap().unwrap(); + + (anvil, client, chain_id, contract, keys, public_key) +} + +async fn latest_block_hash(client: &RootProvider) -> [u8; 32] { + client + 
.get_block(client.get_block_number().await.unwrap().into(), false) + .await + .unwrap() + .unwrap() + .header + .hash + .unwrap() + .0 } #[tokio::test] async fn test_deploy_contract() { - setup_test().await; + let (_anvil, client, _, router, _, public_key) = setup_test().await; + + let block_hash = latest_block_hash(&client).await; + assert_eq!(router.serai_key(block_hash).await.unwrap(), public_key); + assert_eq!(router.nonce(block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check it emitted SeraiKeyUpdated(public_key) at its genesis } pub fn hash_and_sign( keys: &HashMap>, public_key: &PublicKey, - chain_id: U256, message: &[u8], ) -> Signature { - let hashed_message = keccak256(message); - - let mut chain_id_bytes = [0; 32]; - chain_id.to_big_endian(&mut chain_id_bytes); - let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat(); - let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, keys), - full_message, - ); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, keys), message); - Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap() + Signature::new(public_key, message, sig).unwrap() +} + +#[tokio::test] +async fn test_router_update_serai_key() { + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; + + let next_key = loop { + let point = ProjectivePoint::random(&mut OsRng); + let Some(next_key) = PublicKey::new(point) else { continue }; + break next_key; + }; + + let message = Router::update_serai_key_message( + U256::try_from(chain_id).unwrap(), + U256::try_from(1u64).unwrap(), + &next_key, + ); + let sig = hash_and_sign(&keys, &public_key, &message); + + let first_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + + let receipt = + send(&client, 
&anvil.keys()[0].clone().into(), contract.update_serai_key(&next_key, &sig)) + .await + .unwrap(); + assert!(receipt.status()); + + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.serai_key(second_block_hash).await.unwrap(), next_key); + // Check this does still offer the historical state + assert_eq!(contract.serai_key(first_block_hash).await.unwrap(), public_key); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); } #[tokio::test] async fn test_router_execute() { - let (chain_id, _anvil, contract, keys, public_key) = setup_test().await; + let (anvil, client, chain_id, contract, keys, public_key) = setup_test().await; - let to = H160([0u8; 20]); - let value = U256([0u64; 4]); - let data = Bytes::from([0]); - let tx = OutInstruction { to, value, data: data.clone() }; + let to = Address::from([0; 20]); + let value = U256::ZERO; + let tx = router::OutInstruction { to, value, calls: vec![] }; + let txs = vec![tx]; - let nonce_call = contract.nonce(); - let nonce = nonce_call.call().await.unwrap(); + let first_block_hash = latest_block_hash(&client).await; + let nonce = contract.nonce(first_block_hash).await.unwrap(); + assert_eq!(nonce, U256::try_from(1u64).unwrap()); - let encoded = - ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode(); - let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded); + let message = Router::execute_message(U256::try_from(chain_id).unwrap(), nonce, txs.clone()); + let sig = hash_and_sign(&keys, &public_key, &message); - let tx = contract - .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() }) - .gas(300_000); - let pending_tx = tx.send().await.unwrap(); - let receipt = dbg!(pending_tx.await.unwrap().unwrap()); - assert!(receipt.status == Some(1.into())); + let receipt = + send(&client, &anvil.keys()[0].clone().into(), contract.execute(&txs, 
&sig)).await.unwrap(); + assert!(receipt.status()); - println!("gas used: {:?}", receipt.cumulative_gas_used); - println!("logs: {:?}", receipt.logs); + let second_block_hash = latest_block_hash(&client).await; + assert_eq!(contract.nonce(second_block_hash).await.unwrap(), U256::try_from(2u64).unwrap()); + // Check this does still offer the historical state + assert_eq!(contract.nonce(first_block_hash).await.unwrap(), U256::try_from(1u64).unwrap()); + // TODO: Check logs + + println!("gas used: {:?}", receipt.gas_used); + // println!("logs: {:?}", receipt.logs); } diff --git a/coins/ethereum/src/tests/schnorr.rs b/coins/ethereum/src/tests/schnorr.rs index 9525e4d6..9311c292 100644 --- a/coins/ethereum/src/tests/schnorr.rs +++ b/coins/ethereum/src/tests/schnorr.rs @@ -1,11 +1,9 @@ -use std::{convert::TryFrom, sync::Arc}; +use std::sync::Arc; use rand_core::OsRng; -use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar}; - -use ethers_core::utils::{keccak256, Anvil, AnvilInstance}; -use ethers_providers::{Middleware, Provider, Http}; +use group::ff::PrimeField; +use k256::Scalar; use frost::{ curve::Secp256k1, @@ -13,24 +11,34 @@ use frost::{ tests::{algorithm_machines, sign}, }; +use alloy_core::primitives::Address; + +use alloy_sol_types::SolCall; + +use alloy_rpc_types::{TransactionInput, TransactionRequest}; +use alloy_simple_request_transport::SimpleRequest; +use alloy_rpc_client::ClientBuilder; +use alloy_provider::{Provider, RootProvider}; + +use alloy_node_bindings::{Anvil, AnvilInstance}; + use crate::{ + Error, crypto::*, - schnorr::*, - tests::{key_gen, deploy_contract}, + tests::{key_gen, deploy_contract, abi::schnorr as abi}, }; -async fn setup_test() -> (u32, AnvilInstance, Schnorr>) { +async fn setup_test() -> (AnvilInstance, Arc>, Address) { let anvil = Anvil::new().spawn(); - let provider = Provider::::try_from(anvil.endpoint()).unwrap(); - let chain_id = provider.get_chainid().await.unwrap().as_u32(); + let provider = RootProvider::new( + 
ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true), + ); let wallet = anvil.keys()[0].clone().into(); let client = Arc::new(provider); - let contract_address = - deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap(); - let contract = Schnorr::new(contract_address, client.clone()); - (chain_id, anvil, contract) + let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap(); + (anvil, client, address) } #[tokio::test] @@ -38,30 +46,48 @@ async fn test_deploy_contract() { setup_test().await; } +pub async fn call_verify( + provider: &RootProvider, + contract: Address, + public_key: &PublicKey, + message: &[u8], + signature: &Signature, +) -> Result<(), Error> { + let px: [u8; 32] = public_key.px.to_repr().into(); + let c_bytes: [u8; 32] = signature.c.to_repr().into(); + let s_bytes: [u8; 32] = signature.s.to_repr().into(); + let call = TransactionRequest::default().to(Some(contract)).input(TransactionInput::new( + abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into())) + .abi_encode() + .into(), + )); + let bytes = provider.call(&call, None).await.map_err(|_| Error::ConnectionError)?; + let res = + abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?; + + if res._0 { + Ok(()) + } else { + Err(Error::InvalidSignature) + } +} + #[tokio::test] async fn test_ecrecover_hack() { - let (chain_id, _anvil, contract) = setup_test().await; - let chain_id = U256::from(chain_id); + let (_anvil, client, contract) = setup_test().await; let (keys, public_key) = key_gen(); const MESSAGE: &[u8] = b"Hello, World!"; - let hashed_message = keccak256(MESSAGE); - let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat(); let algo = IetfSchnorr::::ietf(); - let sig = sign( - &mut OsRng, - &algo, - keys.clone(), - algorithm_machines(&mut OsRng, &algo, &keys), - full_message, - ); - let sig = Signature::new(&public_key, 
chain_id, MESSAGE, sig).unwrap(); + let sig = + sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE); + let sig = Signature::new(&public_key, MESSAGE, sig).unwrap(); - call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap(); + call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap(); // Test an invalid signature fails let mut sig = sig; sig.s += Scalar::ONE; - assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err()); + assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err()); } diff --git a/coins/monero/Cargo.toml b/coins/monero/Cargo.toml index 9c78e431..357803c9 100644 --- a/coins/monero/Cargo.toml +++ b/coins/monero/Cargo.toml @@ -43,7 +43,6 @@ multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = # Needed for multisig transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true } -dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true } frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true } monero-generators = { path = "generators", version = "0.4", default-features = false } @@ -91,7 +90,6 @@ std = [ "multiexp/std", "transcript/std", - "dleq/std", "monero-generators/std", @@ -106,7 +104,7 @@ std = [ cache-distribution = ["async-lock"] http-rpc = ["digest_auth", "simple-request", "tokio"] -multisig = ["transcript", "frost", "dleq", "std"] +multisig = ["transcript", "frost", "std"] binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"] experimental = [] diff --git a/coins/monero/src/lib.rs b/coins/monero/src/lib.rs index 6d9c0a6b..4e6b26d1 100644 --- a/coins/monero/src/lib.rs +++ b/coins/monero/src/lib.rs @@ -14,7 +14,12 @@ use zeroize::{Zeroize, 
ZeroizeOnDrop}; use sha3::{Digest, Keccak256}; -use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint}; +use curve25519_dalek::{ + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, + scalar::Scalar, + edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, + traits::VartimePrecomputedMultiscalarMul, +}; pub use monero_generators::{H, decompress_point}; @@ -56,6 +61,13 @@ pub(crate) fn INV_EIGHT() -> Scalar { *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert()) } +static BASEPOINT_PRECOMP_CELL: OnceLock = OnceLock::new(); +#[allow(non_snake_case)] +pub(crate) fn BASEPOINT_PRECOMP() -> &'static VartimeEdwardsPrecomputation { + BASEPOINT_PRECOMP_CELL + .get_or_init(|| VartimeEdwardsPrecomputation::new([ED25519_BASEPOINT_POINT])) +} + /// Monero protocol version. /// /// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the diff --git a/coins/monero/src/ringct/bulletproofs/mod.rs b/coins/monero/src/ringct/bulletproofs/mod.rs index df0c6ff8..ce9f7492 100644 --- a/coins/monero/src/ringct/bulletproofs/mod.rs +++ b/coins/monero/src/ringct/bulletproofs/mod.rs @@ -91,7 +91,7 @@ impl Bulletproofs { Bulletproofs::Plus( AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect()) .unwrap() - .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap())) + .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs.to_vec()).unwrap())) .unwrap(), ) }) diff --git a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs index af5c0275..cba95014 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/aggregate_range_proof.rs @@ -24,7 +24,7 @@ use crate::{ }, }; -// Figure 3 +// Figure 3 of the Bulletproofs+ Paper #[derive(Clone, Debug)] pub(crate) struct AggregateRangeStatement { 
generators: Generators, @@ -38,24 +38,15 @@ impl Zeroize for AggregateRangeStatement { } #[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)] -pub(crate) struct AggregateRangeWitness { - values: Vec, - gammas: Vec, -} +pub(crate) struct AggregateRangeWitness(Vec); impl AggregateRangeWitness { - pub(crate) fn new(commitments: &[Commitment]) -> Option { + pub(crate) fn new(commitments: Vec) -> Option { if commitments.is_empty() || (commitments.len() > MAX_M) { return None; } - let mut values = Vec::with_capacity(commitments.len()); - let mut gammas = Vec::with_capacity(commitments.len()); - for commitment in commitments { - values.push(commitment.amount); - gammas.push(Scalar(commitment.mask)); - } - Some(AggregateRangeWitness { values, gammas }) + Some(AggregateRangeWitness(commitments)) } } @@ -162,13 +153,11 @@ impl AggregateRangeStatement { witness: &AggregateRangeWitness, ) -> Option { // Check for consistency with the witness - if self.V.len() != witness.values.len() { + if self.V.len() != witness.0.len() { return None; } - for (commitment, (value, gamma)) in - self.V.iter().zip(witness.values.iter().zip(witness.gammas.iter())) - { - if Commitment::new(**gamma, *value).calculate() != **commitment { + for (commitment, witness) in self.V.iter().zip(witness.0.iter()) { + if witness.calculate() != **commitment { return None; } } @@ -196,7 +185,13 @@ impl AggregateRangeStatement { let mut a_l = ScalarVector(Vec::with_capacity(V.len() * N)); for j in 1 ..= V.len() { d_js.push(Self::d_j(j, V.len())); - a_l.0.append(&mut u64_decompose(*witness.values.get(j - 1).unwrap_or(&0)).0); + #[allow(clippy::map_unwrap_or)] + a_l.0.append( + &mut u64_decompose( + *witness.0.get(j - 1).map(|commitment| &commitment.amount).unwrap_or(&0), + ) + .0, + ); } let a_r = a_l.clone() - Scalar::ONE; @@ -223,8 +218,8 @@ impl AggregateRangeStatement { let a_l = a_l - z; let a_r = a_r + &d_descending_y_plus_z; let mut alpha = alpha; - for j in 1 ..= witness.gammas.len() { - alpha += z_pow[j - 1] * 
witness.gammas[j - 1] * y_mn_plus_one; + for j in 1 ..= witness.0.len() { + alpha += z_pow[j - 1] * Scalar(witness.0[j - 1].mask) * y_mn_plus_one; } Some(AggregateRangeProof { diff --git a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs index 09bb6748..7cb9a4df 100644 --- a/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs +++ b/coins/monero/src/ringct/bulletproofs/plus/weighted_inner_product.rs @@ -15,7 +15,7 @@ use crate::ringct::bulletproofs::plus::{ ScalarVector, PointVector, GeneratorsList, Generators, padded_pow_of_2, transcript::*, }; -// Figure 1 +// Figure 1 of the Bulletproofs+ paper #[derive(Clone, Debug)] pub(crate) struct WipStatement { generators: Generators, diff --git a/coins/monero/src/ringct/clsag/mod.rs b/coins/monero/src/ringct/clsag/mod.rs index 1290e3e3..042d964a 100644 --- a/coins/monero/src/ringct/clsag/mod.rs +++ b/coins/monero/src/ringct/clsag/mod.rs @@ -9,17 +9,17 @@ use std_shims::{ use rand_core::{RngCore, CryptoRng}; use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; -use subtle::{ConstantTimeEq, Choice, CtOption}; +use subtle::{ConstantTimeEq, ConditionallySelectable}; use curve25519_dalek::{ - constants::ED25519_BASEPOINT_TABLE, + constants::{ED25519_BASEPOINT_TABLE, ED25519_BASEPOINT_POINT}, scalar::Scalar, - traits::{IsIdentity, VartimePrecomputedMultiscalarMul}, + traits::{IsIdentity, MultiscalarMul, VartimePrecomputedMultiscalarMul}, edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}, }; use crate::{ - INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, + INV_EIGHT, BASEPOINT_PRECOMP, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys, ringct::hash_to_point, serialize::*, }; @@ -27,8 +27,6 @@ use crate::{ mod multisig; #[cfg(feature = "multisig")] pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig}; -#[cfg(feature = "multisig")] -pub(crate) use 
multisig::add_key_image_share; /// Errors returned when CLSAG signing fails. #[derive(Clone, Copy, PartialEq, Eq, Debug)] @@ -100,8 +98,11 @@ fn core( ) -> ((EdwardsPoint, Scalar, Scalar), Scalar) { let n = ring.len(); - let images_precomp = VartimeEdwardsPrecomputation::new([I, D]); - let D = D * INV_EIGHT(); + let images_precomp = match A_c1 { + Mode::Sign(..) => None, + Mode::Verify(..) => Some(VartimeEdwardsPrecomputation::new([I, D])), + }; + let D_INV_EIGHT = D * INV_EIGHT(); // Generate the transcript // Instead of generating multiple, a single transcript is created and then edited as needed @@ -130,7 +131,7 @@ fn core( } to_hash.extend(I.compress().to_bytes()); - to_hash.extend(D.compress().to_bytes()); + to_hash.extend(D_INV_EIGHT.compress().to_bytes()); to_hash.extend(pseudo_out.compress().to_bytes()); // mu_P with agg_0 let mu_P = hash_to_scalar(&to_hash); @@ -169,29 +170,44 @@ fn core( } // Perform the core loop - let mut c1 = CtOption::new(Scalar::ZERO, Choice::from(0)); + let mut c1 = c; for i in (start .. end).map(|i| i % n) { - // This will only execute once and shouldn't need to be constant time. Making it constant time - // removes the risk of branch prediction creating timing differences depending on ring index - // however - c1 = c1.or_else(|| CtOption::new(c, i.ct_eq(&0))); - let c_p = mu_P * c; let c_c = mu_C * c; - let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]); + // (s_i * G) + (c_p * P_i) + (c_c * C_i) + let L = match A_c1 { + Mode::Sign(..) => { + EdwardsPoint::multiscalar_mul([s[i], c_p, c_c], [ED25519_BASEPOINT_POINT, P[i], C[i]]) + } + Mode::Verify(..) 
=> { + BASEPOINT_PRECOMP().vartime_mixed_multiscalar_mul([s[i]], [c_p, c_c], [P[i], C[i]]) + } + }; + let PH = hash_to_point(&P[i]); - // Shouldn't be an issue as all of the variables in this vartime statement are public - let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]); + + // (c_p * I) + (c_c * D) + (s_i * PH) + let R = match A_c1 { + Mode::Sign(..) => EdwardsPoint::multiscalar_mul([c_p, c_c, s[i]], [I, D, &PH]), + Mode::Verify(..) => { + images_precomp.as_ref().unwrap().vartime_mixed_multiscalar_mul([c_p, c_c], [s[i]], [PH]) + } + }; to_hash.truncate(((2 * n) + 3) * 32); to_hash.extend(L.compress().to_bytes()); to_hash.extend(R.compress().to_bytes()); c = hash_to_scalar(&to_hash); + + // This will only execute once and shouldn't need to be constant time. Making it constant time + // removes the risk of branch prediction creating timing differences depending on ring index + // however + c1.conditional_assign(&c, i.ct_eq(&(n - 1))); } // This first tuple is needed to continue signing, the latter is the c to be tested/worked with - ((D, c * mu_P, c * mu_C), c1.unwrap_or(c)) + ((D_INV_EIGHT, c * mu_P, c * mu_C), c1) } /// CLSAG signature, as used in Monero. 
@@ -261,8 +277,10 @@ impl Clsag { nonce.deref() * hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]), ); - clsag.s[usize::from(inputs[i].2.decoys.i)] = - (-((p * inputs[i].0.deref()) + c)) + nonce.deref(); + // Effectively r - cx, except cx is (c_p x) + (c_c z), where z is the delta between a ring + // member's commitment and our input commitment (which will only have a known discrete log + // over G if the amounts cancel out) + clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce.deref() - ((p * inputs[i].0.deref()) + c); inputs[i].0.zeroize(); nonce.zeroize(); diff --git a/coins/monero/src/ringct/clsag/multisig.rs b/coins/monero/src/ringct/clsag/multisig.rs index 85748b78..e9234979 100644 --- a/coins/monero/src/ringct/clsag/multisig.rs +++ b/coins/monero/src/ringct/clsag/multisig.rs @@ -1,5 +1,8 @@ use core::{ops::Deref, fmt::Debug}; -use std_shims::io::{self, Read, Write}; +use std_shims::{ + io::{self, Read, Write}, + collections::HashMap, +}; use std::sync::{Arc, RwLock}; use rand_core::{RngCore, CryptoRng, SeedableRng}; @@ -9,11 +12,13 @@ use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing}; use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint}; -use group::{ff::Field, Group, GroupEncoding}; +use group::{ + ff::{Field, PrimeField}, + Group, GroupEncoding, +}; use transcript::{Transcript, RecommendedTranscript}; use dalek_ff_group as dfg; -use dleq::DLEqProof; use frost::{ dkg::lagrange, curve::Ed25519, @@ -26,10 +31,6 @@ use crate::ringct::{ clsag::{ClsagInput, Clsag}, }; -fn dleq_transcript() -> RecommendedTranscript { - RecommendedTranscript::new(b"monero_key_image_dleq") -} - impl ClsagInput { fn transcript(&self, transcript: &mut T) { // Doesn't domain separate as this is considered part of the larger CLSAG proof @@ -43,6 +44,7 @@ impl ClsagInput { // They're just a unreliable reference to this data which will be included in the message // if in use transcript.append_message(b"member", [u8::try_from(i).expect("ring size 
exceeded 255")]); + // This also transcripts the key image generator since it's derived from this key transcript.append_message(b"key", pair[0].compress().to_bytes()); transcript.append_message(b"commitment", pair[1].compress().to_bytes()) } @@ -70,13 +72,11 @@ impl ClsagDetails { #[derive(Clone, PartialEq, Eq, Zeroize, Debug)] pub struct ClsagAddendum { pub(crate) key_image: dfg::EdwardsPoint, - dleq: DLEqProof, } impl WriteAddendum for ClsagAddendum { fn write(&self, writer: &mut W) -> io::Result<()> { - writer.write_all(self.key_image.compress().to_bytes().as_ref())?; - self.dleq.write(writer) + writer.write_all(self.key_image.compress().to_bytes().as_ref()) } } @@ -97,9 +97,8 @@ pub struct ClsagMultisig { transcript: RecommendedTranscript, pub(crate) H: EdwardsPoint, - // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires - // an extra round - image: EdwardsPoint, + key_image_shares: HashMap<[u8; 32], dfg::EdwardsPoint>, + image: Option, details: Arc>>, @@ -117,7 +116,8 @@ impl ClsagMultisig { transcript, H: hash_to_point(&output_key), - image: EdwardsPoint::identity(), + key_image_shares: HashMap::new(), + image: None, details, @@ -135,20 +135,6 @@ impl ClsagMultisig { } } -pub(crate) fn add_key_image_share( - image: &mut EdwardsPoint, - generator: EdwardsPoint, - offset: Scalar, - included: &[Participant], - participant: Participant, - share: EdwardsPoint, -) { - if image.is_identity().into() { - *image = generator * offset; - } - *image += share * lagrange::(participant, included).0; -} - impl Algorithm for ClsagMultisig { type Transcript = RecommendedTranscript; type Addendum = ClsagAddendum; @@ -160,23 +146,10 @@ impl Algorithm for ClsagMultisig { fn preprocess_addendum( &mut self, - rng: &mut R, + _rng: &mut R, keys: &ThresholdKeys, ) -> ClsagAddendum { - ClsagAddendum { - key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(), - dleq: DLEqProof::prove( - rng, - // Doesn't take in a larger transcript 
object due to the usage of this - // Every prover would immediately write their own DLEq proof, when they can only do so in - // the proper order if they want to reach consensus - // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to - // try to merge later in some form, when it should instead just merge xH (as it does) - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - keys.secret_share(), - ), - } + ClsagAddendum { key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref() } } fn read_addendum(&self, reader: &mut R) -> io::Result { @@ -190,7 +163,7 @@ impl Algorithm for ClsagMultisig { Err(io::Error::other("non-canonical key image"))?; } - Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::::read(reader)? }) + Ok(ClsagAddendum { key_image: xH }) } fn process_addendum( @@ -199,33 +172,29 @@ impl Algorithm for ClsagMultisig { l: Participant, addendum: ClsagAddendum, ) -> Result<(), FrostError> { - // TODO: This check is faulty if two shares are additive inverses of each other - if self.image.is_identity().into() { + if self.image.is_none() { self.transcript.domain_separate(b"CLSAG"); + // Transcript the ring self.input().transcript(&mut self.transcript); + // Transcript the mask self.transcript.append_message(b"mask", self.mask().to_bytes()); + + // Init the image to the offset + self.image = Some(dfg::EdwardsPoint(self.H) * view.offset()); } + // Transcript this participant's contribution self.transcript.append_message(b"participant", l.to_bytes()); - - addendum - .dleq - .verify( - &mut dleq_transcript(), - &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)], - &[view.original_verification_share(l), addendum.key_image], - ) - .map_err(|_| FrostError::InvalidPreprocess(l))?; - self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes()); - add_key_image_share( - &mut self.image, - self.H, - view.offset().0, - view.included(), - l, - 
addendum.key_image.0, - ); + + // Accumulate the interpolated share + let interpolated_key_image_share = + addendum.key_image * lagrange::(l, view.included()); + *self.image.as_mut().unwrap() += interpolated_key_image_share; + + self + .key_image_shares + .insert(view.verification_share(l).to_bytes(), interpolated_key_image_share); Ok(()) } @@ -253,7 +222,7 @@ impl Algorithm for ClsagMultisig { #[allow(non_snake_case)] let (clsag, pseudo_out, p, c) = Clsag::sign_core( &mut rng, - &self.image, + &self.image.expect("verifying a share despite never processing any addendums").0, &self.input(), self.mask(), self.msg.as_ref().unwrap(), @@ -262,7 +231,8 @@ impl Algorithm for ClsagMultisig { ); self.interim = Some(Interim { p, c, clsag, pseudo_out }); - (-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref() + // r - p x, where p is the challenge for the keys + *nonces[0] - dfg::Scalar(p) * view.secret_share().deref() } #[must_use] @@ -274,11 +244,13 @@ impl Algorithm for ClsagMultisig { ) -> Option { let interim = self.interim.as_ref().unwrap(); let mut clsag = interim.clsag.clone(); + // We produced shares as `r - p x`, yet the signature is `r - p x - c x` + // Subtract `c x` (saved as `c`) now clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c; if clsag .verify( &self.input().decoys.ring, - &self.image, + &self.image.expect("verifying a signature despite never processing any addendums").0, &interim.pseudo_out, self.msg.as_ref().unwrap(), ) @@ -296,10 +268,61 @@ impl Algorithm for ClsagMultisig { share: dfg::Scalar, ) -> Result, ()> { let interim = self.interim.as_ref().unwrap(); - Ok(vec![ + + // For a share `r - p x`, the following two equalities should hold: + // - `(r - p x)G == R.0 - pV`, where `V = xG` + // - `(r - p x)H == R.1 - pK`, where `K = xH` (the key image share) + // + // This is effectively a discrete log equality proof for: + // V, K over G, H + // with nonces + // R.0, R.1 + // and solution + // s + // + // Which is a 
batch-verifiable rewrite of the traditional CP93 proof + // (and also writable as Generalized Schnorr Protocol) + // + // That means that given a proper challenge, this alone can be certainly argued to prove the + // key image share is well-formed and the provided signature so proves for that. + + // This is a bit funky as it doesn't prove the nonces are well-formed however. They're part of + // the prover data/transcript for a CP93/GSP proof, not part of the statement. This practically + // is fine, for a variety of reasons (given a consistent `x`, a consistent `r` can be + // extracted, and the nonces as used in CLSAG are also part of its prover data/transcript). + + let key_image_share = self.key_image_shares[&verification_share.to_bytes()]; + + // Hash every variable relevant here, using the hash output as the random weight + let mut weight_transcript = + RecommendedTranscript::new(b"monero-serai v0.1 ClsagMultisig::verify_share"); + weight_transcript.append_message(b"G", dfg::EdwardsPoint::generator().to_bytes()); + weight_transcript.append_message(b"H", self.H.to_bytes()); + weight_transcript.append_message(b"xG", verification_share.to_bytes()); + weight_transcript.append_message(b"xH", key_image_share.to_bytes()); + weight_transcript.append_message(b"rG", nonces[0][0].to_bytes()); + weight_transcript.append_message(b"rH", nonces[0][1].to_bytes()); + weight_transcript.append_message(b"c", dfg::Scalar(interim.p).to_repr()); + weight_transcript.append_message(b"s", share.to_repr()); + let weight = weight_transcript.challenge(b"weight"); + let weight = dfg::Scalar(Scalar::from_bytes_mod_order_wide(&weight.into())); + + let part_one = vec![ (share, dfg::EdwardsPoint::generator()), - (dfg::Scalar(interim.p), verification_share), + // -(R.0 - pV) == -R.0 + pV (-dfg::Scalar::ONE, nonces[0][0]), - (dfg::Scalar(interim.p), verification_share), + ]; + + let mut part_two = vec![ + (weight * share, dfg::EdwardsPoint(self.H)), + // -(R.1 - pK) == -R.1 + pK + 
(-weight, nonces[0][1]), + (weight * dfg::Scalar(interim.p), key_image_share), + ]; + + let mut all = part_one; + all.append(&mut part_two); + Ok(all) } } diff --git a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs index a50b9d40..658da250 100644 --- a/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs +++ b/coins/monero/src/tests/bulletproofs/plus/aggregate_range_proof.rs @@ -21,7 +21,7 @@ fn test_aggregate_range_proof() { } let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect(); let statement = AggregateRangeStatement::new(commitment_points).unwrap(); - let witness = AggregateRangeWitness::new(&commitments).unwrap(); + let witness = AggregateRangeWitness::new(commitments).unwrap(); let proof = statement.clone().prove(&mut OsRng, &witness).unwrap(); statement.verify(&mut OsRng, &mut verifier, (), proof); diff --git a/coins/monero/src/tests/clsag.rs b/coins/monero/src/tests/clsag.rs index 59e41ebf..a17d7ba2 100644 --- a/coins/monero/src/tests/clsag.rs +++ b/coins/monero/src/tests/clsag.rs @@ -57,7 +57,7 @@ fn clsag() { } let image = generate_key_image(&secrets.0); - let (clsag, pseudo_out) = Clsag::sign( + let (mut clsag, pseudo_out) = Clsag::sign( &mut OsRng, vec![( secrets.0, @@ -76,7 +76,12 @@ fn clsag() { msg, ) .swap_remove(0); + clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap(); + + // make sure verification fails if we throw a random `c1` at it. 
+ clsag.c1 = random_scalar(&mut OsRng); + assert!(clsag.verify(&ring, &image, &pseudo_out, &msg).is_err()); } } diff --git a/coins/monero/src/wallet/address.rs b/coins/monero/src/wallet/address.rs index 9c79942b..d080488d 100644 --- a/coins/monero/src/wallet/address.rs +++ b/coins/monero/src/wallet/address.rs @@ -1,5 +1,5 @@ -use core::{marker::PhantomData, fmt::Debug}; -use std_shims::string::{String, ToString}; +use core::{marker::PhantomData, fmt}; +use std_shims::string::ToString; use zeroize::Zeroize; @@ -81,7 +81,7 @@ impl AddressType { } /// A type which returns the byte for a given address. -pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug { +pub trait AddressBytes: Clone + Copy + PartialEq + Eq + fmt::Debug { fn network_bytes(network: Network) -> (u8, u8, u8, u8); } @@ -191,8 +191,8 @@ pub struct Address { pub view: EdwardsPoint, } -impl core::fmt::Debug for Address { - fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { +impl fmt::Debug for Address { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { fmt .debug_struct("Address") .field("meta", &self.meta) @@ -212,8 +212,8 @@ impl Zeroize for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let mut data = vec![self.meta.to_byte()]; data.extend(self.spend.compress().to_bytes()); data.extend(self.view.compress().to_bytes()); @@ -226,7 +226,7 @@ impl ToString for Address { if let Some(id) = self.meta.kind.payment_id() { data.extend(id); } - encode_check(&data).unwrap() + write!(f, "{}", encode_check(&data).unwrap()) } } diff --git a/coins/monero/src/wallet/send/multisig.rs b/coins/monero/src/wallet/send/multisig.rs index 02626e6a..a5be404a 100644 --- a/coins/monero/src/wallet/send/multisig.rs +++ b/coins/monero/src/wallet/send/multisig.rs @@ -18,6 +18,7 @@ use transcript::{Transcript, RecommendedTranscript}; use frost::{ 
curve::Ed25519, Participant, FrostError, ThresholdKeys, + dkg::lagrange, sign::{ Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine, @@ -27,7 +28,7 @@ use frost::{ use crate::{ random_scalar, ringct::{ - clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share}, + clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig}, RctPrunable, }, transaction::{Input, Transaction}, @@ -261,8 +262,13 @@ impl SignMachine for TransactionSignMachine { included.push(self.i); included.sort_unstable(); - // Convert the unified commitments to a Vec of the individual commitments + // Start calculating the key images, as needed on the TX level let mut images = vec![EdwardsPoint::identity(); self.clsags.len()]; + for (image, (generator, offset)) in images.iter_mut().zip(&self.key_images) { + *image = generator * offset; + } + + // Convert the serialized nonces commitments to a parallelized Vec let mut commitments = (0 .. 
self.clsags.len()) .map(|c| { included @@ -291,14 +297,7 @@ impl SignMachine for TransactionSignMachine { // provides the easiest API overall, as this is where the TX is (which needs the key // images in its message), along with where the outputs are determined (where our // outputs may need these in order to guarantee uniqueness) - add_key_image_share( - &mut images[c], - self.key_images[c].0, - self.key_images[c].1, - &included, - *l, - preprocess.addendum.key_image.0, - ); + images[c] += preprocess.addendum.key_image.0 * lagrange::(*l, &included).0; Ok((*l, preprocess)) }) diff --git a/common/db/src/parity_db.rs b/common/db/src/parity_db.rs index 06fd0c7c..8c913468 100644 --- a/common/db/src/parity_db.rs +++ b/common/db/src/parity_db.rs @@ -11,7 +11,7 @@ impl Get for Transaction<'_> { let mut res = self.0.get(&key); for change in &self.1 { if change.1 == key.as_ref() { - res = change.2.clone(); + res.clone_from(&change.2); } } res diff --git a/coordinator/Cargo.toml b/coordinator/Cargo.toml index 12f8e763..ae4e2be7 100644 --- a/coordinator/Cargo.toml +++ b/coordinator/Cargo.toml @@ -51,7 +51,7 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim futures-util = { version = "0.3", default-features = false, features = ["std"] } tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } -libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] } +libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] } [dev-dependencies] tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] } diff --git a/coordinator/src/cosign_evaluator.rs b/coordinator/src/cosign_evaluator.rs index 4ce7faf7..29d9cc4b 100644 --- a/coordinator/src/cosign_evaluator.rs +++ b/coordinator/src/cosign_evaluator.rs @@ -22,7 +22,7 @@ use 
serai_db::{Get, DbTxn, Db, create_db}; use processor_messages::coordinator::cosign_block_msg; use crate::{ - p2p::{CosignedBlock, P2pMessageKind, P2p}, + p2p::{CosignedBlock, GossipMessageKind, P2p}, substrate::LatestCosignedBlock, }; @@ -323,7 +323,7 @@ impl CosignEvaluator { for cosign in cosigns { let mut buf = vec![]; cosign.serialize(&mut buf).unwrap(); - P2p::broadcast(&p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await; } sleep(Duration::from_secs(60)).await; } diff --git a/coordinator/src/main.rs b/coordinator/src/main.rs index 4de23ae0..58de348d 100644 --- a/coordinator/src/main.rs +++ b/coordinator/src/main.rs @@ -260,7 +260,7 @@ async fn handle_processor_message( cosign_channel.send(cosigned_block).unwrap(); let mut buf = vec![]; cosigned_block.serialize(&mut buf).unwrap(); - P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await; + P2p::broadcast(p2p, GossipMessageKind::CosignedBlock, buf).await; None } // This causes an action on Substrate yet not on any Tributary diff --git a/coordinator/src/p2p.rs b/coordinator/src/p2p.rs index 19bf299d..ef876f9a 100644 --- a/coordinator/src/p2p.rs +++ b/coordinator/src/p2p.rs @@ -1,8 +1,8 @@ use core::{time::Duration, fmt}; use std::{ sync::Arc, - io::Read, - collections::HashMap, + io::{self, Read}, + collections::{HashSet, HashMap}, time::{SystemTime, Instant}, }; @@ -15,7 +15,7 @@ use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorS use serai_db::Db; -use futures_util::StreamExt; +use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, StreamExt}; use tokio::{ sync::{Mutex, RwLock, mpsc, broadcast}, time::sleep, @@ -27,12 +27,16 @@ use libp2p::{ PeerId, tcp::Config as TcpConfig, noise, yamux, + request_response::{ + Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig, + Behaviour as RrBehavior, + }, gossipsub::{ IdentTopic, FastMessageId, MessageId, MessageAuthenticity, 
ValidationMode, ConfigBuilder, IdentityTransform, AllowAllSubscriptionFilter, Event as GsEvent, PublishError, Behaviour as GsBehavior, }, - swarm::{NetworkBehaviour, SwarmEvent, Swarm}, + swarm::{NetworkBehaviour, SwarmEvent}, SwarmBuilder, }; @@ -40,6 +44,8 @@ pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p}; use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent}; +// Block size limit + 1 KB of space for signatures/metadata +const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; const LIBP2P_TOPIC: &str = "serai-coordinator"; #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)] @@ -51,71 +57,112 @@ pub struct CosignedBlock { } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] -pub enum P2pMessageKind { +pub enum ReqResMessageKind { KeepAlive, - Tributary([u8; 32]), Heartbeat([u8; 32]), Block([u8; 32]), +} + +impl ReqResMessageKind { + pub fn read(reader: &mut R) -> Option { + let mut kind = [0; 1]; + reader.read_exact(&mut kind).ok()?; + match kind[0] { + 0 => Some(ReqResMessageKind::KeepAlive), + 1 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Heartbeat(genesis) + }), + 2 => Some({ + let mut genesis = [0; 32]; + reader.read_exact(&mut genesis).ok()?; + ReqResMessageKind::Block(genesis) + }), + _ => None, + } + } + + pub fn serialize(&self) -> Vec { + match self { + ReqResMessageKind::KeepAlive => vec![0], + ReqResMessageKind::Heartbeat(genesis) => { + let mut res = vec![1]; + res.extend(genesis); + res + } + ReqResMessageKind::Block(genesis) => { + let mut res = vec![2]; + res.extend(genesis); + res + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum GossipMessageKind { + Tributary([u8; 32]), CosignedBlock, } +impl GossipMessageKind { + pub fn read(reader: &mut R) -> Option { + let mut kind = [0; 1]; + reader.read_exact(&mut kind).ok()?; + match kind[0] { + 0 => Some({ + let mut genesis = [0; 32]; + 
reader.read_exact(&mut genesis).ok()?; + GossipMessageKind::Tributary(genesis) + }), + 1 => Some(GossipMessageKind::CosignedBlock), + _ => None, + } + } + + pub fn serialize(&self) -> Vec { + match self { + GossipMessageKind::Tributary(genesis) => { + let mut res = vec![0]; + res.extend(genesis); + res + } + GossipMessageKind::CosignedBlock => { + vec![1] + } + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] +pub enum P2pMessageKind { + ReqRes(ReqResMessageKind), + Gossip(GossipMessageKind), +} + impl P2pMessageKind { fn genesis(&self) -> Option<[u8; 32]> { match self { - P2pMessageKind::KeepAlive | P2pMessageKind::CosignedBlock => None, - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => Some(*genesis), + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) | + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => None, + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) | + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => Some(*genesis), } } +} - fn serialize(&self) -> Vec { - match self { - P2pMessageKind::KeepAlive => vec![0], - P2pMessageKind::Tributary(genesis) => { - let mut res = vec![1]; - res.extend(genesis); - res - } - P2pMessageKind::Heartbeat(genesis) => { - let mut res = vec![2]; - res.extend(genesis); - res - } - P2pMessageKind::Block(genesis) => { - let mut res = vec![3]; - res.extend(genesis); - res - } - P2pMessageKind::CosignedBlock => { - vec![4] - } - } +impl From for P2pMessageKind { + fn from(kind: ReqResMessageKind) -> P2pMessageKind { + P2pMessageKind::ReqRes(kind) } +} - fn read(reader: &mut R) -> Option { - let mut kind = [0; 1]; - reader.read_exact(&mut kind).ok()?; - match kind[0] { - 0 => Some(P2pMessageKind::KeepAlive), - 1 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Tributary(genesis) - }), - 2 => Some({ - let mut genesis = [0; 
32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Heartbeat(genesis) - }), - 3 => Some({ - let mut genesis = [0; 32]; - reader.read_exact(&mut genesis).ok()?; - P2pMessageKind::Block(genesis) - }), - 4 => Some(P2pMessageKind::CosignedBlock), - _ => None, - } +impl From for P2pMessageKind { + fn from(kind: GossipMessageKind) -> P2pMessageKind { + P2pMessageKind::Gossip(kind) } } @@ -133,17 +180,21 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]); async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]); - async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec); - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec); - async fn receive_raw(&self) -> (Self::Id, Vec); + async fn send_raw(&self, to: Self::Id, msg: Vec); + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec); + async fn receive(&self) -> Message; - async fn send(&self, to: Self::Id, kind: P2pMessageKind, msg: Vec) { + async fn send(&self, to: Self::Id, kind: ReqResMessageKind, msg: Vec) { let mut actual_msg = kind.serialize(); actual_msg.extend(msg); - self.send_raw(to, kind.genesis(), actual_msg).await; + self.send_raw(to, actual_msg).await; } - async fn broadcast(&self, kind: P2pMessageKind, msg: Vec) { - let mut actual_msg = kind.serialize(); + async fn broadcast(&self, kind: impl Send + Into, msg: Vec) { + let kind = kind.into(); + let mut actual_msg = match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }; actual_msg.extend(msg); /* log::trace!( @@ -157,41 +208,70 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p { } ); */ - self.broadcast_raw(kind.genesis(), actual_msg).await; + self.broadcast_raw(kind, actual_msg).await; } - async fn receive(&self) -> Message { - let (sender, kind, msg) = loop { - let (sender, msg) = self.receive_raw().await; - if msg.is_empty() { - 
log::error!("empty p2p message from {sender:?}"); - continue; - } +} - let mut msg_ref = msg.as_ref(); - let Some(kind) = P2pMessageKind::read::<&[u8]>(&mut msg_ref) else { - log::error!("invalid p2p message kind from {sender:?}"); - continue; - }; - break (sender, kind, msg_ref.to_vec()); - }; - /* - log::trace!( - "received p2p message (kind {})", - match kind { - P2pMessageKind::KeepAlive => "KeepAlive".to_string(), - P2pMessageKind::Tributary(genesis) => format!("Tributary({})", hex::encode(genesis)), - P2pMessageKind::Heartbeat(genesis) => format!("Heartbeat({})", hex::encode(genesis)), - P2pMessageKind::Block(genesis) => format!("Block({})", hex::encode(genesis)), - P2pMessageKind::CosignedBlock => "CosignedBlock".to_string(), - } - ); - */ - Message { sender, kind, msg } +#[derive(Default, Clone, Copy, PartialEq, Eq, Debug)] +struct RrCodec; +#[async_trait] +impl RrCodecTrait for RrCodec { + type Protocol = &'static str; + type Request = Vec; + type Response = Vec; + + async fn read_request( + &mut self, + _: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + let mut len = [0; 4]; + io.read_exact(&mut len).await?; + let len = usize::try_from(u32::from_le_bytes(len)).expect("not a 32-bit platform?"); + if len > MAX_LIBP2P_MESSAGE_SIZE { + Err(io::Error::other("request length exceeded MAX_LIBP2P_MESSAGE_SIZE"))?; + } + // This may be a non-trivial allocation easily causable + // While we could chunk the read, meaning we only perform the allocation as bandwidth is used, + // the max message size should be sufficiently sane + let mut buf = vec![0; len]; + io.read_exact(&mut buf).await?; + Ok(buf) + } + async fn read_response( + &mut self, + proto: &Self::Protocol, + io: &mut R, + ) -> io::Result> { + self.read_request(proto, io).await + } + async fn write_request( + &mut self, + _: &Self::Protocol, + io: &mut W, + req: Vec, + ) -> io::Result<()> { + io.write_all( + &u32::try_from(req.len()) + .map_err(|_| io::Error::other("request length exceeded 2**32"))? 
+ .to_le_bytes(), + ) + .await?; + io.write_all(&req).await + } + async fn write_response( + &mut self, + proto: &Self::Protocol, + io: &mut W, + res: Vec, + ) -> io::Result<()> { + self.write_request(proto, io, res).await } } #[derive(NetworkBehaviour)] struct Behavior { + reqres: RrBehavior, gossipsub: GsBehavior, } @@ -199,8 +279,9 @@ struct Behavior { #[derive(Clone)] pub struct LibP2p { subscribe: Arc>>, - broadcast: Arc, Vec)>>>, - receive: Arc)>>>, + send: Arc)>>>, + broadcast: Arc)>>>, + receive: Arc>>>, } impl fmt::Debug for LibP2p { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -211,14 +292,12 @@ impl fmt::Debug for LibP2p { impl LibP2p { #[allow(clippy::new_without_default)] pub fn new(serai: Arc) -> Self { - // Block size limit + 1 KB of space for signatures/metadata - const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024; - log::info!("creating a libp2p instance"); let throwaway_key_pair = Keypair::generate_ed25519(); let behavior = Behavior { + reqres: { RrBehavior::new([], RrConfig::default()) }, gossipsub: { let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2; let heartbeats_per_block = @@ -282,6 +361,7 @@ impl LibP2p { const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o') swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap(); + let (send_send, mut send_recv) = mpsc::unbounded_channel(); let (broadcast_send, mut broadcast_recv) = mpsc::unbounded_channel(); let (receive_send, receive_recv) = mpsc::unbounded_channel(); let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel(); @@ -290,17 +370,31 @@ impl LibP2p { IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode()))) } + // TODO: If a network has less than TARGET_PEERS, this will cause retries ad infinitum + const TARGET_PEERS: usize = 5; + + // The addrs we're currently dialing, and the networks associated with them + let dialing_peers = Arc::new(RwLock::new(HashMap::new())); + // The peers we're 
currently connected to, and the networks associated with them + let connected_peers = Arc::new(RwLock::new(HashMap::>::new())); + // Find and connect to peers - let (pending_p2p_connections_send, mut pending_p2p_connections_recv) = + let (connect_to_network_send, mut connect_to_network_recv) = tokio::sync::mpsc::unbounded_channel(); let (to_dial_send, mut to_dial_recv) = tokio::sync::mpsc::unbounded_channel(); tokio::spawn({ - let pending_p2p_connections_send = pending_p2p_connections_send.clone(); + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + + let connect_to_network_send = connect_to_network_send.clone(); async move { loop { - // TODO: Add better peer management logic? - { - let connect = |addr: Multiaddr| { + let connect = |network: NetworkId, addr: Multiaddr| { + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let to_dial_send = to_dial_send.clone(); + let connect_to_network_send = connect_to_network_send.clone(); + async move { log::info!("found peer from substrate: {addr}"); let protocols = addr.iter().filter_map(|piece| match piece { @@ -318,44 +412,97 @@ impl LibP2p { let addr = new_addr; log::debug!("transformed found peer: {addr}"); - // TODO: Check this isn't a duplicate - to_dial_send.send(addr).unwrap(); - }; - - // TODO: We should also connect to random peers from random nets as needed for - // cosigning - let mut to_retry = vec![]; - while let Some(network) = pending_p2p_connections_recv.recv().await { - if let Ok(mut nodes) = serai.p2p_validators(network).await { - // If there's an insufficient amount of nodes known, connect to all yet add it - // back and break - if nodes.len() < 3 { - log::warn!( - "insufficient amount of P2P nodes known for {:?}: {}", - network, - nodes.len() - ); - to_retry.push(network); - for node in nodes { - connect(node); - } - continue; + let (is_fresh_dial, nets) = { + let mut dialing_peers = dialing_peers.write().await; + let 
is_fresh_dial = !dialing_peers.contains_key(&addr); + if is_fresh_dial { + dialing_peers.insert(addr.clone(), HashSet::new()); } + // Associate this network with this peer + dialing_peers.get_mut(&addr).unwrap().insert(network); - // Randomly select up to 5 - for _ in 0 .. 5 { - if !nodes.is_empty() { - let to_connect = nodes.swap_remove( - usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) - .unwrap(), - ); - connect(to_connect); + let nets = dialing_peers.get(&addr).unwrap().clone(); + (is_fresh_dial, nets) + }; + + // Spawn a task to remove this peer from 'dialing' in sixty seconds, in case dialing + // fails + // This performs cleanup and bounds the size of the map to whatever growth occurs + // within a temporal window + tokio::spawn({ + let dialing_peers = dialing_peers.clone(); + let connected_peers = connected_peers.clone(); + let connect_to_network_send = connect_to_network_send.clone(); + let addr = addr.clone(); + async move { + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + let mut dialing_peers = dialing_peers.write().await; + if let Some(expected_nets) = dialing_peers.remove(&addr) { + log::debug!("removed addr from dialing upon timeout: {addr}"); + + // TODO: De-duplicate this below instance + // If we failed to dial and haven't gotten enough actual connections, retry + let connected_peers = connected_peers.read().await; + for net in expected_nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < TARGET_PEERS { + connect_to_network_send.send(net).expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)", + ); + } + } } } + }); + + if is_fresh_dial { + to_dial_send.send((addr, nets)).unwrap(); } } - for to_retry in to_retry { - pending_p2p_connections_send.send(to_retry).unwrap(); + }; + + // TODO: We should also connect to 
random peers from random nets as needed for + // cosigning + + // Drain the chainnel, de-duplicating any networks in it + let mut connect_to_network_networks = HashSet::new(); + while let Ok(network) = connect_to_network_recv.try_recv() { + connect_to_network_networks.insert(network); + } + for network in connect_to_network_networks { + if let Ok(mut nodes) = serai.p2p_validators(network).await { + // If there's an insufficient amount of nodes known, connect to all yet add it + // back and break + if nodes.len() < TARGET_PEERS { + log::warn!( + "insufficient amount of P2P nodes known for {:?}: {}", + network, + nodes.len() + ); + // Retry this later + connect_to_network_send.send(network).unwrap(); + for node in nodes { + connect(network, node).await; + } + continue; + } + + // Randomly select up to 150% of the TARGET_PEERS + for _ in 0 .. ((3 * TARGET_PEERS) / 2) { + if !nodes.is_empty() { + let to_connect = nodes.swap_remove( + usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap()) + .unwrap(), + ); + connect(network, to_connect).await; + } + } } } // Sleep 60 seconds before moving to the next iteration @@ -368,35 +515,10 @@ impl LibP2p { tokio::spawn({ let mut time_of_last_p2p_message = Instant::now(); - #[allow(clippy::needless_pass_by_ref_mut)] // False positive - fn broadcast_raw( - p2p: &mut Swarm, - time_of_last_p2p_message: &mut Instant, - set: Option, - msg: Vec, - ) { - // Update the time of last message - *time_of_last_p2p_message = Instant::now(); - - let topic = - if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) }; - - match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) { - Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"), - Err(PublishError::InsufficientPeers) => { - log::warn!("failed to send p2p message due to insufficient peers") - } - Err(PublishError::MessageTooLarge) => { - panic!("tried to send a too large message: {}", hex::encode(msg)) - } - 
Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), - Err(PublishError::Duplicate) | Ok(_) => {} - } - } - async move { + let connected_peers = connected_peers.clone(); + let mut set_for_genesis = HashMap::new(); - let mut connected_peers = 0; loop { let time_since_last = Instant::now().duration_since(time_of_last_p2p_message); tokio::select! { @@ -409,7 +531,7 @@ impl LibP2p { let topic = topic_for_set(set); if subscribe { log::info!("subscribing to p2p messages for {set:?}"); - pending_p2p_connections_send.send(set.network).unwrap(); + connect_to_network_send.send(set.network).unwrap(); set_for_genesis.insert(genesis, set); swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap(); } else { @@ -419,17 +541,50 @@ impl LibP2p { } } + msg = send_recv.recv() => { + let (peer, msg): (PeerId, Vec) = + msg.expect("send_recv closed. are we shutting down?"); + swarm.behaviour_mut().reqres.send_request(&peer, msg); + }, + // Handle any queued outbound messages msg = broadcast_recv.recv() => { - let (genesis, msg): (Option<[u8; 32]>, Vec) = + // Update the time of last message + time_of_last_p2p_message = Instant::now(); + + let (kind, msg): (P2pMessageKind, Vec) = msg.expect("broadcast_recv closed. 
are we shutting down?"); - let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied()); - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - set, - msg, - ); + + if matches!(kind, P2pMessageKind::ReqRes(_)) { + // Use request/response, yet send to all connected peers + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm.behaviour_mut().reqres.send_request(&peer_id, msg.clone()); + } + } else { + // Use gossipsub + + let set = + kind.genesis().and_then(|genesis| set_for_genesis.get(&genesis).copied()); + let topic = if let Some(set) = set { + topic_for_set(set) + } else { + IdentTopic::new(LIBP2P_TOPIC) + }; + + match swarm.behaviour_mut().gossipsub.publish(topic, msg.clone()) { + Err(PublishError::SigningError(e)) => { + panic!("signing error when broadcasting: {e}") + }, + Err(PublishError::InsufficientPeers) => { + log::warn!("failed to send p2p message due to insufficient peers") + } + Err(PublishError::MessageTooLarge) => { + panic!("tried to send a too large message: {}", hex::encode(msg)) + } + Err(PublishError::TransformFailed(e)) => panic!("IdentityTransform failed: {e}"), + Err(PublishError::Duplicate) | Ok(_) => {} + } + } } // Handle new incoming messages @@ -438,42 +593,119 @@ impl LibP2p { Some(SwarmEvent::Dialing { connection_id, .. }) => { log::debug!("dialing to peer in connection ID {}", &connection_id); } - Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => { + Some(SwarmEvent::ConnectionEstablished { + peer_id, + connection_id, + endpoint, + .. 
+ }) => { if &peer_id == swarm.local_peer_id() { log::warn!("established a libp2p connection to ourselves"); swarm.close_connection(connection_id); continue; } - connected_peers += 1; - log::debug!( - "connection established to peer {} in connection ID {}, connected peers: {}", - &peer_id, - &connection_id, - connected_peers, - ); + let addr = endpoint.get_remote_address(); + let nets = { + let mut dialing_peers = dialing_peers.write().await; + if let Some(nets) = dialing_peers.remove(addr) { + nets + } else { + log::debug!("connected to a peer who we didn't have within dialing"); + HashSet::new() + } + }; + { + let mut connected_peers = connected_peers.write().await; + connected_peers.insert(addr.clone(), nets); + + log::debug!( + "connection established to peer {} in connection ID {}, connected peers: {}", + &peer_id, + &connection_id, + connected_peers.len(), + ); + } } - Some(SwarmEvent::ConnectionClosed { peer_id, .. }) => { - connected_peers -= 1; + Some(SwarmEvent::ConnectionClosed { peer_id, endpoint, .. 
}) => { + let mut connected_peers = connected_peers.write().await; + let Some(nets) = connected_peers.remove(endpoint.get_remote_address()) else { + log::debug!("closed connection to peer which wasn't in connected_peers"); + continue; + }; + // Downgrade to a read lock + let connected_peers = connected_peers.downgrade(); + + // For each net we lost a peer for, check if we still have sufficient peers + // overall + for net in nets { + let mut remaining_peers = 0; + for nets in connected_peers.values() { + if nets.contains(&net) { + remaining_peers += 1; + } + } + // If we do not, start connecting to this network again + if remaining_peers < TARGET_PEERS { + connect_to_network_send + .send(net) + .expect( + "couldn't send net to connect to due to disconnects (receiver dropped?)" + ); + } + } + log::debug!( "connection with peer {peer_id} closed, connected peers: {}", - connected_peers, + connected_peers.len(), ); } + Some(SwarmEvent::Behaviour(BehaviorEvent::Reqres( + RrEvent::Message { peer, message }, + ))) => { + let message = match message { + RrMessage::Request { request, .. } => request, + RrMessage::Response { response, .. } => response, + }; + + let mut msg_ref = message.as_slice(); + let Some(kind) = ReqResMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: peer, + kind: P2pMessageKind::ReqRes(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. are we shutting down?"); + } Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub( GsEvent::Message { propagation_source, message, .. }, ))) => { - receive_send - .send((propagation_source, message.data)) - .expect("receive_send closed. 
are we shutting down?"); + let mut msg_ref = message.data.as_slice(); + let Some(kind) = GossipMessageKind::read(&mut msg_ref) else { continue }; + let message = Message { + sender: propagation_source, + kind: P2pMessageKind::Gossip(kind), + msg: msg_ref.to_vec(), + }; + receive_send.send(message).expect("receive_send closed. are we shutting down?"); } _ => {} } } // Handle peers to dial - addr = to_dial_recv.recv() => { - let addr = addr.expect("received address was None (sender dropped?)"); + addr_and_nets = to_dial_recv.recv() => { + let (addr, nets) = + addr_and_nets.expect("received address was None (sender dropped?)"); + // If we've already dialed and connected to this address, don't further dial them + // Just associate these networks with them + if let Some(existing_nets) = connected_peers.write().await.get_mut(&addr) { + for net in nets { + existing_nets.insert(net); + } + continue; + } + if let Err(e) = swarm.dial(addr) { log::warn!("dialing peer failed: {e:?}"); } @@ -487,12 +719,13 @@ impl LibP2p { // (where a finalized block only occurs due to network activity), meaning this won't be // run () = tokio::time::sleep(Duration::from_secs(80).saturating_sub(time_since_last)) => { - broadcast_raw( - &mut swarm, - &mut time_of_last_p2p_message, - None, - P2pMessageKind::KeepAlive.serialize() - ); + time_of_last_p2p_message = Instant::now(); + for peer_id in swarm.connected_peers().copied().collect::>() { + swarm + .behaviour_mut() + .reqres + .send_request(&peer_id, ReqResMessageKind::KeepAlive.serialize()); + } } } } @@ -501,6 +734,7 @@ impl LibP2p { LibP2p { subscribe: Arc::new(Mutex::new(subscribe_send)), + send: Arc::new(Mutex::new(send_send)), broadcast: Arc::new(Mutex::new(broadcast_send)), receive: Arc::new(Mutex::new(receive_recv)), } @@ -529,22 +763,22 @@ impl P2p for LibP2p { .expect("subscribe_send closed. 
are we shutting down?"); } - async fn send_raw(&self, _: Self::Id, genesis: Option<[u8; 32]>, msg: Vec) { - self.broadcast_raw(genesis, msg).await; + async fn send_raw(&self, peer: Self::Id, msg: Vec) { + self.send.lock().await.send((peer, msg)).expect("send_send closed. are we shutting down?"); } - async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { self .broadcast .lock() .await - .send((genesis, msg)) + .send((kind, msg)) .expect("broadcast_send closed. are we shutting down?"); } // TODO: We only have a single handle call this. Differentiate Send/Recv to remove this constant // lock acquisition? - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn receive(&self) -> Message { self.receive.lock().await.recv().await.expect("receive_recv closed. are we shutting down?") } } @@ -552,7 +786,7 @@ impl P2p for LibP2p { #[async_trait] impl TributaryP2p for LibP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast(self, GossipMessageKind::Tributary(genesis), msg).await } } @@ -590,16 +824,12 @@ pub async fn heartbeat_tributaries_task( if SystemTime::now() > (block_time + Duration::from_secs(60)) { log::warn!("last known tributary block was over a minute ago"); let mut msg = tip.to_vec(); - // Also include the timestamp so LibP2p doesn't flag this as an old message re-circulating - let timestamp = SystemTime::now() + let time: u64 = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("system clock is wrong") .as_secs(); - // Divide by the block time so if multiple parties send a Heartbeat, they're more likely to - // overlap - let time_unit = timestamp / u64::from(Tributary::::block_time()); - msg.extend(time_unit.to_le_bytes()); - P2p::broadcast(&p2p, P2pMessageKind::Heartbeat(tributary.genesis()), msg).await; + msg.extend(time.to_le_bytes()); + P2p::broadcast(&p2p, 
ReqResMessageKind::Heartbeat(tributary.genesis()), msg).await; } } @@ -631,6 +861,8 @@ pub async fn handle_p2p_task( // Subscribe to the topic for this tributary p2p.subscribe(tributary.spec.set(), genesis).await; + let spec_set = tributary.spec.set(); + // Per-Tributary P2P message handler tokio::spawn({ let p2p = p2p.clone(); @@ -641,91 +873,58 @@ pub async fn handle_p2p_task( break; }; match msg.kind { - P2pMessageKind::KeepAlive => {} + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} - P2pMessageKind::Tributary(msg_genesis) => { - assert_eq!(msg_genesis, genesis); - log::trace!("handling message for tributary {:?}", tributary.spec.set()); - if tributary.tributary.handle_message(&msg.msg).await { - P2p::broadcast(&p2p, msg.kind, msg.msg).await; - } - } - - // TODO2: Rate limit this per timestamp - // And/or slash on Heartbeat which justifies a response, since the node + // TODO: Slash on Heartbeat which justifies a response, since the node // obviously was offline and we must now use our bandwidth to compensate for // them? - P2pMessageKind::Heartbeat(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Heartbeat(msg_genesis)) => { assert_eq!(msg_genesis, genesis); if msg.msg.len() != 40 { log::error!("validator sent invalid heartbeat"); continue; } + // Only respond to recent heartbeats + let msg_time = u64::from_le_bytes(msg.msg[32 .. 
40].try_into().expect( + "length-checked heartbeat message didn't have 8 bytes for the u64", + )); + if SystemTime::now() + .duration_since(SystemTime::UNIX_EPOCH) + .expect("system clock is wrong") + .as_secs() + .saturating_sub(msg_time) > + 10 + { + continue; + } + + log::debug!("received heartbeat with a recent timestamp"); + + let reader = tributary.tributary.reader(); let p2p = p2p.clone(); - let spec = tributary.spec.clone(); - let reader = tributary.tributary.reader(); // Spawn a dedicated task as this may require loading large amounts of data // from disk and take a notable amount of time tokio::spawn(async move { - /* - // Have sqrt(n) nodes reply with the blocks - let mut responders = (tributary.spec.n() as f32).sqrt().floor() as u64; - // Try to have at least 3 responders - if responders < 3 { - responders = tributary.spec.n().min(3).into(); - } - */ - - /* - // Have up to three nodes respond - let responders = u64::from(spec.n().min(3)); - - // Decide which nodes will respond by using the latest block's hash as a - // mutually agreed upon entropy source - // This isn't a secure source of entropy, yet it's fine for this - let entropy = u64::from_le_bytes(reader.tip()[.. 8].try_into().unwrap()); - // If n = 10, responders = 3, we want `start` to be 0 ..= 7 - // (so the highest is 7, 8, 9) - // entropy % (10 + 1) - 3 = entropy % 8 = 0 ..= 7 - let start = - usize::try_from(entropy % (u64::from(spec.n() + 1) - responders)) - .unwrap(); - let mut selected = false; - for validator in &spec.validators() - [start .. 
(start + usize::try_from(responders).unwrap())] - { - if our_key == validator.0 { - selected = true; - break; - } - } - if !selected { - log::debug!("received heartbeat and not selected to respond"); - return; - } - - log::debug!("received heartbeat and selected to respond"); - */ - - // Have every node respond - // While we could only have a subset respond, LibP2P will sync all messages - // it isn't aware of - // It's cheaper to be aware from our disk than from over the network - // TODO: Spawn a dedicated topic for this heartbeat response? let mut latest = msg.msg[.. 32].try_into().unwrap(); + let mut to_send = vec![]; while let Some(next) = reader.block_after(&latest) { - let mut res = reader.block(&next).unwrap().serialize(); - res.extend(reader.commit(&next).unwrap()); - // Also include the timestamp used within the Heartbeat - res.extend(&msg.msg[32 .. 40]); - p2p.send(msg.sender, P2pMessageKind::Block(spec.genesis()), res).await; + to_send.push(next); latest = next; } + if to_send.len() > 3 { + for next in to_send { + let mut res = reader.block(&next).unwrap().serialize(); + res.extend(reader.commit(&next).unwrap()); + // Also include the timestamp used within the Heartbeat + res.extend(&msg.msg[32 .. 
40]); + p2p.send(msg.sender, ReqResMessageKind::Block(genesis), res).await; + } + } }); } - P2pMessageKind::Block(msg_genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => { assert_eq!(msg_genesis, genesis); let mut msg_ref: &[u8] = msg.msg.as_ref(); let Ok(block) = Block::::read(&mut msg_ref) else { @@ -744,7 +943,15 @@ pub async fn handle_p2p_task( ); } - P2pMessageKind::CosignedBlock => unreachable!(), + P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => { + assert_eq!(msg_genesis, genesis); + log::trace!("handling message for tributary {:?}", spec_set); + if tributary.tributary.handle_message(&msg.msg).await { + P2p::broadcast(&p2p, msg.kind, msg.msg).await; + } + } + + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => unreachable!(), } } } @@ -764,15 +971,16 @@ pub async fn handle_p2p_task( loop { let msg = p2p.receive().await; match msg.kind { - P2pMessageKind::KeepAlive => {} - P2pMessageKind::Tributary(genesis) | - P2pMessageKind::Heartbeat(genesis) | - P2pMessageKind::Block(genesis) => { + P2pMessageKind::ReqRes(ReqResMessageKind::KeepAlive) => {} + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) | + P2pMessageKind::ReqRes( + ReqResMessageKind::Heartbeat(genesis) | ReqResMessageKind::Block(genesis), + ) => { if let Some(channel) = channels.read().await.get(&genesis) { channel.send(msg).unwrap(); } } - P2pMessageKind::CosignedBlock => { + P2pMessageKind::Gossip(GossipMessageKind::CosignedBlock) => { let Ok(msg) = CosignedBlock::deserialize_reader(&mut msg.msg.as_slice()) else { log::error!("received CosignedBlock message with invalidly serialized contents"); continue; diff --git a/coordinator/src/tests/mod.rs b/coordinator/src/tests/mod.rs index 45a62297..db4c158f 100644 --- a/coordinator/src/tests/mod.rs +++ b/coordinator/src/tests/mod.rs @@ -14,7 +14,7 @@ use tokio::sync::RwLock; use crate::{ processors::{Message, Processors}, - TributaryP2p, P2pMessageKind, P2p, + TributaryP2p, 
ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p, }; pub mod tributary; @@ -45,7 +45,10 @@ impl Processors for MemProcessors { #[allow(clippy::type_complexity)] #[derive(Clone, Debug)] -pub struct LocalP2p(usize, pub Arc>, Vec)>>)>>); +pub struct LocalP2p( + usize, + pub Arc>, Vec)>>)>>, +); impl LocalP2p { pub fn new(validators: usize) -> Vec { @@ -65,11 +68,13 @@ impl P2p for LocalP2p { async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {} - async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec) { - self.1.write().await.1[to].push_back((self.0, msg)); + async fn send_raw(&self, to: Self::Id, msg: Vec) { + let mut msg_ref = msg.as_slice(); + let kind = ReqResMessageKind::read(&mut msg_ref).unwrap(); + self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec())); } - async fn broadcast_raw(&self, _genesis: Option<[u8; 32]>, msg: Vec) { + async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec) { // Content-based deduplication let mut lock = self.1.write().await; { @@ -81,19 +86,26 @@ impl P2p for LocalP2p { } let queues = &mut lock.1; + let kind_len = (match kind { + P2pMessageKind::ReqRes(kind) => kind.serialize(), + P2pMessageKind::Gossip(kind) => kind.serialize(), + }) + .len(); + let msg = msg[kind_len ..].to_vec(); + for (i, msg_queue) in queues.iter_mut().enumerate() { if i == self.0 { continue; } - msg_queue.push_back((self.0, msg.clone())); + msg_queue.push_back((self.0, kind, msg.clone())); } } - async fn receive_raw(&self) -> (Self::Id, Vec) { + async fn receive(&self) -> P2pMessage { // This is a cursed way to implement an async read from a Vec loop { - if let Some(res) = self.1.write().await.1[self.0].pop_front() { - return res; + if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() { + return P2pMessage { sender, kind, msg }; } 
tokio::time::sleep(std::time::Duration::from_millis(100)).await; } @@ -103,6 +115,11 @@ impl P2p for LocalP2p { #[async_trait] impl TributaryP2p for LocalP2p { async fn broadcast(&self, genesis: [u8; 32], msg: Vec) { - ::broadcast(self, P2pMessageKind::Tributary(genesis), msg).await + ::broadcast( + self, + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)), + msg, + ) + .await } } diff --git a/coordinator/src/tests/tributary/chain.rs b/coordinator/src/tests/tributary/chain.rs index 360af7ec..7fc6a064 100644 --- a/coordinator/src/tests/tributary/chain.rs +++ b/coordinator/src/tests/tributary/chain.rs @@ -26,7 +26,7 @@ use serai_db::MemDb; use tributary::Tributary; use crate::{ - P2pMessageKind, P2p, + GossipMessageKind, P2pMessageKind, P2p, tributary::{Transaction, TributarySpec}, tests::LocalP2p, }; @@ -98,7 +98,7 @@ pub async fn run_tributaries( for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); if tributary.handle_message(&msg.msg).await { p2p.broadcast(msg.kind, msg.msg).await; @@ -173,7 +173,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } @@ -199,7 +199,7 @@ async fn tributary_test() { for (p2p, tributary) in &mut tributaries { while let Poll::Ready(msg) = poll!(p2p.receive()) { match msg.kind { - P2pMessageKind::Tributary(genesis) => { + P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => { assert_eq!(genesis, tributary.genesis()); tributary.handle_message(&msg.msg).await; } diff --git a/coordinator/src/tests/tributary/sync.rs 
b/coordinator/src/tests/tributary/sync.rs index 0a468c63..18f60864 100644 --- a/coordinator/src/tests/tributary/sync.rs +++ b/coordinator/src/tests/tributary/sync.rs @@ -116,8 +116,8 @@ async fn sync_test() { .map_err(|_| "failed to send ActiveTributary to heartbeat") .unwrap(); - // The heartbeat is once every 10 blocks - sleep(Duration::from_secs(10 * block_time)).await; + // The heartbeat is once every 10 blocks, with some limitations + sleep(Duration::from_secs(20 * block_time)).await; assert!(syncer_tributary.tip().await != spec.genesis()); // Verify it synced to the tip diff --git a/coordinator/tributary/src/lib.rs b/coordinator/tributary/src/lib.rs index 99deb588..a4c6bfe5 100644 --- a/coordinator/tributary/src/lib.rs +++ b/coordinator/tributary/src/lib.rs @@ -59,8 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50; pub const BLOCK_SIZE_LIMIT: usize = 3_001_000; pub(crate) const TENDERMINT_MESSAGE: u8 = 0; -pub(crate) const BLOCK_MESSAGE: u8 = 1; -pub(crate) const TRANSACTION_MESSAGE: u8 = 2; +pub(crate) const TRANSACTION_MESSAGE: u8 = 1; #[allow(clippy::large_enum_variant)] #[derive(Clone, PartialEq, Eq, Debug)] @@ -336,9 +335,6 @@ impl Tributary { // Return true if the message should be rebroadcasted. 
pub async fn handle_message(&self, msg: &[u8]) -> bool { - // Acquire the lock now to prevent sync_block from being run at the same time - let mut sync_block = self.synced_block_result.write().await; - match msg.first() { Some(&TRANSACTION_MESSAGE) => { let Ok(tx) = Transaction::read::<&[u8]>(&mut &msg[1 ..]) else { @@ -370,19 +366,6 @@ impl Tributary { false } - Some(&BLOCK_MESSAGE) => { - let mut msg_ref = &msg[1 ..]; - let Ok(block) = Block::::read(&mut msg_ref) else { - log::error!("received invalid block message"); - return false; - }; - let commit = msg[(msg.len() - msg_ref.len()) ..].to_vec(); - if self.sync_block_internal(block, commit, &mut sync_block).await { - log::debug!("synced block over p2p net instead of building the commit ourselves"); - } - false - } - _ => false, } } diff --git a/coordinator/tributary/src/provided.rs b/coordinator/tributary/src/provided.rs index 103286af..27c5f3cd 100644 --- a/coordinator/tributary/src/provided.rs +++ b/coordinator/tributary/src/provided.rs @@ -74,7 +74,7 @@ impl ProvidedTransactions { panic!("provided transaction saved to disk wasn't provided"); }; - if res.transactions.get(order).is_none() { + if !res.transactions.contains_key(order) { res.transactions.insert(order, VecDeque::new()); } res.transactions.get_mut(order).unwrap().push_back(tx); @@ -135,7 +135,7 @@ impl ProvidedTransactions { txn.put(current_provided_key, currently_provided); txn.commit(); - if self.transactions.get(order).is_none() { + if !self.transactions.contains_key(order) { self.transactions.insert(order, VecDeque::new()); } self.transactions.get_mut(order).unwrap().push_back(tx); diff --git a/coordinator/tributary/src/tendermint/mod.rs b/coordinator/tributary/src/tendermint/mod.rs index df8f7219..e38efa5d 100644 --- a/coordinator/tributary/src/tendermint/mod.rs +++ b/coordinator/tributary/src/tendermint/mod.rs @@ -41,9 +41,8 @@ use tendermint::{ use tokio::sync::RwLock; use crate::{ - TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, BLOCK_MESSAGE, 
ReadWrite, - transaction::Transaction as TransactionTrait, Transaction, BlockHeader, Block, BlockError, - Blockchain, P2p, + TENDERMINT_MESSAGE, TRANSACTION_MESSAGE, ReadWrite, transaction::Transaction as TransactionTrait, + Transaction, BlockHeader, Block, BlockError, Blockchain, P2p, }; pub mod tx; @@ -414,12 +413,7 @@ impl Network for TendermintNetwork ); match block_res { Ok(()) => { - // If we successfully added this block, broadcast it - // TODO: Move this under the coordinator once we set up on new block notifications? - let mut msg = serialized_block.0; - msg.insert(0, BLOCK_MESSAGE); - msg.extend(encoded_commit); - self.p2p.broadcast(self.genesis, msg).await; + // If we successfully added this block, break break; } Err(BlockError::NonLocalProvided(hash)) => { @@ -428,6 +422,7 @@ impl Network for TendermintNetwork hex::encode(hash), hex::encode(self.genesis) ); + tokio::time::sleep(core::time::Duration::from_secs(5)).await; } _ => return invalid_block(), } diff --git a/coordinator/tributary/tendermint/src/block.rs b/coordinator/tributary/tendermint/src/block.rs index 71dfb3cc..6dfacfdb 100644 --- a/coordinator/tributary/tendermint/src/block.rs +++ b/coordinator/tributary/tendermint/src/block.rs @@ -139,10 +139,8 @@ impl BlockData { // 27, 33, 41, 46, 60, 64 self.round_mut().step = data.step(); - // Only return a message to if we're actually a current validator and haven't prior posted a - // message + // Only return a message to if we're actually a current validator let round_number = self.round().number; - let step = data.step(); let res = self.validator_id.map(|validator_id| Message { sender: validator_id, block: self.number, @@ -150,21 +148,59 @@ impl BlockData { data, }); - if res.is_some() { + if let Some(res) = res.as_ref() { + const LATEST_BLOCK_KEY: &[u8] = b"tendermint-machine-sent_block"; + const LATEST_ROUND_KEY: &[u8] = b"tendermint-machine-sent_round"; + const PROPOSE_KEY: &[u8] = b"tendermint-machine-sent_propose"; + const PEVOTE_KEY: &[u8] = 
b"tendermint-machine-sent_prevote"; + const PRECOMMIT_KEY: &[u8] = b"tendermint-machine-sent_commit"; + + let genesis = self.genesis; + let key = |prefix: &[u8]| [prefix, &genesis].concat(); + let mut txn = self.db.txn(); - let key = [ - b"tendermint-machine_already_sent_message".as_ref(), - &self.genesis, - &self.number.0.to_le_bytes(), - &round_number.0.to_le_bytes(), - &step.encode(), - ] - .concat(); - // If we've already sent a message, return - if txn.get(&key).is_some() { + + // Ensure we haven't prior sent a message for a future block/round + let last_block_or_round = |txn: &mut ::Transaction<'_>, prefix, current| { + let key = key(prefix); + let latest = + u64::from_le_bytes(txn.get(key.as_slice()).unwrap_or(vec![0; 8]).try_into().unwrap()); + if latest > current { + None?; + } + if current > latest { + txn.put(&key, current.to_le_bytes()); + return Some(true); + } + Some(false) + }; + let new_block = last_block_or_round(&mut txn, LATEST_BLOCK_KEY, self.number.0)?; + if new_block { + // Delete the latest round key + txn.del(&key(LATEST_ROUND_KEY)); + } + let new_round = last_block_or_round(&mut txn, LATEST_ROUND_KEY, round_number.0.into())?; + if new_block || new_round { + // Delete the messages for the old round + txn.del(&key(PROPOSE_KEY)); + txn.del(&key(PEVOTE_KEY)); + txn.del(&key(PRECOMMIT_KEY)); + } + + // Check we haven't sent this message within this round + let msg_key = key(match res.data.step() { + Step::Propose => PROPOSE_KEY, + Step::Prevote => PEVOTE_KEY, + Step::Precommit => PRECOMMIT_KEY, + }); + if txn.get(&msg_key).is_some() { + assert!(!new_block); + assert!(!new_round); None?; } - txn.put(&key, []); + // Put this message to the DB + txn.put(&msg_key, res.encode()); + txn.commit(); } diff --git a/coordinator/tributary/tendermint/src/lib.rs b/coordinator/tributary/tendermint/src/lib.rs index 77805677..adc6fef7 100644 --- a/coordinator/tributary/tendermint/src/lib.rs +++ b/coordinator/tributary/tendermint/src/lib.rs @@ -313,11 +313,16 @@ 
impl TendermintMachine { let time_until_round_end = round_end.instant().saturating_duration_since(Instant::now()); if time_until_round_end == Duration::ZERO { log::trace!( + target: "tendermint", "resetting when prior round ended {}ms ago", Instant::now().saturating_duration_since(round_end.instant()).as_millis(), ); } - log::trace!("sleeping until round ends in {}ms", time_until_round_end.as_millis()); + log::trace!( + target: "tendermint", + "sleeping until round ends in {}ms", + time_until_round_end.as_millis(), + ); sleep(time_until_round_end).await; // Clear our outbound message queue @@ -509,7 +514,7 @@ impl TendermintMachine { match step { Step::Propose => { // Slash the validator for not proposing when they should've - log::debug!(target: "tendermint", "Validator didn't propose when they should have"); + log::debug!(target: "tendermint", "validator didn't propose when they should have"); // this slash will be voted on. self.slash( self.weights.proposer(self.block.number, self.block.round().number), @@ -598,7 +603,11 @@ impl TendermintMachine { ); let id = block.id(); let proposal = self.network.add_block(block, commit).await; - log::trace!("added block {} (produced by machine)", hex::encode(id.as_ref())); + log::trace!( + target: "tendermint", + "added block {} (produced by machine)", + hex::encode(id.as_ref()), + ); self.reset(msg.round, proposal).await; } Err(TendermintError::Malicious(sender, evidence)) => { @@ -692,7 +701,12 @@ impl TendermintMachine { (msg.round == self.block.round().number) && (msg.data.step() == Step::Propose) { - log::trace!("received Propose for block {}, round {}", msg.block.0, msg.round.0); + log::trace!( + target: "tendermint", + "received Propose for block {}, round {}", + msg.block.0, + msg.round.0, + ); } // If this is a precommit, verify its signature @@ -710,7 +724,13 @@ impl TendermintMachine { if !self.block.log.log(signed.clone())? 
{ return Err(TendermintError::AlreadyHandled); } - log::debug!(target: "tendermint", "received new tendermint message"); + log::trace!( + target: "tendermint", + "received new tendermint message (block: {}, round: {}, step: {:?})", + msg.block.0, + msg.round.0, + msg.data.step(), + ); // All functions, except for the finalizer and the jump, are locked to the current round @@ -757,6 +777,13 @@ impl TendermintMachine { // 55-56 // Jump, enabling processing by the below code if self.block.log.round_participation(msg.round) > self.weights.fault_threshold() { + log::debug!( + target: "tendermint", + "jumping from round {} to round {}", + self.block.round().number.0, + msg.round.0, + ); + // Jump to the new round. let proposer = self.round(msg.round, None); @@ -814,13 +841,26 @@ impl TendermintMachine { if (self.block.round().step == Step::Prevote) && matches!(msg.data, Data::Prevote(_)) { let (participation, weight) = self.block.log.message_instances(self.block.round().number, &Data::Prevote(None)); + let threshold_weight = self.weights.threshold(); + if participation < threshold_weight { + log::trace!( + target: "tendermint", + "progess towards setting prevote timeout, participation: {}, needed: {}", + participation, + threshold_weight, + ); + } // 34-35 - if participation >= self.weights.threshold() { + if participation >= threshold_weight { + log::trace!( + target: "tendermint", + "setting timeout for prevote due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Prevote); } // 44-46 - if weight >= self.weights.threshold() { + if weight >= threshold_weight { self.broadcast(Data::Precommit(None)); return Ok(None); } @@ -830,6 +870,10 @@ impl TendermintMachine { if matches!(msg.data, Data::Precommit(_)) && self.block.log.has_participation(self.block.round().number, Step::Precommit) { + log::trace!( + target: "tendermint", + "setting timeout for precommit due to sufficient participation", + ); self.block.round_mut().set_timeout(Step::Precommit); 
} diff --git a/coordinator/tributary/tendermint/src/message_log.rs b/coordinator/tributary/tendermint/src/message_log.rs index e045189b..3959852d 100644 --- a/coordinator/tributary/tendermint/src/message_log.rs +++ b/coordinator/tributary/tendermint/src/message_log.rs @@ -1,6 +1,5 @@ use std::{sync::Arc, collections::HashMap}; -use log::debug; use parity_scale_codec::Encode; use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence}; @@ -27,7 +26,7 @@ impl MessageLog { let step = msg.data.step(); if let Some(existing) = msgs.get(&step) { if existing.msg.data != msg.data { - debug!( + log::debug!( target: "tendermint", "Validator sent multiple messages for the same block + round + step" ); diff --git a/crypto/dalek-ff-group/Cargo.toml b/crypto/dalek-ff-group/Cargo.toml index 0fe4bce0..d8a92194 100644 --- a/crypto/dalek-ff-group/Cargo.toml +++ b/crypto/dalek-ff-group/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dalek-ff-gr authors = ["Luke Parker "] keywords = ["curve25519", "ed25519", "ristretto", "dalek", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dkg/Cargo.toml b/crypto/dkg/Cargo.toml index a8d3f0a8..bf308705 100644 --- a/crypto/dkg/Cargo.toml +++ b/crypto/dkg/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dkg" authors = ["Luke Parker "] keywords = ["dkg", "multisig", "threshold", "ff", "group"] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/crypto/dleq/Cargo.toml b/crypto/dleq/Cargo.toml index 7d8c87e9..c9d525e1 100644 --- a/crypto/dleq/Cargo.toml +++ b/crypto/dleq/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/crypto/dleq" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.73" +rust-version = 
"1.74" [package.metadata.docs.rs] all-features = true diff --git a/crypto/ed448/Cargo.toml b/crypto/ed448/Cargo.toml index 2302d7b3..b0d0026e 100644 --- a/crypto/ed448/Cargo.toml +++ b/crypto/ed448/Cargo.toml @@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/crypto/ed448" authors = ["Luke Parker "] keywords = ["ed448", "ff", "group"] edition = "2021" -rust-version = "1.65" +rust-version = "1.66" [package.metadata.docs.rs] all-features = true diff --git a/crypto/frost/Cargo.toml b/crypto/frost/Cargo.toml index 128a3667..b89d5290 100644 --- a/crypto/frost/Cargo.toml +++ b/crypto/frost/Cargo.toml @@ -38,7 +38,6 @@ ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] } schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] } -dleq = { path = "../dleq", version = "^0.4.1", default-features = false, features = ["std", "serialize"] } dkg = { path = "../dkg", version = "^0.5.1", default-features = false, features = ["std"] } diff --git a/crypto/frost/src/algorithm.rs b/crypto/frost/src/algorithm.rs index f2da59ea..0b0abd6c 100644 --- a/crypto/frost/src/algorithm.rs +++ b/crypto/frost/src/algorithm.rs @@ -39,6 +39,13 @@ pub trait Algorithm: Send + Sync + Clone { /// Obtain the list of nonces to generate, as specified by the generators to create commitments /// against per-nonce. + /// + /// The Algorithm is responsible for all transcripting of these nonce specifications/generators. + /// + /// The prover will be passed the commitments, and the commitments will be sent to all other + /// participants. No guarantees the commitments are internally consistent (have the same discrete + /// logarithm across generators) are made. Any Algorithm which specifies multiple generators for + /// a single nonce must handle that itself. 
fn nonces(&self) -> Vec>; /// Generate an addendum to FROST"s preprocessing stage. diff --git a/crypto/frost/src/nonce.rs b/crypto/frost/src/nonce.rs index 8638baff..f76f9bc4 100644 --- a/crypto/frost/src/nonce.rs +++ b/crypto/frost/src/nonce.rs @@ -1,13 +1,9 @@ // FROST defines its nonce as sum(Di, Ei * bi) -// Monero needs not just the nonce over G however, yet also over H -// Then there is a signature (a modified Chaum Pedersen proof) using multiple nonces at once // -// Accordingly, in order for this library to be robust, it supports generating an arbitrary amount -// of nonces, each against an arbitrary list of generators +// In order for this library to be robust, it supports generating an arbitrary amount of nonces, +// each against an arbitrary list of generators // // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b) -// When representations across multiple generators are provided, a DLEq proof is also provided to -// confirm their integrity use core::ops::Deref; use std::{ @@ -24,32 +20,8 @@ use transcript::Transcript; use ciphersuite::group::{ff::PrimeField, Group, GroupEncoding}; use multiexp::multiexp_vartime; -use dleq::MultiDLEqProof; - use crate::{curve::Curve, Participant}; -// Transcript used to aggregate binomial nonces for usage within a single DLEq proof. 
-fn aggregation_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST DLEq Aggregation v0.5"); - transcript.append_message(b"context", context); - transcript -} - -// Every participant proves for their commitments at the start of the protocol -// These proofs are verified sequentially, requiring independent transcripts -// In order to make these transcripts more robust, the FROST transcript (at time of preprocess) is -// challenged in order to create a commitment to it, carried in each independent transcript -// (effectively forking the original transcript) -// -// For FROST, as defined by the IETF, this will do nothing (and this transcript will never even be -// constructed). For higher level protocols, the transcript may have contextual info these proofs -// will then be bound to -fn dleq_transcript(context: &[u8]) -> T { - let mut transcript = T::new(b"FROST Commitments DLEq v0.5"); - transcript.append_message(b"context", context); - transcript -} - // Each nonce is actually a pair of random scalars, notated as d, e under the FROST paper // This is considered a single nonce as r = d + be #[derive(Clone, Zeroize)] @@ -69,7 +41,7 @@ impl GeneratorCommitments { } } -// A single nonce's commitments and relevant proofs +// A single nonce's commitments #[derive(Clone, PartialEq, Eq)] pub(crate) struct NonceCommitments { // Called generators as these commitments are indexed by generator later on @@ -121,12 +93,6 @@ impl NonceCommitments { t.append_message(b"commitment_E", commitments.0[1].to_bytes()); } } - - fn aggregation_factor(&self, context: &[u8]) -> C::F { - let mut transcript = aggregation_transcript::(context); - self.transcript(&mut transcript); - ::hash_to_F(b"dleq_aggregation", transcript.challenge(b"binding").as_ref()) - } } /// Commitments for all the nonces across all their generators. 
@@ -135,51 +101,26 @@ pub(crate) struct Commitments { // Called nonces as these commitments are indexed by nonce // So to get the commitments for the first nonce, it'd be commitments.nonces[0] pub(crate) nonces: Vec>, - // DLEq Proof proving that each set of commitments were generated using a single pair of discrete - // logarithms - pub(crate) dleq: Option>, } impl Commitments { - pub(crate) fn new( + pub(crate) fn new( rng: &mut R, secret_share: &Zeroizing, planned_nonces: &[Vec], - context: &[u8], ) -> (Vec>, Commitments) { let mut nonces = vec![]; let mut commitments = vec![]; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; for generators in planned_nonces { let (nonce, these_commitments): (Nonce, _) = NonceCommitments::new(&mut *rng, secret_share, generators); - if generators.len() > 1 { - dleq_generators.push(generators.clone()); - dleq_nonces.push(Zeroizing::new( - (these_commitments.aggregation_factor::(context) * nonce.0[1].deref()) + - nonce.0[0].deref(), - )); - } - nonces.push(nonce); commitments.push(these_commitments); } - let dleq = if !dleq_generators.is_empty() { - Some(MultiDLEqProof::prove( - rng, - &mut dleq_transcript::(context), - &dleq_generators, - &dleq_nonces, - )) - } else { - None - }; - - (nonces, Commitments { nonces: commitments, dleq }) + (nonces, Commitments { nonces: commitments }) } pub(crate) fn transcript(&self, t: &mut T) { @@ -187,58 +128,20 @@ impl Commitments { for nonce in &self.nonces { nonce.transcript(t); } - - // Transcripting the DLEqs implicitly transcripts the exact generators used for the nonces in - // an exact order - // This means it shouldn't be possible for variadic generators to cause conflicts - if let Some(dleq) = &self.dleq { - t.append_message(b"dleq", dleq.serialize()); - } } - pub(crate) fn read( - reader: &mut R, - generators: &[Vec], - context: &[u8], - ) -> io::Result { + pub(crate) fn read(reader: &mut R, generators: &[Vec]) -> io::Result { let nonces = (0 .. 
generators.len()) .map(|i| NonceCommitments::read(reader, &generators[i])) .collect::>, _>>()?; - let mut dleq_generators = vec![]; - let mut dleq_nonces = vec![]; - for (generators, nonce) in generators.iter().cloned().zip(&nonces) { - if generators.len() > 1 { - let binding = nonce.aggregation_factor::(context); - let mut aggregated = vec![]; - for commitments in &nonce.generators { - aggregated.push(commitments.0[0] + (commitments.0[1] * binding)); - } - dleq_generators.push(generators); - dleq_nonces.push(aggregated); - } - } - - let dleq = if !dleq_generators.is_empty() { - let dleq = MultiDLEqProof::read(reader, dleq_generators.len())?; - dleq - .verify(&mut dleq_transcript::(context), &dleq_generators, &dleq_nonces) - .map_err(|_| io::Error::other("invalid DLEq proof"))?; - Some(dleq) - } else { - None - }; - - Ok(Commitments { nonces, dleq }) + Ok(Commitments { nonces }) } pub(crate) fn write(&self, writer: &mut W) -> io::Result<()> { for nonce in &self.nonces { nonce.write(writer)?; } - if let Some(dleq) = &self.dleq { - dleq.write(writer)?; - } Ok(()) } } diff --git a/crypto/frost/src/sign.rs b/crypto/frost/src/sign.rs index a716dc58..73ea0a7d 100644 --- a/crypto/frost/src/sign.rs +++ b/crypto/frost/src/sign.rs @@ -125,14 +125,8 @@ impl> AlgorithmMachine { let mut params = self.params; let mut rng = ChaCha20Rng::from_seed(*seed.0); - // Get a challenge to the existing transcript for use when proving for the commitments - let commitments_challenge = params.algorithm.transcript().challenge(b"commitments"); - let (nonces, commitments) = Commitments::new::<_, A::Transcript>( - &mut rng, - params.keys.secret_share(), - ¶ms.algorithm.nonces(), - commitments_challenge.as_ref(), - ); + let (nonces, commitments) = + Commitments::new::<_>(&mut rng, params.keys.secret_share(), ¶ms.algorithm.nonces()); let addendum = params.algorithm.preprocess_addendum(&mut rng, ¶ms.keys); let preprocess = Preprocess { commitments, addendum }; @@ -141,27 +135,18 @@ impl> 
AlgorithmMachine { let mut blame_entropy = [0; 32]; rng.fill_bytes(&mut blame_entropy); ( - AlgorithmSignMachine { - params, - seed, - commitments_challenge, - nonces, - preprocess: preprocess.clone(), - blame_entropy, - }, + AlgorithmSignMachine { params, seed, nonces, preprocess: preprocess.clone(), blame_entropy }, preprocess, ) } #[cfg(any(test, feature = "tests"))] pub(crate) fn unsafe_override_preprocess( - mut self, + self, nonces: Vec>, preprocess: Preprocess, ) -> AlgorithmSignMachine { AlgorithmSignMachine { - commitments_challenge: self.params.algorithm.transcript().challenge(b"commitments"), - params: self.params, seed: CachedPreprocess(Zeroizing::new([0; 32])), @@ -255,8 +240,6 @@ pub struct AlgorithmSignMachine> { params: Params, seed: CachedPreprocess, - #[zeroize(skip)] - commitments_challenge: ::Challenge, pub(crate) nonces: Vec>, // Skips the preprocess due to being too large a bound to feasibly enforce on users #[zeroize(skip)] @@ -285,11 +268,7 @@ impl> SignMachine for AlgorithmSignMachi fn read_preprocess(&self, reader: &mut R) -> io::Result { Ok(Preprocess { - commitments: Commitments::read::<_, A::Transcript>( - reader, - &self.params.algorithm.nonces(), - self.commitments_challenge.as_ref(), - )?, + commitments: Commitments::read::<_>(reader, &self.params.algorithm.nonces())?, addendum: self.params.algorithm.read_addendum(reader)?, }) } diff --git a/crypto/frost/src/tests/mod.rs b/crypto/frost/src/tests/mod.rs index e457c703..f93a5fbf 100644 --- a/crypto/frost/src/tests/mod.rs +++ b/crypto/frost/src/tests/mod.rs @@ -12,7 +12,7 @@ use crate::{ /// Tests for the nonce handling code. pub mod nonces; -use nonces::{test_multi_nonce, test_invalid_commitment, test_invalid_dleq_proof}; +use nonces::test_multi_nonce; /// Vectorized test suite to ensure consistency. 
pub mod vectors; @@ -267,6 +267,4 @@ pub fn test_ciphersuite>(rng: &mut test_schnorr_blame::(rng); test_multi_nonce::(rng); - test_invalid_commitment::(rng); - test_invalid_dleq_proof::(rng); } diff --git a/crypto/frost/src/tests/nonces.rs b/crypto/frost/src/tests/nonces.rs index ee060bef..7b1480e9 100644 --- a/crypto/frost/src/tests/nonces.rs +++ b/crypto/frost/src/tests/nonces.rs @@ -9,14 +9,12 @@ use transcript::{Transcript, RecommendedTranscript}; use ciphersuite::group::{ff::Field, Group, GroupEncoding}; -use dleq::MultiDLEqProof; pub use dkg::tests::{key_gen, recover_key}; use crate::{ Curve, Participant, ThresholdView, ThresholdKeys, FrostError, algorithm::Algorithm, - sign::{Writable, SignMachine}, - tests::{algorithm_machines, preprocess, sign}, + tests::{algorithm_machines, sign}, }; #[derive(Clone)] @@ -157,75 +155,3 @@ pub fn test_multi_nonce(rng: &mut R) { let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); sign(&mut *rng, &MultiNonce::::new(), keys.clone(), machines, &[]); } - -/// Test malleating a commitment for a nonce across generators causes the preprocess to error. 
-pub fn test_invalid_commitment(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid commitment - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Grab their preprocess - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - - // Mutate one of the commitments - let nonce = - preprocess.commitments.nonces.get_mut(usize::try_from(rng.next_u64()).unwrap() % 2).unwrap(); - let generators_len = nonce.generators.len(); - nonce.generators[usize::try_from(rng.next_u64()).unwrap() % generators_len].0 - [usize::try_from(rng.next_u64()).unwrap() % 2] = C::G::random(&mut *rng); - - // The commitments are validated at time of deserialization (read_preprocess) - // Accordingly, serialize it and read it again to make sure that errors - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} - -/// Test malleating the DLEq proof for a preprocess causes it to error. 
-pub fn test_invalid_dleq_proof(rng: &mut R) { - let keys = key_gen::(&mut *rng); - let machines = algorithm_machines(&mut *rng, &MultiNonce::::new(), &keys); - let (machines, mut preprocesses) = preprocess(&mut *rng, machines, |_, _| {}); - - // Select a random participant to give an invalid DLEq proof - let participants = preprocesses.keys().collect::>(); - let faulty = *participants - [usize::try_from(rng.next_u64() % u64::try_from(participants.len()).unwrap()).unwrap()]; - - // Invalidate it by replacing it with a completely different proof - let dlogs = [Zeroizing::new(C::F::random(&mut *rng)), Zeroizing::new(C::F::random(&mut *rng))]; - let mut preprocess = preprocesses.remove(&faulty).unwrap(); - preprocess.commitments.dleq = Some(MultiDLEqProof::prove( - &mut *rng, - &mut RecommendedTranscript::new(b"Invalid DLEq Proof"), - &nonces::(), - &dlogs, - )); - - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); - - // Also test None for a proof will cause an error - preprocess.commitments.dleq = None; - assert!(machines - .iter() - .next() - .unwrap() - .1 - .read_preprocess::<&[u8]>(&mut preprocess.serialize().as_ref()) - .is_err()); -} diff --git a/crypto/frost/src/tests/vectors.rs b/crypto/frost/src/tests/vectors.rs index 3356a6cd..7be6478a 100644 --- a/crypto/frost/src/tests/vectors.rs +++ b/crypto/frost/src/tests/vectors.rs @@ -14,7 +14,7 @@ use ciphersuite::group::{ff::PrimeField, GroupEncoding}; use crate::{ curve::Curve, Participant, ThresholdCore, ThresholdKeys, - algorithm::{IetfTranscript, Hram, IetfSchnorr}, + algorithm::{Hram, IetfSchnorr}, sign::{ Writable, Nonce, GeneratorCommitments, NonceCommitments, Commitments, Preprocess, PreprocessMachine, SignMachine, SignatureMachine, AlgorithmMachine, @@ -191,7 +191,6 @@ pub fn test_with_vectors>( nonces: vec![NonceCommitments { generators: vec![GeneratorCommitments(these_commitments)], }], - dleq: None, }, addendum: (), }; 
@@ -301,12 +300,8 @@ pub fn test_with_vectors>( } // Also test it at the Commitments level - let (generated_nonces, commitments) = Commitments::::new::<_, IetfTranscript>( - &mut TransparentRng(randomness), - &share, - &[vec![C::generator()]], - &[], - ); + let (generated_nonces, commitments) = + Commitments::::new::<_>(&mut TransparentRng(randomness), &share, &[vec![C::generator()]]); assert_eq!(generated_nonces.len(), 1); assert_eq!(generated_nonces[0].0, [nonces[0].clone(), nonces[1].clone()]); diff --git a/deny.toml b/deny.toml index 93dc2f96..bb2d4aa4 100644 --- a/deny.toml +++ b/deny.toml @@ -101,6 +101,7 @@ allow-git = [ "https://github.com/rust-lang-nursery/lazy-static.rs", "https://github.com/serai-dex/substrate-bip39", "https://github.com/serai-dex/substrate", + "https://github.com/alloy-rs/alloy", "https://github.com/monero-rs/base58-monero", "https://github.com/kayabaNerve/dockertest-rs", ] diff --git a/orchestration/dev/coins/ethereum/run.sh b/orchestration/dev/coins/ethereum/run.sh index 0b86ff69..4fee3e46 100755 --- a/orchestration/dev/coins/ethereum/run.sh +++ b/orchestration/dev/coins/ethereum/run.sh @@ -1,6 +1,3 @@ #!/bin/sh -geth --dev --networkid 5208 --datadir "eth-devnet" \ - --http --http.api "web3,net,eth,miner" \ - --http.addr 0.0.0.0 --http.port 8545 \ - --http.vhosts="*" --http.corsdomain "*" +~/.foundry/bin/anvil --no-mining --slots-in-an-epoch 32 diff --git a/orchestration/src/coins/bitcoin.rs b/orchestration/src/coins/bitcoin.rs index a5c8b21c..94686244 100644 --- a/orchestration/src/coins/bitcoin.rs +++ b/orchestration/src/coins/bitcoin.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, os, write_dockerfile}; @@ -7,7 +7,7 @@ pub fn bitcoin(orchestration_path: &Path, network: Network) { const DOWNLOAD_BITCOIN: &str = r#" FROM alpine:latest as bitcoin -ENV BITCOIN_VERSION=26.0 +ENV BITCOIN_VERSION=27.0 RUN apk --no-cache add git gnupg diff --git a/orchestration/src/coins/ethereum.rs 
b/orchestration/src/coins/ethereum.rs deleted file mode 100644 index 2e15d370..00000000 --- a/orchestration/src/coins/ethereum.rs +++ /dev/null @@ -1,5 +0,0 @@ -use std::path::Path; - -pub fn ethereum(_orchestration_path: &Path) { - // TODO -} diff --git a/orchestration/src/coins/ethereum/consensus/lighthouse.rs b/orchestration/src/coins/ethereum/consensus/lighthouse.rs new file mode 100644 index 00000000..add9728b --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/lighthouse.rs @@ -0,0 +1,36 @@ +use crate::Network; + +pub fn lighthouse(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + #[rustfmt::skip] + const DOWNLOAD_LIGHTHOUSE: &str = r#" +FROM alpine:latest as lighthouse + +ENV LIGHTHOUSE_VERSION=5.1.3 + +RUN apk --no-cache add git gnupg + +# Download lighthouse +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/sigp/lighthouse/releases/download/v${LIGHTHOUSE_VERSION}/lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys 15E66D941F697E28F49381F426416DC3F30674B0 +gpg --verify lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract lighthouse +RUN tar xvf lighthouse-v${LIGHTHOUSE_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_lighthouse = format!( + r#" +COPY --from=lighthouse --chown=ethereum lighthouse /bin + +ADD /orchestration/{}/coins/ethereum/consensus/lighthouse/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_LIGHTHOUSE.to_string(), String::new(), run_lighthouse) +} diff --git a/orchestration/src/coins/ethereum/consensus/mod.rs b/orchestration/src/coins/ethereum/consensus/mod.rs new file mode 100644 index 00000000..4f64c0d8 --- /dev/null +++ 
b/orchestration/src/coins/ethereum/consensus/mod.rs @@ -0,0 +1,6 @@ +mod lighthouse; +#[allow(unused)] +pub use lighthouse::lighthouse; + +mod nimbus; +pub use nimbus::nimbus; diff --git a/orchestration/src/coins/ethereum/consensus/nimbus.rs b/orchestration/src/coins/ethereum/consensus/nimbus.rs new file mode 100644 index 00000000..07006aa9 --- /dev/null +++ b/orchestration/src/coins/ethereum/consensus/nimbus.rs @@ -0,0 +1,49 @@ +use crate::Network; + +pub fn nimbus(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + let platform = match std::env::consts::ARCH { + "x86_64" => "amd64", + "arm" => "arm32v7", + "aarch64" => "arm64v8", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let checksum = match platform { + "amd64" => "5da10222cfb555ce2e3820ece12e8e30318945e3ed4b2b88d295963c879daeee071623c47926f880f3db89ce537fd47c6b26fe37e47aafbae3222b58bcec2fba", + "arm32v7" => "7055da77bfa1186ee2e7ce2a48b923d45ccb039592f529c58d93d55a62bca46566ada451bd7497c3ae691260544f0faf303602afd85ccc18388fdfdac0bb2b45", + "arm64v8" => "1a68f44598462abfade0dbeb6adf10b52614ba03605a8bf487b99493deb41468317926ef2d657479fcc26fce640aeebdbd880956beec3fb110b5abc97bd83556", + _ => panic!("unsupported platform"), + }; + + #[rustfmt::skip] + let download_nimbus = format!(r#" +FROM alpine:latest as nimbus + +ENV NIMBUS_VERSION=24.3.0 +ENV NIMBUS_COMMIT=dc19b082 + +# Download nimbus +RUN wget https://github.com/status-im/nimbus-eth2/releases/download/v${{NIMBUS_VERSION}}/nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz + +# Extract nimbus +RUN tar xvf nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}.tar.gz +RUN mv nimbus-eth2_Linux_{platform}_${{NIMBUS_VERSION}}_${{NIMBUS_COMMIT}}/build/nimbus_beacon_node ./nimbus + +# Verify the checksum +RUN sha512sum nimbus | grep {checksum} +"#); + + let run_nimbus = format!( + r#" +COPY --from=nimbus --chown=ethereum nimbus /bin + +ADD 
/orchestration/{}/coins/ethereum/consensus/nimbus/run.sh /consensus_layer.sh +"#, + network.label() + ); + + (download_nimbus, String::new(), run_nimbus) +} diff --git a/orchestration/src/coins/ethereum/execution/anvil.rs b/orchestration/src/coins/ethereum/execution/anvil.rs new file mode 100644 index 00000000..53d894ec --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/anvil.rs @@ -0,0 +1,14 @@ +use crate::Network; + +pub fn anvil(network: Network) -> (String, String, String) { + assert_eq!(network, Network::Dev); + + const ANVIL_SETUP: &str = r#" +RUN curl -L https://foundry.paradigm.xyz | bash || exit 0 +RUN ~/.foundry/bin/foundryup + +EXPOSE 8545 +"#; + + (String::new(), "RUN apt install git curl -y".to_string(), ANVIL_SETUP.to_string()) +} diff --git a/orchestration/src/coins/ethereum/execution/mod.rs b/orchestration/src/coins/ethereum/execution/mod.rs new file mode 100644 index 00000000..3db59c84 --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/mod.rs @@ -0,0 +1,5 @@ +mod reth; +pub use reth::reth; + +mod anvil; +pub use anvil::anvil; diff --git a/orchestration/src/coins/ethereum/execution/reth.rs b/orchestration/src/coins/ethereum/execution/reth.rs new file mode 100644 index 00000000..8c80a9fa --- /dev/null +++ b/orchestration/src/coins/ethereum/execution/reth.rs @@ -0,0 +1,38 @@ +use crate::Network; + +pub fn reth(network: Network) -> (String, String, String) { + assert_ne!(network, Network::Dev); + + #[rustfmt::skip] + const DOWNLOAD_RETH: &str = r#" +FROM alpine:latest as reth + +ENV RETH_VERSION=0.2.0-beta.6 + +RUN apk --no-cache add git gnupg + +# Download reth +RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +RUN wget https://github.com/paradigmxyz/reth/releases/download/v${RETH_VERSION}/reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz.asc + +# Verify the signature +gpg --keyserver keyserver.ubuntu.com --recv-keys 
A3AE097C89093A124049DF1F5391A3C4100530B4 +gpg --verify reth-v${RETH_VERSION}-$(uname -m).tar.gz.asc reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz + +# Extract reth +RUN tar xvf reth-v${RETH_VERSION}-$(uname -m)-unknown-linux-gnu.tar.gz +"#; + + let run_reth = format!( + r#" +COPY --from=reth --chown=ethereum reth /bin + +EXPOSE 30303 9001 8545 + +ADD /orchestration/{}/coins/ethereum/execution/reth/run.sh /execution_layer.sh +"#, + network.label() + ); + + (DOWNLOAD_RETH.to_string(), String::new(), run_reth) +} diff --git a/orchestration/src/coins/ethereum/mod.rs b/orchestration/src/coins/ethereum/mod.rs new file mode 100644 index 00000000..a06318c0 --- /dev/null +++ b/orchestration/src/coins/ethereum/mod.rs @@ -0,0 +1,43 @@ +use std::path::Path; + +use crate::{Network, Os, mimalloc, os, write_dockerfile}; + +mod execution; +use execution::*; + +mod consensus; +use consensus::*; + +pub fn ethereum(orchestration_path: &Path, network: Network) { + let ((el_download, el_run_as_root, el_run), (cl_download, cl_run_as_root, cl_run)) = + if network == Network::Dev { + (anvil(network), (String::new(), String::new(), String::new())) + } else { + // TODO: Select an EL/CL based off a RNG seeded from the public key + (reth(network), nimbus(network)) + }; + + let download = mimalloc(Os::Alpine).to_string() + &el_download + &cl_download; + + let run = format!( + r#" +ADD /orchestration/{}/coins/ethereum/run.sh /run.sh +CMD ["/run.sh"] +"#, + network.label() + ); + let run = mimalloc(Os::Debian).to_string() + + &os(Os::Debian, &(el_run_as_root + "\r\n" + &cl_run_as_root), "ethereum") + + &el_run + + &cl_run + + &run; + + let res = download + &run; + + let mut ethereum_path = orchestration_path.to_path_buf(); + ethereum_path.push("coins"); + ethereum_path.push("ethereum"); + ethereum_path.push("Dockerfile"); + + write_dockerfile(ethereum_path, &res); +} diff --git a/orchestration/src/coins/monero.rs b/orchestration/src/coins/monero.rs index 873c6458..c21bc610 100644 
--- a/orchestration/src/coins/monero.rs +++ b/orchestration/src/coins/monero.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use crate::{Network, Os, mimalloc, write_dockerfile}; diff --git a/orchestration/src/coordinator.rs b/orchestration/src/coordinator.rs index 67a24527..13fdff59 100644 --- a/orchestration/src/coordinator.rs +++ b/orchestration/src/coordinator.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; diff --git a/orchestration/src/main.rs b/orchestration/src/main.rs index 4be84cd4..0e6c7cb0 100644 --- a/orchestration/src/main.rs +++ b/orchestration/src/main.rs @@ -266,7 +266,7 @@ fn dockerfiles(network: Network) { let orchestration_path = orchestration_path(network); bitcoin(&orchestration_path, network); - ethereum(&orchestration_path); + ethereum(&orchestration_path, network); monero(&orchestration_path, network); if network == Network::Dev { monero_wallet_rpc(&orchestration_path); diff --git a/orchestration/src/message_queue.rs b/orchestration/src/message_queue.rs index f16c6cbe..eb662b67 100644 --- a/orchestration/src/message_queue.rs +++ b/orchestration/src/message_queue.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto}; diff --git a/orchestration/src/processor.rs b/orchestration/src/processor.rs index 7ee69d11..8a2c8c77 100644 --- a/orchestration/src/processor.rs +++ b/orchestration/src/processor.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; diff --git a/orchestration/src/serai.rs b/orchestration/src/serai.rs index 77d098b6..2e1e915c 100644 --- a/orchestration/src/serai.rs +++ b/orchestration/src/serai.rs @@ -1,4 +1,4 @@ -use std::{path::Path}; +use std::path::Path; use zeroize::Zeroizing; use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto}; diff --git a/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh 
b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh new file mode 100755 index 00000000..1b3857bf --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/lighthouse/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info lighthouse bn --execution-endpoint http://localhost:8551 --execution-jwt /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh new file mode 100755 index 00000000..2bb8d868 --- /dev/null +++ b/orchestration/testnet/coins/ethereum/consensus/nimbus/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +exit 1 diff --git a/orchestration/testnet/coins/ethereum/execution/geth/run.sh b/orchestration/testnet/coins/ethereum/execution/geth/run.sh new file mode 100755 index 00000000..fee4a57c --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/geth/run.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +#geth --dev --networkid 5208 \ +# --http --http.api "web3,net,eth,miner" \ +# --http.addr 0.0.0.0 --http.port 8545 \ +# --http.vhosts="*" --http.corsdomain "*" + +exit 1 diff --git a/orchestration/testnet/coins/ethereum/execution/reth/run.sh b/orchestration/testnet/coins/ethereum/execution/reth/run.sh new file mode 100755 index 00000000..5be8924a --- /dev/null +++ b/orchestration/testnet/coins/ethereum/execution/reth/run.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +RUST_LOG=info reth node --authrpc.jwtsecret /home/ethereum/.jwt diff --git a/orchestration/testnet/coins/ethereum/run.sh b/orchestration/testnet/coins/ethereum/run.sh index 2bb8d868..82b8ff58 100755 --- a/orchestration/testnet/coins/ethereum/run.sh +++ b/orchestration/testnet/coins/ethereum/run.sh @@ -1,3 +1 @@ -#!/bin/sh - -exit 1 +/execution_layer.sh & /consensus_layer.sh diff --git a/processor/Cargo.toml b/processor/Cargo.toml index 73a34efe..cbc022a1 100644 --- a/processor/Cargo.toml +++ b/processor/Cargo.toml @@ -28,6 +28,7 @@ rand_core = { version = "0.6", default-features = false, features = ["std", 
"get rand_chacha = { version = "0.3", default-features = false, features = ["std"] } # Encoders +const-hex = { version = "1", default-features = false } hex = { version = "0.4", default-features = false, features = ["std"] } scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] } borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] } @@ -40,11 +41,16 @@ transcript = { package = "flexible-transcript", path = "../crypto/transcript", d frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] } frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false } +# Bitcoin/Ethereum +k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } + # Bitcoin secp256k1 = { version = "0.28", default-features = false, features = ["std", "global-context", "rand-std"], optional = true } -k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true } bitcoin-serai = { path = "../coins/bitcoin", default-features = false, features = ["std"], optional = true } +# Ethereum +ethereum-serai = { path = "../coins/ethereum", default-features = false, optional = true } + # Monero dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true } monero-serai = { path = "../coins/monero", default-features = false, features = ["std", "http-rpc", "multisig"], optional = true } @@ -55,12 +61,12 @@ env_logger = { version = "0.10", default-features = false, features = ["humantim tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] } zalloc = { path = "../common/zalloc" } -serai-db = { path = "../common/db", optional = true } +serai-db = { path = "../common/db" } serai-env = { path = "../common/env", optional = true } # TODO: Replace with direct usage of primitives 
serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] } -messages = { package = "serai-processor-messages", path = "./messages", optional = true } +messages = { package = "serai-processor-messages", path = "./messages" } message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true } @@ -69,6 +75,8 @@ frost = { package = "modular-frost", path = "../crypto/frost", features = ["test sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] } +ethereum-serai = { path = "../coins/ethereum", default-features = false, features = ["tests"] } + dockertest = "0.4" serai-docker-tests = { path = "../tests/docker" } @@ -76,9 +84,11 @@ serai-docker-tests = { path = "../tests/docker" } secp256k1 = ["k256", "frost/secp256k1"] bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"] +ethereum = ["secp256k1", "ethereum-serai"] + ed25519 = ["dalek-ff-group", "frost/ed25519"] monero = ["ed25519", "monero-serai", "serai-client/monero"] -binaries = ["env_logger", "serai-env", "messages", "message-queue"] +binaries = ["env_logger", "serai-env", "message-queue"] parity-db = ["serai-db/parity-db"] rocksdb = ["serai-db/rocksdb"] diff --git a/processor/src/lib.rs b/processor/src/lib.rs index 378b852d..19f67508 100644 --- a/processor/src/lib.rs +++ b/processor/src/lib.rs @@ -1,7 +1,15 @@ +#![allow(dead_code)] + mod plan; pub use plan::*; +mod db; +pub(crate) use db::*; + +mod key_gen; + pub mod networks; +pub(crate) mod multisigs; mod additional_key; pub use additional_key::additional_key; diff --git a/processor/src/main.rs b/processor/src/main.rs index a4e9552d..1a50effa 100644 --- a/processor/src/main.rs +++ b/processor/src/main.rs @@ -31,6 +31,8 @@ mod networks; use networks::{Block, Network}; #[cfg(feature = "bitcoin")] use networks::Bitcoin; +#[cfg(feature = "ethereum")] +use networks::Ethereum; #[cfg(feature = "monero")] use 
networks::Monero; @@ -735,6 +737,7 @@ async fn main() { }; let network_id = match env::var("NETWORK").expect("network wasn't specified").as_str() { "bitcoin" => NetworkId::Bitcoin, + "ethereum" => NetworkId::Ethereum, "monero" => NetworkId::Monero, _ => panic!("unrecognized network"), }; @@ -744,6 +747,8 @@ async fn main() { match network_id { #[cfg(feature = "bitcoin")] NetworkId::Bitcoin => run(db, Bitcoin::new(url).await, coordinator).await, + #[cfg(feature = "ethereum")] + NetworkId::Ethereum => run(db.clone(), Ethereum::new(db, url).await, coordinator).await, #[cfg(feature = "monero")] NetworkId::Monero => run(db, Monero::new(url).await, coordinator).await, _ => panic!("spawning a processor for an unsupported network"), diff --git a/processor/src/multisigs/db.rs b/processor/src/multisigs/db.rs index 51287a0e..339b7bdc 100644 --- a/processor/src/multisigs/db.rs +++ b/processor/src/multisigs/db.rs @@ -1,3 +1,5 @@ +use std::io; + use ciphersuite::Ciphersuite; pub use serai_db::*; @@ -6,9 +8,59 @@ use serai_client::{primitives::Balance, in_instructions::primitives::InInstructi use crate::{ Get, Plan, - networks::{Transaction, Network}, + networks::{Output, Transaction, Network}, }; +#[derive(Clone, PartialEq, Eq, Debug)] +pub enum PlanFromScanning { + Refund(N::Output, N::Address), + Forward(N::Output), +} + +impl PlanFromScanning { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let output = N::Output::read(reader)?; + + let mut address_vec_len = [0; 4]; + reader.read_exact(&mut address_vec_len)?; + let mut address_vec = + vec![0; usize::try_from(u32::from_le_bytes(address_vec_len)).unwrap()]; + reader.read_exact(&mut address_vec)?; + let address = + N::Address::try_from(address_vec).map_err(|_| "invalid address saved to disk").unwrap(); + + Ok(PlanFromScanning::Refund(output, address)) + } + 1 => { + let output = N::Output::read(reader)?; + Ok(PlanFromScanning::Forward(output)) + } 
+ _ => panic!("reading unrecognized PlanFromScanning"), + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + PlanFromScanning::Refund(output, address) => { + writer.write_all(&[0])?; + output.write(writer)?; + + let address_vec: Vec = + address.clone().try_into().map_err(|_| "invalid address being refunded to").unwrap(); + writer.write_all(&u32::try_from(address_vec.len()).unwrap().to_le_bytes())?; + writer.write_all(&address_vec) + } + PlanFromScanning::Forward(output) => { + writer.write_all(&[1])?; + output.write(writer) + } + } + } +} + create_db!( MultisigsDb { NextBatchDb: () -> u32, @@ -80,7 +132,11 @@ impl PlanDb { ) -> bool { let plan = Plan::::read::<&[u8]>(&mut &Self::get(getter, &id).unwrap()[8 ..]).unwrap(); assert_eq!(plan.id(), id); - (key == plan.key) && (Some(N::change_address(plan.key)) == plan.change) + if let Some(change) = N::change_address(plan.key) { + (key == plan.key) && (Some(change) == plan.change) + } else { + false + } } } @@ -130,7 +186,7 @@ impl PlansFromScanningDb { pub fn set_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - plans: Vec>, + plans: Vec>, ) { let mut buf = vec![]; for plan in plans { @@ -142,13 +198,13 @@ impl PlansFromScanningDb { pub fn take_plans_from_scanning( txn: &mut impl DbTxn, block_number: usize, - ) -> Option>> { + ) -> Option>> { let block_number = u64::try_from(block_number).unwrap(); let res = Self::get(txn, block_number).map(|plans| { let mut plans_ref = plans.as_slice(); let mut res = vec![]; while !plans_ref.is_empty() { - res.push(Plan::::read(&mut plans_ref).unwrap()); + res.push(PlanFromScanning::::read(&mut plans_ref).unwrap()); } res }); diff --git a/processor/src/multisigs/mod.rs b/processor/src/multisigs/mod.rs index a6e8bbc9..75c91675 100644 --- a/processor/src/multisigs/mod.rs +++ b/processor/src/multisigs/mod.rs @@ -7,7 +7,7 @@ use scale::{Encode, Decode}; use messages::SubstrateContext; use serai_client::{ - primitives::{MAX_DATA_LEN, NetworkId, 
Coin, ExternalAddress, BlockHash, Data}, + primitives::{MAX_DATA_LEN, ExternalAddress, BlockHash, Data}, in_instructions::primitives::{ InInstructionWithBalance, Batch, RefundableInInstruction, Shorthand, MAX_BATCH_SIZE, }, @@ -28,15 +28,12 @@ use scanner::{ScannerEvent, ScannerHandle, Scanner}; mod db; use db::*; -#[cfg(not(test))] -mod scheduler; -#[cfg(test)] -pub mod scheduler; +pub(crate) mod scheduler; use scheduler::Scheduler; use crate::{ Get, Db, Payment, Plan, - networks::{OutputType, Output, Transaction, SignableTransaction, Block, PreparedSend, Network}, + networks::{OutputType, Output, SignableTransaction, Eventuality, Block, PreparedSend, Network}, }; // InInstructionWithBalance from an external output @@ -95,6 +92,8 @@ enum RotationStep { ClosingExisting, } +// This explicitly shouldn't take the database as we prepare Plans we won't execute for fee +// estimates async fn prepare_send( network: &N, block_number: usize, @@ -122,7 +121,7 @@ async fn prepare_send( pub struct MultisigViewer { activation_block: usize, key: ::G, - scheduler: Scheduler, + scheduler: N::Scheduler, } #[allow(clippy::type_complexity)] @@ -131,7 +130,7 @@ pub enum MultisigEvent { // Batches to publish Batches(Option<(::G, ::G)>, Vec), // Eventuality completion found on-chain - Completed(Vec, [u8; 32], N::Transaction), + Completed(Vec, [u8; 32], ::Completion), } pub struct MultisigManager { @@ -157,20 +156,7 @@ impl MultisigManager { assert!(current_keys.len() <= 2); let mut actively_signing = vec![]; for (_, key) in ¤t_keys { - schedulers.push( - Scheduler::from_db( - raw_db, - *key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ) - .unwrap(), - ); + schedulers.push(N::Scheduler::from_db(raw_db, *key, N::NETWORK).unwrap()); // Load any TXs being actively signed let key = key.to_bytes(); @@ 
-245,17 +231,7 @@ impl MultisigManager { let viewer = Some(MultisigViewer { activation_block, key: external_key, - scheduler: Scheduler::::new::( - txn, - external_key, - match N::NETWORK { - NetworkId::Serai => panic!("adding a key for Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - // TODO: This is incomplete to DAI - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ), + scheduler: N::Scheduler::new::(txn, external_key, N::NETWORK), }); if self.existing.is_none() { @@ -352,48 +328,30 @@ impl MultisigManager { (existing_outputs, new_outputs) } - fn refund_plan(output: N::Output, refund_to: N::Address) -> Plan { + fn refund_plan( + scheduler: &mut N::Scheduler, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { log::info!("creating refund plan for {}", hex::encode(output.id())); assert_eq!(output.kind(), OutputType::External); - Plan { - key: output.key(), - // Uses a payment as this will still be successfully sent due to fee amortization, - // and because change is currently always a Serai key - payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], - inputs: vec![output], - change: None, - } + scheduler.refund_plan::(txn, output, refund_to) } - fn forward_plan(&self, output: N::Output) -> Plan { + // Returns the plan for forwarding if one is needed. + // Returns None if one is not needed to forward this output. + fn forward_plan(&mut self, txn: &mut D::Transaction<'_>, output: &N::Output) -> Option> { log::info!("creating forwarding plan for {}", hex::encode(output.id())); - - /* - Sending a Plan, with arbitrary data proxying the InInstruction, would require adding - a flow for networks which drop their data to still embed arbitrary data. It'd also have - edge cases causing failures (we'd need to manually provide the origin if it was implied, - which may exceed the encoding limit). - - Instead, we save the InInstruction as we scan this output. 
Then, when the output is - successfully forwarded, we simply read it from the local database. This also saves the - costs of embedding arbitrary data. - - Since we can't rely on the Eventuality system to detect if it's a forwarded transaction, - due to the asynchonicity of the Eventuality system, we instead interpret an Forwarded - output which has an amount associated with an InInstruction which was forwarded as having - been forwarded. - */ - - Plan { - key: self.existing.as_ref().unwrap().key, - payments: vec![Payment { - address: N::forward_address(self.new.as_ref().unwrap().key), - data: None, - balance: output.balance(), - }], - inputs: vec![output], - change: None, + let res = self.existing.as_mut().unwrap().scheduler.forward_plan::( + txn, + output.clone(), + self.new.as_ref().expect("forwarding plan yet no new multisig").key, + ); + if res.is_none() { + log::info!("no forwarding plan was necessary for {}", hex::encode(output.id())); } + res } // Filter newly received outputs due to the step being RotationStep::ClosingExisting. 
@@ -605,7 +563,31 @@ impl MultisigManager { block_number { // Load plans crated when we scanned the block - plans = PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + let scanning_plans = + PlansFromScanningDb::take_plans_from_scanning::(txn, block_number).unwrap(); + // Expand into actual plans + plans = scanning_plans + .into_iter() + .map(|plan| match plan { + PlanFromScanning::Refund(output, refund_to) => { + let existing = self.existing.as_mut().unwrap(); + if output.key() == existing.key { + Self::refund_plan(&mut existing.scheduler, txn, output, refund_to) + } else { + let new = self + .new + .as_mut() + .expect("new multisig didn't expect yet output wasn't for existing multisig"); + assert_eq!(output.key(), new.key, "output wasn't for existing nor new multisig"); + Self::refund_plan(&mut new.scheduler, txn, output, refund_to) + } + } + PlanFromScanning::Forward(output) => self + .forward_plan(txn, &output) + .expect("supposed to forward an output yet no forwarding plan"), + }) + .collect(); + for plan in &plans { plans_from_scanning.insert(plan.id()); } @@ -665,13 +647,23 @@ impl MultisigManager { }); for plan in &plans { - if plan.change == Some(N::change_address(plan.key)) { - // Assert these are only created during the expected step - match *step { - RotationStep::UseExisting => {} - RotationStep::NewAsChange | - RotationStep::ForwardFromExisting | - RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + // This first equality should 'never meaningfully' be false + // All created plans so far are by the existing multisig EXCEPT: + // A) If we created a refund plan from the new multisig (yet that wouldn't have change) + // B) The existing Scheduler returned a Plan for the new key (yet that happens with the SC + // scheduler, yet that doesn't have change) + // Despite being 'unnecessary' now, it's better to explicitly ensure and be robust + if plan.key == self.existing.as_ref().unwrap().key { + if 
let Some(change) = N::change_address(plan.key) { + if plan.change == Some(change) { + // Assert these (self-change) are only created during the expected step + match *step { + RotationStep::UseExisting => {} + RotationStep::NewAsChange | + RotationStep::ForwardFromExisting | + RotationStep::ClosingExisting => panic!("change was set to self despite rotating"), + } + } } } } @@ -853,15 +845,20 @@ impl MultisigManager { let plans_at_start = plans.len(); let (refund_to, instruction) = instruction_from_output::(output); if let Some(mut instruction) = instruction { - // Build a dedicated Plan forwarding this - let forward_plan = self.forward_plan(output.clone()); - plans.push(forward_plan.clone()); + let Some(shimmed_plan) = N::Scheduler::shim_forward_plan( + output.clone(), + self.new.as_ref().expect("forwarding from existing yet no new multisig").key, + ) else { + // If this network doesn't need forwarding, report the output now + return true; + }; + plans.push(PlanFromScanning::::Forward(output.clone())); // Set the instruction for this output to be returned // We need to set it under the amount it's forwarded with, so prepare its forwarding // TX to determine the fees involved let PreparedSend { tx, post_fee_branches: _, operating_costs } = - prepare_send(network, block_number, forward_plan, 0).await; + prepare_send(network, block_number, shimmed_plan, 0).await; // operating_costs should not increase in a forwarding TX assert_eq!(operating_costs, 0); @@ -872,12 +869,28 @@ impl MultisigManager { // letting it die out if let Some(tx) = &tx { instruction.balance.amount.0 -= tx.0.fee(); + + /* + Sending a Plan, with arbitrary data proxying the InInstruction, would require + adding a flow for networks which drop their data to still embed arbitrary data. + It'd also have edge cases causing failures (we'd need to manually provide the + origin if it was implied, which may exceed the encoding limit). + + Instead, we save the InInstruction as we scan this output. 
Then, when the + output is successfully forwarded, we simply read it from the local database. + This also saves the costs of embedding arbitrary data. + + Since we can't rely on the Eventuality system to detect if it's a forwarded + transaction, due to the asynchonicity of the Eventuality system, we instead + interpret an Forwarded output which has an amount associated with an + InInstruction which was forwarded as having been forwarded. + */ ForwardedOutputDb::save_forwarded_output(txn, &instruction); } } else if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { // Build a dedicated Plan refunding this - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } @@ -909,7 +922,7 @@ impl MultisigManager { let Some(instruction) = instruction else { if let Some(refund_to) = refund_to { if let Ok(refund_to) = refund_to.consume().try_into() { - plans.push(Self::refund_plan(output.clone(), refund_to)); + plans.push(PlanFromScanning::Refund(output.clone(), refund_to)); } } continue; @@ -999,9 +1012,9 @@ impl MultisigManager { // This must be emitted before ScannerEvent::Block for all completions of known Eventualities // within the block. Unknown Eventualities may have their Completed events emitted after // ScannerEvent::Block however. 
- ScannerEvent::Completed(key, block_number, id, tx) => { - ResolvedDb::resolve_plan::(txn, &key, id, &tx.id()); - (block_number, MultisigEvent::Completed(key, id, tx)) + ScannerEvent::Completed(key, block_number, id, tx_id, completion) => { + ResolvedDb::resolve_plan::(txn, &key, id, &tx_id); + (block_number, MultisigEvent::Completed(key, id, completion)) } }; diff --git a/processor/src/multisigs/scanner.rs b/processor/src/multisigs/scanner.rs index cefa8a25..3d28f3e8 100644 --- a/processor/src/multisigs/scanner.rs +++ b/processor/src/multisigs/scanner.rs @@ -17,15 +17,26 @@ use tokio::{ use crate::{ Get, DbTxn, Db, - networks::{Output, Transaction, EventualitiesTracker, Block, Network}, + networks::{Output, Transaction, Eventuality, EventualitiesTracker, Block, Network}, }; #[derive(Clone, Debug)] pub enum ScannerEvent { // Block scanned - Block { is_retirement_block: bool, block: >::Id, outputs: Vec }, + Block { + is_retirement_block: bool, + block: >::Id, + outputs: Vec, + }, // Eventuality completion found on-chain - Completed(Vec, usize, [u8; 32], N::Transaction), + // TODO: Move this from a tuple + Completed( + Vec, + usize, + [u8; 32], + >::Id, + ::Completion, + ), } pub type ScannerEventChannel = mpsc::UnboundedReceiver>; @@ -555,19 +566,25 @@ impl Scanner { } } - for (id, (block_number, tx)) in network + for (id, (block_number, tx, completion)) in network .get_eventuality_completions(scanner.eventualities.get_mut(&key_vec).unwrap(), &block) .await { info!( "eventuality {} resolved by {}, as found on chain", hex::encode(id), - hex::encode(&tx.id()) + hex::encode(tx.as_ref()) ); completion_block_numbers.push(block_number); // This must be before the mission of ScannerEvent::Block, per commentary in mod.rs - if !scanner.emit(ScannerEvent::Completed(key_vec.clone(), block_number, id, tx)) { + if !scanner.emit(ScannerEvent::Completed( + key_vec.clone(), + block_number, + id, + tx, + completion, + )) { return; } } diff --git 
a/processor/src/multisigs/scheduler/mod.rs b/processor/src/multisigs/scheduler/mod.rs new file mode 100644 index 00000000..26c940fe --- /dev/null +++ b/processor/src/multisigs/scheduler/mod.rs @@ -0,0 +1,96 @@ +use core::fmt::Debug; +use std::io; + +use ciphersuite::Ciphersuite; + +use serai_client::primitives::{NetworkId, Balance}; + +use crate::{networks::Network, Db, Payment, Plan}; + +pub(crate) mod utxo; +pub(crate) mod smart_contract; + +pub trait SchedulerAddendum: Send + Clone + PartialEq + Debug { + fn read(reader: &mut R) -> io::Result; + fn write(&self, writer: &mut W) -> io::Result<()>; +} + +impl SchedulerAddendum for () { + fn read(_: &mut R) -> io::Result { + Ok(()) + } + fn write(&self, _: &mut W) -> io::Result<()> { + Ok(()) + } +} + +pub trait Scheduler: Sized + Clone + PartialEq + Debug { + type Addendum: SchedulerAddendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool; + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self; + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result; + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool; + + /// Schedule a series of outputs/payments. + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + // TODO: Tighten this to multisig_for_any_change + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec>; + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec>; + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ); + + /// Refund a specific output. 
+ fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan; + + /// Shim the forwarding Plan as necessary to obtain a fee estimate. + /// + /// If this Scheduler is for a Network which requires forwarding, this must return Some with a + /// plan with identical fee behavior. If forwarding isn't necessary, returns None. + fn shim_forward_plan(output: N::Output, to: ::G) -> Option>; + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. Must return Some if forwarding is necessary. + fn forward_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option>; +} diff --git a/processor/src/multisigs/scheduler/smart_contract.rs b/processor/src/multisigs/scheduler/smart_contract.rs new file mode 100644 index 00000000..27268b82 --- /dev/null +++ b/processor/src/multisigs/scheduler/smart_contract.rs @@ -0,0 +1,208 @@ +use std::{io, collections::HashSet}; + +use ciphersuite::{group::GroupEncoding, Ciphersuite}; + +use serai_client::primitives::{NetworkId, Coin, Balance}; + +use crate::{ + Get, DbTxn, Db, Payment, Plan, create_db, + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler as SchedulerTrait}, +}; + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { + key: ::G, + coins: HashSet, + rotated: bool, +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Addendum { + Nonce(u64), + RotateTo { nonce: u64, new_key: ::G }, +} + +impl SchedulerAddendum for Addendum { + fn read(reader: &mut R) -> io::Result { + let mut kind = [0xff]; + reader.read_exact(&mut kind)?; + match kind[0] { + 0 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + Ok(Addendum::Nonce(u64::from_le_bytes(nonce))) + } + 1 => { + let mut nonce = [0; 8]; + reader.read_exact(&mut nonce)?; + let nonce = u64::from_le_bytes(nonce); + + let new_key = N::Curve::read_G(reader)?; + Ok(Addendum::RotateTo 
{ nonce, new_key }) + } + _ => Err(io::Error::other("reading unknown Addendum type"))?, + } + } + fn write(&self, writer: &mut W) -> io::Result<()> { + match self { + Addendum::Nonce(nonce) => { + writer.write_all(&[0])?; + writer.write_all(&nonce.to_le_bytes()) + } + Addendum::RotateTo { nonce, new_key } => { + writer.write_all(&[1])?; + writer.write_all(&nonce.to_le_bytes())?; + writer.write_all(new_key.to_bytes().as_ref()) + } + } + } +} + +create_db! { + SchedulerDb { + LastNonce: () -> u64, + RotatedTo: (key: &[u8]) -> Vec, + } +} + +impl> SchedulerTrait for Scheduler { + type Addendum = Addendum; + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + self.rotated + } + + /// Create a new Scheduler. + fn new( + _txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + assert!(N::branch_address(key).is_none()); + assert!(N::change_address(key).is_none()); + assert!(N::forward_address(key).is_none()); + + Scheduler { key, coins: network.coins().iter().copied().collect(), rotated: false } + } + + /// Load a Scheduler from the DB. 
+ fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Ok(Scheduler { + key, + coins: network.coins().iter().copied().collect(), + rotated: RotatedTo::get(db, key.to_bytes().as_ref()).is_some(), + }) + } + + fn can_use_branch(&self, _balance: Balance) -> bool { + false + } + + fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + for utxo in utxos { + assert!(self.coins.contains(&utxo.balance().coin)); + } + + let mut nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + let mut plans = vec![]; + for chunk in payments.as_slice().chunks(N::MAX_OUTPUTS) { + // Once we rotate, all further payments should be scheduled via the new multisig + assert!(!self.rotated); + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: chunk.to_vec(), + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + }); + nonce += 1; + } + + // If we're supposed to rotate to the new key, create an empty Plan which will signify the key + // update + if force_spend && (!self.rotated) { + plans.push(Plan { + key: self.key, + inputs: vec![], + payments: vec![], + change: None, + scheduler_addendum: Addendum::RotateTo { nonce, new_key: key_for_any_change }, + }); + nonce += 1; + self.rotated = true; + RotatedTo::set( + txn, + self.key.to_bytes().as_ref(), + &key_for_any_change.to_bytes().as_ref().to_vec(), + ); + } + + LastNonce::set(txn, &nonce); + + plans + } + + fn consume_payments(&mut self, _txn: &mut D::Transaction<'_>) -> Vec> { + vec![] + } + + fn created_output( + &mut self, + _txn: &mut D::Transaction<'_>, + _expected: u64, + _actual: Option, + ) { + panic!("Smart Contract Scheduler created a Branch output") + } + + /// Refund a specific output. 
+ fn refund_plan( + &mut self, + txn: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + let current_key = RotatedTo::get(txn, self.key.to_bytes().as_ref()) + .and_then(|key_bytes| ::read_G(&mut key_bytes.as_slice()).ok()) + .unwrap_or(self.key); + + let nonce = LastNonce::get(txn).map_or(0, |nonce| nonce + 1); + LastNonce::set(txn, &(nonce + 1)); + Plan { + key: current_key, + inputs: vec![], + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + change: None, + scheduler_addendum: Addendum::Nonce(nonce), + } + } + + fn shim_forward_plan(_output: N::Output, _to: ::G) -> Option> { + None + } + + /// Forward a specific output to the new multisig. + /// + /// Returns None if no forwarding is necessary. + fn forward_plan( + &mut self, + _txn: &mut D::Transaction<'_>, + _output: N::Output, + _to: ::G, + ) -> Option> { + None + } +} diff --git a/processor/src/multisigs/scheduler.rs b/processor/src/multisigs/scheduler/utxo.rs similarity index 80% rename from processor/src/multisigs/scheduler.rs rename to processor/src/multisigs/scheduler/utxo.rs index abc81a80..e9aa3351 100644 --- a/processor/src/multisigs/scheduler.rs +++ b/processor/src/multisigs/scheduler/utxo.rs @@ -5,16 +5,17 @@ use std::{ use ciphersuite::{group::GroupEncoding, Ciphersuite}; -use serai_client::primitives::{Coin, Amount, Balance}; +use serai_client::primitives::{NetworkId, Coin, Amount, Balance}; use crate::{ - networks::{OutputType, Output, Network}, DbTxn, Db, Payment, Plan, + networks::{OutputType, Output, Network, UtxoNetwork}, + multisigs::scheduler::Scheduler as SchedulerTrait, }; -/// Stateless, deterministic output/payment manager. -#[derive(PartialEq, Eq, Debug)] -pub struct Scheduler { +/// Deterministic output/payment manager. 
+#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Scheduler { key: ::G, coin: Coin, @@ -46,7 +47,7 @@ fn scheduler_key(key: &G) -> Vec { D::key(b"SCHEDULER", b"scheduler", key.to_bytes()) } -impl Scheduler { +impl> Scheduler { pub fn empty(&self) -> bool { self.queued_plans.is_empty() && self.plans.is_empty() && @@ -144,8 +145,18 @@ impl Scheduler { pub fn new( txn: &mut D::Transaction<'_>, key: ::G, - coin: Coin, + network: NetworkId, ) -> Self { + assert!(N::branch_address(key).is_some()); + assert!(N::change_address(key).is_some()); + assert!(N::forward_address(key).is_some()); + + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let res = Scheduler { key, coin, @@ -159,7 +170,17 @@ impl Scheduler { res } - pub fn from_db(db: &D, key: ::G, coin: Coin) -> io::Result { + pub fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + let coin = { + let coins = network.coins(); + assert_eq!(coins.len(), 1); + coins[0] + }; + let scheduler = db.get(scheduler_key::(&key)).unwrap_or_else(|| { panic!("loading scheduler from DB without scheduler for {}", hex::encode(key.to_bytes())) }); @@ -201,7 +222,7 @@ impl Scheduler { amount }; - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); // If we have more payments than we can handle in a single TX, create plans for them // TODO2: This isn't perfect. For 258 outputs, and a MAX_OUTPUTS of 16, this will create: @@ -237,7 +258,8 @@ impl Scheduler { key: self.key, inputs, payments, - change: Some(N::change_address(key_for_any_change)).filter(|_| change), + change: Some(N::change_address(key_for_any_change).unwrap()).filter(|_| change), + scheduler_addendum: (), } } @@ -305,7 +327,7 @@ impl Scheduler { its *own* branch address, since created_output is called on the signer's Scheduler. 
*/ { - let branch_address = N::branch_address(self.key); + let branch_address = N::branch_address(self.key).unwrap(); payments = payments.drain(..).filter(|payment| payment.address != branch_address).collect::>(); } @@ -357,7 +379,8 @@ impl Scheduler { key: self.key, inputs: chunk, payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }) } @@ -403,7 +426,8 @@ impl Scheduler { key: self.key, inputs: self.utxos.drain(..).collect::>(), payments: vec![], - change: Some(N::change_address(key_for_any_change)), + change: Some(N::change_address(key_for_any_change).unwrap()), + scheduler_addendum: (), }); } @@ -435,9 +459,6 @@ impl Scheduler { // Note a branch output as having been created, with the amount it was actually created with, // or not having been created due to being too small - // This can be called whenever, so long as it's properly ordered - // (it's independent to Serai/the chain we're scheduling over, yet still expects outputs to be - // created in the same order Plans are returned in) pub fn created_output( &mut self, txn: &mut D::Transaction<'_>, @@ -501,3 +522,106 @@ impl Scheduler { txn.put(scheduler_key::(&self.key), self.serialize()); } } + +impl> SchedulerTrait for Scheduler { + type Addendum = (); + + /// Check if this Scheduler is empty. + fn empty(&self) -> bool { + Scheduler::empty(self) + } + + /// Create a new Scheduler. + fn new( + txn: &mut D::Transaction<'_>, + key: ::G, + network: NetworkId, + ) -> Self { + Scheduler::new::(txn, key, network) + } + + /// Load a Scheduler from the DB. + fn from_db( + db: &D, + key: ::G, + network: NetworkId, + ) -> io::Result { + Scheduler::from_db::(db, key, network) + } + + /// Check if a branch is usable. + fn can_use_branch(&self, balance: Balance) -> bool { + Scheduler::can_use_branch(self, balance) + } + + /// Schedule a series of outputs/payments. 
+ fn schedule( + &mut self, + txn: &mut D::Transaction<'_>, + utxos: Vec, + payments: Vec>, + key_for_any_change: ::G, + force_spend: bool, + ) -> Vec> { + Scheduler::schedule::(self, txn, utxos, payments, key_for_any_change, force_spend) + } + + /// Consume all payments still pending within this Scheduler, without scheduling them. + fn consume_payments(&mut self, txn: &mut D::Transaction<'_>) -> Vec> { + Scheduler::consume_payments::(self, txn) + } + + /// Note a branch output as having been created, with the amount it was actually created with, + /// or not having been created due to being too small. + // TODO: Move this to Balance. + fn created_output( + &mut self, + txn: &mut D::Transaction<'_>, + expected: u64, + actual: Option, + ) { + Scheduler::created_output::(self, txn, expected, actual) + } + + fn refund_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + refund_to: N::Address, + ) -> Plan { + Plan { + key: output.key(), + // Uses a payment as this will still be successfully sent due to fee amortization, + // and because change is currently always a Serai key + payments: vec![Payment { address: refund_to, data: None, balance: output.balance() }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + } + } + + fn shim_forward_plan(output: N::Output, to: ::G) -> Option> { + Some(Plan { + key: output.key(), + payments: vec![Payment { + address: N::forward_address(to).unwrap(), + data: None, + balance: output.balance(), + }], + inputs: vec![output], + change: None, + scheduler_addendum: (), + }) + } + + fn forward_plan( + &mut self, + _: &mut D::Transaction<'_>, + output: N::Output, + to: ::G, + ) -> Option> { + assert_eq!(self.key, output.key()); + // Call shim as shim returns the actual + Self::shim_forward_plan(output, to) + } +} diff --git a/processor/src/networks/bitcoin.rs b/processor/src/networks/bitcoin.rs index 606a3e12..3f8174e4 100644 --- a/processor/src/networks/bitcoin.rs +++ b/processor/src/networks/bitcoin.rs 
@@ -52,9 +52,10 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, Payment, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -178,14 +179,6 @@ impl TransactionTrait for Transaction { hash.reverse(); hash } - fn serialize(&self) -> Vec { - let mut buf = vec![]; - self.consensus_encode(&mut buf).unwrap(); - buf - } - fn read(reader: &mut R) -> io::Result { - Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) - } #[cfg(test)] async fn fee(&self, network: &Bitcoin) -> u64 { @@ -209,7 +202,23 @@ impl TransactionTrait for Transaction { #[derive(Clone, PartialEq, Eq, Debug)] pub struct Eventuality([u8; 32]); +#[derive(Clone, PartialEq, Eq, Default, Debug)] +pub struct EmptyClaim; +impl AsRef<[u8]> for EmptyClaim { + fn as_ref(&self) -> &[u8] { + &[] + } +} +impl AsMut<[u8]> for EmptyClaim { + fn as_mut(&mut self) -> &mut [u8] { + &mut [] + } +} + impl EventualityTrait for Eventuality { + type Claim = EmptyClaim; + type Completion = Transaction; + fn lookup(&self) -> Vec { self.0.to_vec() } @@ -224,6 +233,18 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.0.to_vec() } + + fn claim(_: &Transaction) -> EmptyClaim { + EmptyClaim + } + fn serialize_completion(completion: &Transaction) -> Vec { + let mut buf = vec![]; + completion.consensus_encode(&mut buf).unwrap(); + buf + } + fn read_completion(reader: &mut R) -> io::Result { + Transaction::consensus_decode(reader).map_err(|e| io::Error::other(format!("{e}"))) + } } #[derive(Clone, Debug)] @@ -374,8 +395,12 @@ impl Bitcoin { for input in &tx.input { let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array(); input_tx.reverse(); - in_value 
+= self.get_transaction(&input_tx).await?.output - [usize::try_from(input.previous_output.vout).unwrap()] + in_value += self + .rpc + .get_transaction(&input_tx) + .await + .map_err(|_| NetworkError::ConnectionError)? + .output[usize::try_from(input.previous_output.vout).unwrap()] .value .to_sat(); } @@ -492,7 +517,7 @@ impl Bitcoin { if witness.len() >= 2 { let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone()); if Self::segwit_data_pattern(&redeem_script) == Some(true) { - data = witness[witness.len() - 2].clone(); // len() - 1 is the redeem_script + data.clone_from(&witness[witness.len() - 2]); // len() - 1 is the redeem_script break; } } @@ -537,6 +562,25 @@ impl Bitcoin { } } +// Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) +// A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes +// While our inputs are entirely SegWit, such fine tuning is not necessary and could create +// issues in the future (if the size decreases or we misevaluate it) +// It also offers a minimal amount of benefit when we are able to logarithmically accumulate +// inputs +// For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and +// 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 +// bytes +// 100,000 / 192 = 520 +// 520 * 192 leaves 160 bytes of overhead for the transaction structure itself +const MAX_INPUTS: usize = 520; +const MAX_OUTPUTS: usize = 520; + +fn address_from_key(key: ProjectivePoint) -> Address { + Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) + .unwrap() +} + #[async_trait] impl Network for Bitcoin { type Curve = Secp256k1; @@ -549,6 +593,8 @@ impl Network for Bitcoin { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Bitcoin; @@ -598,19 +644,7 @@ impl Network for 
Bitcoin { // aggregation TX const COST_TO_AGGREGATE: u64 = 800; - // Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT) - // A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes - // While our inputs are entirely SegWit, such fine tuning is not necessary and could create - // issues in the future (if the size decreases or we misevaluate it) - // It also offers a minimal amount of benefit when we are able to logarithmically accumulate - // inputs - // For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and - // 64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192 - // bytes - // 100,000 / 192 = 520 - // 520 * 192 leaves 160 bytes of overhead for the transaction structure itself - const MAX_INPUTS: usize = 520; - const MAX_OUTPUTS: usize = 520; + const MAX_OUTPUTS: usize = MAX_OUTPUTS; fn tweak_keys(keys: &mut ThresholdKeys) { *keys = tweak_keys(keys); @@ -618,24 +652,24 @@ impl Network for Bitcoin { scanner(keys.group_key()); } - fn external_address(key: ProjectivePoint) -> Address { - Address::new(BAddress::::new(BNetwork::Bitcoin, address_payload(key).unwrap())) - .unwrap() + #[cfg(test)] + async fn external_address(&self, key: ProjectivePoint) -> Address { + address_from_key(key) } - fn branch_address(key: ProjectivePoint) -> Address { + fn branch_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Branch]))) } - fn change_address(key: ProjectivePoint) -> Address { + fn change_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Change]))) } - fn forward_address(key: ProjectivePoint) -> Address { + fn forward_address(key: ProjectivePoint) -> Option
{ let (_, offsets, _) = scanner(key); - Self::external_address(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded])) + Some(address_from_key(key + (ProjectivePoint::GENERATOR * offsets[&OutputType::Forwarded]))) } async fn get_latest_block_number(&self) -> Result { @@ -682,7 +716,7 @@ impl Network for Bitcoin { spent_tx.reverse(); let mut tx; while { - tx = self.get_transaction(&spent_tx).await; + tx = self.rpc.get_transaction(&spent_tx).await; tx.is_err() } { log::error!("couldn't get transaction from bitcoin node: {tx:?}"); @@ -697,9 +731,9 @@ impl Network for Bitcoin { let data = Self::extract_serai_data(tx); for output in &mut outputs { if output.kind == OutputType::External { - output.data = data.clone(); + output.data.clone_from(&data); } - output.presumed_origin = presumed_origin.clone(); + output.presumed_origin.clone_from(&presumed_origin); } } @@ -710,7 +744,7 @@ impl Network for Bitcoin { &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -719,11 +753,11 @@ impl Network for Bitcoin { fn check_block( eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for tx in &block.txdata[1 ..] { if let Some((plan, _)) = eventualities.map.remove(tx.id().as_slice()) { - res.insert(plan, (eventualities.block_number, tx.clone())); + res.insert(plan, (eventualities.block_number, tx.id(), tx.clone())); } } @@ -770,7 +804,6 @@ impl Network for Bitcoin { async fn needed_fee( &self, block_number: usize, - _: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, @@ -787,9 +820,11 @@ impl Network for Bitcoin { &self, block_number: usize, plan_id: &[u8; 32], + _key: ProjectivePoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok(self.make_signable_transaction(block_number, inputs, payments, change, false).await?.map( |signable| { @@ -803,7 +838,7 @@ impl Network for Bitcoin { )) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, @@ -817,7 +852,7 @@ impl Network for Bitcoin { ) } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.send_raw_transaction(tx).await { Ok(_) => (), Err(RpcError::ConnectionError) => Err(NetworkError::ConnectionError)?, @@ -828,12 +863,14 @@ impl Network for Bitcoin { Ok(()) } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(id).await.map_err(|_| NetworkError::ConnectionError) - } - - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Transaction) -> bool { - eventuality.0 == tx.id() + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> Result, NetworkError> { + Ok(Some( + self.rpc.get_transaction(&eventuality.0).await.map_err(|_| NetworkError::ConnectionError)?, + )) } #[cfg(test)] @@ -841,6 +878,20 @@ impl Network for Bitcoin { self.rpc.get_block_number(id).await.unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + _: &EmptyClaim, + ) -> bool { + self.rpc.get_transaction(&eventuality.0).await.is_ok() + } + + #[cfg(test)] + async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction { + self.rpc.get_transaction(&id.0).await.unwrap() + } + #[cfg(test)] async fn mine_block(&self) { self @@ -892,3 +943,7 @@ impl Network for Bitcoin { self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Bitcoin { + const MAX_INPUTS: usize = MAX_INPUTS; +} diff --git a/processor/src/networks/ethereum.rs b/processor/src/networks/ethereum.rs 
new file mode 100644 index 00000000..4de08837 --- /dev/null +++ b/processor/src/networks/ethereum.rs @@ -0,0 +1,887 @@ +use core::{fmt, time::Duration}; +use std::{ + sync::Arc, + collections::{HashSet, HashMap}, + io, +}; + +use async_trait::async_trait; + +use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1}; +use frost::ThresholdKeys; + +use ethereum_serai::{ + alloy_core::primitives::U256, + alloy_rpc_types::{BlockNumberOrTag, Transaction}, + alloy_simple_request_transport::SimpleRequest, + alloy_rpc_client::ClientBuilder, + alloy_provider::{Provider, RootProvider}, + crypto::{PublicKey, Signature}, + erc20::Erc20, + deployer::Deployer, + router::{Router, Coin as EthereumCoin, InInstruction as EthereumInInstruction}, + machine::*, +}; +#[cfg(test)] +use ethereum_serai::alloy_core::primitives::B256; + +use tokio::{ + time::sleep, + sync::{RwLock, RwLockReadGuard}, +}; + +use serai_client::{ + primitives::{Coin, Amount, Balance, NetworkId}, + validator_sets::primitives::Session, +}; + +use crate::{ + Db, Payment, + networks::{ + OutputType, Output, Transaction as TransactionTrait, SignableTransaction, Block, + Eventuality as EventualityTrait, EventualitiesTracker, NetworkError, Network, + }, + key_gen::NetworkKeyDb, + multisigs::scheduler::{ + Scheduler as SchedulerTrait, + smart_contract::{Addendum, Scheduler}, + }, +}; + +#[cfg(not(test))] +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0x6B175474E89094C44Da98b954EedeAC495271d0F") { + Ok(res) => res, + Err(_) => panic!("invalid non-test DAI hex address"), + }; +#[cfg(test)] // TODO +const DAI: [u8; 20] = + match const_hex::const_decode_to_array(b"0000000000000000000000000000000000000000") { + Ok(res) => res, + Err(_) => panic!("invalid test DAI hex address"), + }; + +fn coin_to_serai_coin(coin: &EthereumCoin) -> Option { + match coin { + EthereumCoin::Ether => Some(Coin::Ether), + EthereumCoin::Erc20(token) => { + if *token == DAI { + return Some(Coin::Dai); + } + None + } + } +} + 
+fn amount_to_serai_amount(coin: Coin, amount: U256) -> Amount { + assert_eq!(coin.network(), NetworkId::Ethereum); + assert_eq!(coin.decimals(), 8); + // Remove 10 decimals so we go from 18 decimals to 8 decimals + let divisor = U256::from(10_000_000_000u64); + // This is valid up to 184b, which is assumed for the coins allowed + Amount(u64::try_from(amount / divisor).unwrap()) +} + +fn balance_to_ethereum_amount(balance: Balance) -> U256 { + assert_eq!(balance.coin.network(), NetworkId::Ethereum); + assert_eq!(balance.coin.decimals(), 8); + // Restore 10 decimals so we go from 8 decimals to 18 decimals + let factor = U256::from(10_000_000_000u64); + U256::from(balance.amount.0) * factor +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Address(pub [u8; 20]); +impl TryFrom> for Address { + type Error = (); + fn try_from(bytes: Vec) -> Result { + if bytes.len() != 20 { + Err(())?; + } + let mut res = [0; 20]; + res.copy_from_slice(&bytes); + Ok(Address(res)) + } +} +impl TryInto> for Address { + type Error = (); + fn try_into(self) -> Result, ()> { + Ok(self.0.to_vec()) + } +} + +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ethereum_serai::alloy_core::primitives::Address::from(self.0).fmt(f) + } +} + +impl SignableTransaction for RouterCommand { + fn fee(&self) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +#[async_trait] +impl TransactionTrait> for Transaction { + type Id = [u8; 32]; + fn id(&self) -> Self::Id { + self.hash.0 + } + + #[cfg(test)] + async fn fee(&self, _network: &Ethereum) -> u64 { + // Return a fee of 0 as we'll handle amortization on our end + 0 + } +} + +// We use 32-block Epochs to represent blocks. +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub struct Epoch { + // The hash of the block which ended the prior Epoch. + prior_end_hash: [u8; 32], + // The first block number within this Epoch. 
+ start: u64, + // The hash of the last block within this Epoch. + end_hash: [u8; 32], + // The monotonic time for this Epoch. + time: u64, +} + +impl Epoch { + fn end(&self) -> u64 { + self.start + 31 + } +} + +#[async_trait] +impl Block> for Epoch { + type Id = [u8; 32]; + fn id(&self) -> [u8; 32] { + self.end_hash + } + fn parent(&self) -> [u8; 32] { + self.prior_end_hash + } + async fn time(&self, _: &Ethereum) -> u64 { + self.time + } +} + +impl Output> for EthereumInInstruction { + type Id = [u8; 32]; + + fn kind(&self) -> OutputType { + OutputType::External + } + + fn id(&self) -> Self::Id { + let mut id = [0; 40]; + id[.. 32].copy_from_slice(&self.id.0); + id[32 ..].copy_from_slice(&self.id.1.to_le_bytes()); + *ethereum_serai::alloy_core::primitives::keccak256(id) + } + fn tx_id(&self) -> [u8; 32] { + self.id.0 + } + fn key(&self) -> ::G { + self.key_at_end_of_block + } + + fn presumed_origin(&self) -> Option
{ + Some(Address(self.from)) + } + + fn balance(&self) -> Balance { + let coin = coin_to_serai_coin(&self.coin).unwrap_or_else(|| { + panic!( + "requesting coin for an EthereumInInstruction with a coin {}", + "we don't handle. this never should have been yielded" + ) + }); + Balance { coin, amount: amount_to_serai_amount(coin, self.amount) } + } + fn data(&self) -> &[u8] { + &self.data + } + + fn write(&self, writer: &mut W) -> io::Result<()> { + EthereumInInstruction::write(self, writer) + } + fn read(reader: &mut R) -> io::Result { + EthereumInInstruction::read(reader) + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Claim { + signature: [u8; 64], +} +impl AsRef<[u8]> for Claim { + fn as_ref(&self) -> &[u8] { + &self.signature + } +} +impl AsMut<[u8]> for Claim { + fn as_mut(&mut self) -> &mut [u8] { + &mut self.signature + } +} +impl Default for Claim { + fn default() -> Self { + Self { signature: [0; 64] } + } +} +impl From<&Signature> for Claim { + fn from(sig: &Signature) -> Self { + Self { signature: sig.to_bytes() } + } +} + +#[derive(Clone, PartialEq, Eq, Debug)] +pub struct Eventuality(PublicKey, RouterCommand); +impl EventualityTrait for Eventuality { + type Claim = Claim; + type Completion = SignedRouterCommand; + + fn lookup(&self) -> Vec { + match self.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. 
} => { + nonce.as_le_bytes().to_vec() + } + } + } + + fn read(reader: &mut R) -> io::Result { + let point = Secp256k1::read_G(reader)?; + let command = RouterCommand::read(reader)?; + Ok(Eventuality( + PublicKey::new(point).ok_or(io::Error::other("unusable key within Eventuality"))?, + command, + )) + } + fn serialize(&self) -> Vec { + let mut res = vec![]; + res.extend(self.0.point().to_bytes().as_slice()); + self.1.write(&mut res).unwrap(); + res + } + + fn claim(completion: &Self::Completion) -> Self::Claim { + Claim::from(completion.signature()) + } + fn serialize_completion(completion: &Self::Completion) -> Vec { + let mut res = vec![]; + completion.write(&mut res).unwrap(); + res + } + fn read_completion(reader: &mut R) -> io::Result { + SignedRouterCommand::read(reader) + } +} + +#[derive(Clone, Debug)] +pub struct Ethereum { + // This DB is solely used to access the first key generated, as needed to determine the Router's + // address. Accordingly, all methods present are consistent to a Serai chain with a finalized + // first key (regardless of local state), and this is safe. 
+ db: D, + provider: Arc>, + deployer: Deployer, + router: Arc>>, +} +impl PartialEq for Ethereum { + fn eq(&self, _other: &Ethereum) -> bool { + true + } +} +impl Ethereum { + pub async fn new(db: D, url: String) -> Self { + let provider = Arc::new(RootProvider::new( + ClientBuilder::default().transport(SimpleRequest::new(url), true), + )); + + #[cfg(test)] // TODO: Move to test code + provider.raw_request::<_, ()>("evm_setAutomine".into(), false).await.unwrap(); + + let mut deployer = Deployer::new(provider.clone()).await; + while !matches!(deployer, Ok(Some(_))) { + log::error!("Deployer wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + deployer = Deployer::new(provider.clone()).await; + } + let deployer = deployer.unwrap().unwrap(); + + Ethereum { db, provider, deployer, router: Arc::new(RwLock::new(None)) } + } + + // Obtain a reference to the Router, sleeping until it's deployed if it hasn't already been. + // This is guaranteed to return Some. + pub async fn router(&self) -> RwLockReadGuard<'_, Option> { + // If we've already instantiated the Router, return a read reference + { + let router = self.router.read().await; + if router.is_some() { + return router; + } + } + + // Instantiate it + let mut router = self.router.write().await; + // If another attempt beat us to it, return + if router.is_some() { + drop(router); + return self.router.read().await; + } + + // Get the first key from the DB + let first_key = + NetworkKeyDb::get(&self.db, Session(0)).expect("getting outputs before confirming a key"); + let key = Secp256k1::read_G(&mut first_key.as_slice()).unwrap(); + let public_key = PublicKey::new(key).unwrap(); + + // Find the router + let mut found = self.deployer.find_router(self.provider.clone(), &public_key).await; + while !matches!(found, Ok(Some(_))) { + log::error!("Router wasn't deployed yet or networking error"); + sleep(Duration::from_secs(5)).await; + found = self.deployer.find_router(self.provider.clone(), 
&public_key).await; + } + + // Set it + *router = Some(found.unwrap().unwrap()); + + // Downgrade to a read lock + // Explicitly doesn't use `downgrade` so that another pending write txn can realize it's no + // longer necessary + drop(router); + self.router.read().await + } +} + +#[async_trait] +impl Network for Ethereum { + type Curve = Secp256k1; + + type Transaction = Transaction; + type Block = Epoch; + + type Output = EthereumInInstruction; + type SignableTransaction = RouterCommand; + type Eventuality = Eventuality; + type TransactionMachine = RouterCommandMachine; + + type Scheduler = Scheduler; + + type Address = Address; + + const NETWORK: NetworkId = NetworkId::Ethereum; + const ID: &'static str = "Ethereum"; + const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 32 * 12; + const CONFIRMATIONS: usize = 1; + + const DUST: u64 = 0; // TODO + + const COST_TO_AGGREGATE: u64 = 0; + + // TODO: usize::max, with a merkle tree in the router + const MAX_OUTPUTS: usize = 256; + + fn tweak_keys(keys: &mut ThresholdKeys) { + while PublicKey::new(keys.group_key()).is_none() { + *keys = keys.offset(::F::ONE); + } + } + + #[cfg(test)] + async fn external_address(&self, _key: ::G) -> Address { + Address(self.router().await.as_ref().unwrap().address()) + } + + fn branch_address(_key: ::G) -> Option
{ + None + } + + fn change_address(_key: ::G) -> Option
{ + None + } + + fn forward_address(_key: ::G) -> Option
{ + None + } + + async fn get_latest_block_number(&self) -> Result { + let actual_number = self + .provider + .get_block(BlockNumberOrTag::Finalized.into(), false) + .await + .map_err(|_| NetworkError::ConnectionError)? + .expect("no blocks were finalized") + .header + .number + .unwrap(); + // Error if there hasn't been a full epoch yet + if actual_number < 32 { + Err(NetworkError::ConnectionError)? + } + // If this is 33, the division will return 1, yet 1 is the epoch in progress + let latest_full_epoch = (actual_number / 32).saturating_sub(1); + Ok(latest_full_epoch.try_into().unwrap()) + } + + async fn get_block(&self, number: usize) -> Result { + let latest_finalized = self.get_latest_block_number().await?; + if number > latest_finalized { + Err(NetworkError::ConnectionError)? + } + + let start = number * 32; + let prior_end_hash = if start == 0 { + [0; 32] + } else { + self + .provider + .get_block(u64::try_from(start - 1).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? + .header + .hash + .unwrap() + .into() + }; + + let end_header = self + .provider + .get_block(u64::try_from(start + 31).unwrap().into(), false) + .await + .ok() + .flatten() + .ok_or(NetworkError::ConnectionError)? 
+ .header; + + let end_hash = end_header.hash.unwrap().into(); + let time = end_header.timestamp; + + Ok(Epoch { prior_end_hash, start: start.try_into().unwrap(), end_hash, time }) + } + + async fn get_outputs( + &self, + block: &Self::Block, + _: ::G, + ) -> Vec { + let router = self.router().await; + let router = router.as_ref().unwrap(); + // Grab the key at the end of the epoch + let key_at_end_of_block = loop { + match router.key_at_end_of_block(block.start + 31).await { + Ok(key) => break key, + Err(e) => { + log::error!("couldn't connect to router for the key at the end of the block: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; + + let mut all_events = vec![]; + let mut top_level_txids = HashSet::new(); + for erc20_addr in [DAI] { + let erc20 = loop { + let Ok(Some(erc20)) = Erc20::new(self.provider.clone(), erc20_addr).await else { + log::error!( + "couldn't connect to Ethereum node for an ERC20: {}", + hex::encode(erc20_addr) + ); + sleep(Duration::from_secs(5)).await; + continue; + }; + break erc20; + }; + + for block in block.start .. (block.start + 32) { + let transfers = loop { + match erc20.top_level_transfers(block, router.address()).await { + Ok(transfers) => break transfers, + Err(e) => { + log::error!("couldn't connect to Ethereum node for the top-level transfers: {e:?}"); + sleep(Duration::from_secs(5)).await; + continue; + } + } + }; + + for transfer in transfers { + top_level_txids.insert(transfer.id); + all_events.push(EthereumInInstruction { + id: (transfer.id, 0), + from: transfer.from, + coin: EthereumCoin::Erc20(erc20_addr), + amount: transfer.amount, + data: transfer.data, + key_at_end_of_block, + }); + } + } + } + + for block in block.start .. 
(block.start + 32) { + let mut events = router.in_instructions(block, &HashSet::from([DAI])).await; + while let Err(e) = events { + log::error!("couldn't connect to Ethereum node for the Router's events: {e:?}"); + sleep(Duration::from_secs(5)).await; + events = router.in_instructions(block, &HashSet::from([DAI])).await; + } + let mut events = events.unwrap(); + for event in &mut events { + // A transaction should either be a top-level transfer or a Router InInstruction + if top_level_txids.contains(&event.id.0) { + panic!("top-level transfer had {} and router had {:?}", hex::encode(event.id.0), event); + } + // Overwrite the key at end of block to key at end of epoch + event.key_at_end_of_block = key_at_end_of_block; + } + all_events.extend(events); + } + + for event in &all_events { + assert!( + coin_to_serai_coin(&event.coin).is_some(), + "router yielded events for unrecognized coins" + ); + } + all_events + } + + async fn get_eventuality_completions( + &self, + eventualities: &mut EventualitiesTracker, + block: &Self::Block, + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + > { + let mut res = HashMap::new(); + if eventualities.map.is_empty() { + return res; + } + + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let past_scanned_epoch = loop { + match self.get_block(eventualities.block_number).await { + Ok(block) => break block, + Err(e) => log::error!("couldn't get the last scanned block in the tracker: {}", e), + } + sleep(Duration::from_secs(10)).await; + }; + assert_eq!( + past_scanned_epoch.start / 32, + u64::try_from(eventualities.block_number).unwrap(), + "assumption of tracker block number's relation to epoch start is incorrect" + ); + + // Iterate from after the epoch number in the tracker to the end of this epoch + for block_num in (past_scanned_epoch.end() + 1) ..= block.end() { + let executed = loop { + match router.executed_commands(block_num).await { + Ok(executed) => break executed, + Err(e) 
=> log::error!("couldn't get the executed commands in block {block_num}: {e}"), + } + sleep(Duration::from_secs(10)).await; + }; + + for executed in executed { + let lookup = executed.nonce.to_le_bytes().to_vec(); + if let Some((plan_id, eventuality)) = eventualities.map.get(&lookup) { + if let Some(command) = + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &executed.signature) + { + res.insert(*plan_id, (block_num.try_into().unwrap(), executed.tx_id, command)); + eventualities.map.remove(&lookup); + } + } + } + } + eventualities.block_number = (block.start / 32).try_into().unwrap(); + + res + } + + async fn needed_fee( + &self, + _block_number: usize, + inputs: &[Self::Output], + _payments: &[Payment], + _change: &Option, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + // Claim no fee is needed so we can perform amortization ourselves + Ok(Some(0)) + } + + async fn signable_transaction( + &self, + _block_number: usize, + _plan_id: &[u8; 32], + key: ::G, + inputs: &[Self::Output], + payments: &[Payment], + change: &Option, + scheduler_addendum: &>::Addendum, + ) -> Result, NetworkError> { + assert_eq!(inputs.len(), 0); + assert!(change.is_none()); + let chain_id = self.provider.get_chain_id().await.map_err(|_| NetworkError::ConnectionError)?; + + // TODO: Perform fee amortization (in scheduler? + // TODO: Make this function internal and have needed_fee properly return None as expected? + // TODO: signable_transaction is written as cannot return None if needed_fee returns Some + // TODO: Why can this return None at all if it isn't allowed to return None? 
+ + let command = match scheduler_addendum { + Addendum::Nonce(nonce) => RouterCommand::Execute { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + outs: payments + .iter() + .filter_map(|payment| { + Some(OutInstruction { + target: if let Some(data) = payment.data.as_ref() { + // This introspects the Call serialization format, expecting the first 20 bytes to + // be the address + // This avoids wasting the 20-bytes allocated within address + let full_data = [payment.address.0.as_slice(), data].concat(); + let mut reader = full_data.as_slice(); + + let mut calls = vec![]; + while !reader.is_empty() { + calls.push(Call::read(&mut reader).ok()?) + } + // The above must have executed at least once since reader contains the address + assert_eq!(calls[0].to, payment.address.0); + + OutInstructionTarget::Calls(calls) + } else { + OutInstructionTarget::Direct(payment.address.0) + }, + value: { + assert_eq!(payment.balance.coin, Coin::Ether); // TODO + balance_to_ethereum_amount(payment.balance) + }, + }) + }) + .collect(), + }, + Addendum::RotateTo { nonce, new_key } => { + assert!(payments.is_empty()); + RouterCommand::UpdateSeraiKey { + chain_id: U256::try_from(chain_id).unwrap(), + nonce: U256::try_from(*nonce).unwrap(), + key: PublicKey::new(*new_key).expect("new key wasn't a valid ETH public key"), + } + } + }; + Ok(Some(( + command.clone(), + Eventuality(PublicKey::new(key).expect("key wasn't a valid ETH public key"), command), + ))) + } + + async fn attempt_sign( + &self, + keys: ThresholdKeys, + transaction: Self::SignableTransaction, + ) -> Result { + Ok( + RouterCommandMachine::new(keys, transaction) + .expect("keys weren't usable to sign router commands"), + ) + } + + async fn publish_completion( + &self, + completion: &::Completion, + ) -> Result<(), NetworkError> { + // Publish this to the dedicated TX server for a solver to actually publish + #[cfg(not(test))] + { + let _ = completion; + todo!("TODO"); + } + + // 
Publish this using a dummy account we fund with magic RPC commands + #[cfg(test)] + { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let mut tx = match completion.command() { + RouterCommand::UpdateSeraiKey { key, .. } => { + router.update_serai_key(key, completion.signature()) + } + RouterCommand::Execute { outs, .. } => router.execute( + &outs.iter().cloned().map(Into::into).collect::>(), + completion.signature(), + ), + }; + tx.gas_price = 100_000_000_000u128; + + use ethereum_serai::alloy_consensus::SignableTransaction; + let sig = + k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let _ = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + Ok(()) + } + } + + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError> { + Ok(SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature)) + } + + #[cfg(test)] + async fn get_block_number(&self, id: &>::Id) -> usize { + self + .provider + .get_block(B256::from(*id).into(), false) + .await + .unwrap() + .unwrap() + .header + .number + .unwrap() + .try_into() + .unwrap() + } + + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool { + SignedRouterCommand::new(&eventuality.0, eventuality.1.clone(), &claim.signature).is_some() + } + + 
#[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction { + match eventuality.1 { + RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => { + let router = self.router().await; + let router = router.as_ref().unwrap(); + + let block = u64::try_from(block).unwrap(); + let filter = router + .key_updated_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + if let Some(log) = logs.first() { + return self + .provider + .get_transaction_by_hash(log.clone().transaction_hash.unwrap()) + .await + .unwrap(); + }; + + let filter = router + .executed_filter() + .from_block(block * 32) + .to_block(((block + 1) * 32) - 1) + .topic1(nonce); + let logs = self.provider.get_logs(&filter).await.unwrap(); + self.provider.get_transaction_by_hash(logs[0].transaction_hash.unwrap()).await.unwrap() + } + } + } + + #[cfg(test)] + async fn mine_block(&self) { + self.provider.raw_request::<_, ()>("anvil_mine".into(), [32]).await.unwrap(); + } + + #[cfg(test)] + async fn test_send(&self, send_to: Self::Address) -> Self::Block { + use rand_core::OsRng; + use ciphersuite::group::ff::Field; + + let key = ::F::random(&mut OsRng); + let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key)); + + // Set a 1.1 ETH balance + self + .provider + .raw_request::<_, ()>( + "anvil_setBalance".into(), + [Address(address).to_string(), "1100000000000000000".into()], + ) + .await + .unwrap(); + + let tx = ethereum_serai::alloy_consensus::TxLegacy { + chain_id: None, + nonce: 0, + gas_price: 100_000_000_000u128, + gas_limit: 21_0000u128, + to: ethereum_serai::alloy_core::primitives::TxKind::Call(send_to.0.into()), + // 1 ETH + value: U256::from_str_radix("1000000000000000000", 10).unwrap(), + input: vec![].into(), + }; + + use ethereum_serai::alloy_consensus::SignableTransaction; + 
let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap()) + .sign_prehash_recoverable(tx.signature_hash().as_ref()) + .unwrap(); + + let mut bytes = vec![]; + tx.encode_with_signature_fields(&sig.into(), &mut bytes); + let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap(); + + // Mine an epoch containing this TX + self.mine_block().await; + assert!(pending_tx.get_receipt().await.unwrap().status()); + // Yield the freshly mined block + self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap() + } +} diff --git a/processor/src/networks/mod.rs b/processor/src/networks/mod.rs index d77d43f1..803ed40a 100644 --- a/processor/src/networks/mod.rs +++ b/processor/src/networks/mod.rs @@ -21,12 +21,17 @@ pub mod bitcoin; #[cfg(feature = "bitcoin")] pub use self::bitcoin::Bitcoin; +#[cfg(feature = "ethereum")] +pub mod ethereum; +#[cfg(feature = "ethereum")] +pub use ethereum::Ethereum; + #[cfg(feature = "monero")] pub mod monero; #[cfg(feature = "monero")] pub use monero::Monero; -use crate::{Payment, Plan}; +use crate::{Payment, Plan, multisigs::scheduler::Scheduler}; #[derive(Clone, Copy, Error, Debug)] pub enum NetworkError { @@ -105,7 +110,7 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb fn kind(&self) -> OutputType; fn id(&self) -> Self::Id; - fn tx_id(&self) -> >::Id; + fn tx_id(&self) -> >::Id; // TODO: Review use of fn key(&self) -> ::G; fn presumed_origin(&self) -> Option; @@ -118,25 +123,33 @@ pub trait Output: Send + Sync + Sized + Clone + PartialEq + Eq + Deb } #[async_trait] -pub trait Transaction: Send + Sync + Sized + Clone + Debug { +pub trait Transaction: Send + Sync + Sized + Clone + PartialEq + Debug { type Id: 'static + Id; fn id(&self) -> Self::Id; - fn serialize(&self) -> Vec; - fn read(reader: &mut R) -> io::Result; - + // TODO: Move to Balance #[cfg(test)] async fn fee(&self, network: &N) -> u64; } pub trait SignableTransaction: Send + Sync + 
Clone + Debug { + // TODO: Move to Balance fn fee(&self) -> u64; } -pub trait Eventuality: Send + Sync + Clone + Debug { +pub trait Eventuality: Send + Sync + Clone + PartialEq + Debug { + type Claim: Send + Sync + Clone + PartialEq + Default + AsRef<[u8]> + AsMut<[u8]> + Debug; + type Completion: Send + Sync + Clone + PartialEq + Debug; + fn lookup(&self) -> Vec; fn read(reader: &mut R) -> io::Result; fn serialize(&self) -> Vec; + + fn claim(completion: &Self::Completion) -> Self::Claim; + + // TODO: Make a dedicated Completion trait + fn serialize_completion(completion: &Self::Completion) -> Vec; + fn read_completion(reader: &mut R) -> io::Result; } #[derive(Clone, PartialEq, Eq, Debug)] @@ -211,7 +224,7 @@ fn drop_branches( ) -> Vec { let mut branch_outputs = vec![]; for payment in payments { - if payment.address == N::branch_address(key) { + if Some(&payment.address) == N::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: payment.balance.amount.0, actual: None }); } } @@ -227,12 +240,12 @@ pub struct PreparedSend { } #[async_trait] -pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { +pub trait Network: 'static + Send + Sync + Clone + PartialEq + Debug { /// The elliptic curve used for this network. type Curve: Curve; /// The type representing the transaction for this network. - type Transaction: Transaction; + type Transaction: Transaction; // TODO: Review use of /// The type representing the block for this network. type Block: Block; @@ -246,7 +259,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// This must be binding to both the outputs expected and the plan ID. type Eventuality: Eventuality; /// The FROST machine to sign a transaction. - type TransactionMachine: PreprocessMachine; + type TransactionMachine: PreprocessMachine< + Signature = ::Completion, + >; + + /// The scheduler for this network. + type Scheduler: Scheduler; /// The type representing an address. 
// This should NOT be a String, yet a tailored type representing an efficient binary encoding, @@ -269,10 +287,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize; /// The amount of confirmations required to consider a block 'final'. const CONFIRMATIONS: usize; - /// The maximum amount of inputs which will fit in a TX. - /// This should be equal to MAX_OUTPUTS unless one is specifically limited. - /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. - const MAX_INPUTS: usize; /// The maximum amount of outputs which will fit in a TX. /// This should be equal to MAX_INPUTS unless one is specifically limited. /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. @@ -293,13 +307,16 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { fn tweak_keys(key: &mut ThresholdKeys); /// Address for the given group key to receive external coins to. - fn external_address(key: ::G) -> Self::Address; + #[cfg(test)] + async fn external_address(&self, key: ::G) -> Self::Address; /// Address for the given group key to use for scheduled branches. - fn branch_address(key: ::G) -> Self::Address; + fn branch_address(key: ::G) -> Option; /// Address for the given group key to use for change. - fn change_address(key: ::G) -> Self::Address; + fn change_address(key: ::G) -> Option; /// Address for forwarded outputs from prior multisigs. - fn forward_address(key: ::G) -> Self::Address; + /// + /// forward_address must only return None if explicit forwarding isn't necessary. + fn forward_address(key: ::G) -> Option; /// Get the latest block's number. async fn get_latest_block_number(&self) -> Result; @@ -349,13 +366,24 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// registered eventualities may have been completed in. /// /// This may panic if not fed a block greater than the tracker's block number. 
+ /// + /// Plan ID -> (block number, TX ID, completion) // TODO: get_eventuality_completions_internal + provided get_eventuality_completions for common // code + // TODO: Consider having this return the Transaction + the Completion? + // Or Transaction with extract_completion? async fn get_eventuality_completions( &self, eventualities: &mut EventualitiesTracker, block: &Self::Block, - ) -> HashMap<[u8; 32], (usize, Self::Transaction)>; + ) -> HashMap< + [u8; 32], + ( + usize, + >::Id, + ::Completion, + ), + >; /// Returns the needed fee to fulfill this Plan at this fee rate. /// @@ -363,7 +391,6 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Self::Output], payments: &[Payment], change: &Option, @@ -375,16 +402,25 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { /// 1) Call needed_fee /// 2) If the Plan is fulfillable, amortize the fee /// 3) Call signable_transaction *which MUST NOT return None if the above was done properly* + /// + /// This takes a destructured Plan as some of these arguments are malleated from the original + /// Plan. + // TODO: Explicit AmortizedPlan? + #[allow(clippy::too_many_arguments)] async fn signable_transaction( &self, block_number: usize, plan_id: &[u8; 32], + key: ::G, inputs: &[Self::Output], payments: &[Payment], change: &Option, + scheduler_addendum: &>::Addendum, ) -> Result, NetworkError>; /// Prepare a SignableTransaction for a transaction. + /// + /// This must not persist anything as we will prepare Plans we never intend to execute. 
async fn prepare_send( &self, block_number: usize, @@ -395,13 +431,12 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { assert!((!plan.payments.is_empty()) || plan.change.is_some()); let plan_id = plan.id(); - let Plan { key, inputs, mut payments, change } = plan; + let Plan { key, inputs, mut payments, change, scheduler_addendum } = plan; let theoretical_change_amount = inputs.iter().map(|input| input.balance().amount.0).sum::() - payments.iter().map(|payment| payment.balance.amount.0).sum::(); - let Some(tx_fee) = self.needed_fee(block_number, &plan_id, &inputs, &payments, &change).await? - else { + let Some(tx_fee) = self.needed_fee(block_number, &inputs, &payments, &change).await? else { // This Plan is not fulfillable // TODO: Have Plan explicitly distinguish payments and branches in two separate Vecs? return Ok(PreparedSend { @@ -466,7 +501,7 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { // Note the branch outputs' new values let mut branch_outputs = vec![]; for (initial_amount, payment) in initial_payment_amounts.into_iter().zip(&payments) { - if payment.address == Self::branch_address(key) { + if Some(&payment.address) == Self::branch_address(key).as_ref() { branch_outputs.push(PostFeeBranch { expected: initial_amount, actual: if payment.balance.amount.0 == 0 { @@ -508,11 +543,20 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { ) })(); - let Some(tx) = - self.signable_transaction(block_number, &plan_id, &inputs, &payments, &change).await? + let Some(tx) = self + .signable_transaction( + block_number, + &plan_id, + key, + &inputs, + &payments, + &change, + &scheduler_addendum, + ) + .await? else { panic!( - "{}. {}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}", + "{}. 
{}: {}, {}: {:?}, {}: {:?}, {}: {:?}, {}: {}, {}: {:?}", "signable_transaction returned None for a TX we prior successfully calculated the fee for", "id", hex::encode(plan_id), @@ -524,6 +568,8 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { change, "successfully amoritized fee", tx_fee, + "scheduler's addendum", + scheduler_addendum, ) }; @@ -546,31 +592,49 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { } /// Attempt to sign a SignableTransaction. - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: Self::SignableTransaction, ) -> Result; - /// Publish a transaction. - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError>; - - /// Get a transaction by its ID. - async fn get_transaction( + /// Publish a completion. + async fn publish_completion( &self, - id: &>::Id, - ) -> Result; + completion: &::Completion, + ) -> Result<(), NetworkError>; - /// Confirm a plan was completed by the specified transaction. - // This is allowed to take shortcuts. - // This may assume an honest multisig, solely checking the inputs specified were spent. - // This may solely check the outputs are equivalent *so long as it's locked to the plan ID*. - fn confirm_completion(&self, eventuality: &Self::Eventuality, tx: &Self::Transaction) -> bool; + /// Confirm a plan was completed by the specified transaction, per our bounds. + /// + /// Returns Err if there was an error with the confirmation methodology. + /// Returns Ok(None) if this is not a valid completion. + /// Returns Ok(Some(_)) with the completion if it's valid. + async fn confirm_completion( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> Result::Completion>, NetworkError>; /// Get a block's number by its ID. #[cfg(test)] async fn get_block_number(&self, id: &>::Id) -> usize; + /// Check an Eventuality is fulfilled by a claim. 
+ #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &::Claim, + ) -> bool; + + /// Get a transaction by the Eventuality it completes. + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Self::Eventuality, + ) -> Self::Transaction; + #[cfg(test)] async fn mine_block(&self); @@ -579,3 +643,10 @@ pub trait Network: 'static + Send + Sync + Clone + PartialEq + Eq + Debug { #[cfg(test)] async fn test_send(&self, key: Self::Address) -> Self::Block; } + +pub trait UtxoNetwork: Network { + /// The maximum amount of inputs which will fit in a TX. + /// This should be equal to MAX_OUTPUTS unless one is specifically limited. + /// A TX with MAX_INPUTS and MAX_OUTPUTS must not exceed the max size. + const MAX_INPUTS: usize; +} diff --git a/processor/src/networks/monero.rs b/processor/src/networks/monero.rs index 8d58ee1a..8d4d1760 100644 --- a/processor/src/networks/monero.rs +++ b/processor/src/networks/monero.rs @@ -39,8 +39,9 @@ use crate::{ networks::{ NetworkError, Block as BlockTrait, OutputType, Output as OutputTrait, Transaction as TransactionTrait, SignableTransaction as SignableTransactionTrait, - Eventuality as EventualityTrait, EventualitiesTracker, Network, + Eventuality as EventualityTrait, EventualitiesTracker, Network, UtxoNetwork, }, + multisigs::scheduler::utxo::Scheduler, }; #[derive(Clone, PartialEq, Eq, Debug)] @@ -117,12 +118,6 @@ impl TransactionTrait for Transaction { fn id(&self) -> Self::Id { self.hash() } - fn serialize(&self) -> Vec { - self.serialize() - } - fn read(reader: &mut R) -> io::Result { - Transaction::read(reader) - } #[cfg(test)] async fn fee(&self, _: &Monero) -> u64 { @@ -131,6 +126,9 @@ impl TransactionTrait for Transaction { } impl EventualityTrait for Eventuality { + type Claim = [u8; 32]; + type Completion = Transaction; + // Use the TX extra to look up potential matches // While anyone can forge this, a transaction with distinct 
outputs won't actually match // Extra includess the one time keys which are derived from the plan ID, so a collision here is a @@ -145,6 +143,16 @@ impl EventualityTrait for Eventuality { fn serialize(&self) -> Vec { self.serialize() } + + fn claim(tx: &Transaction) -> [u8; 32] { + tx.id() + } + fn serialize_completion(completion: &Transaction) -> Vec { + completion.serialize() + } + fn read_completion(reader: &mut R) -> io::Result { + Transaction::read(reader) + } } #[derive(Clone, Debug)] @@ -274,7 +282,8 @@ impl Monero { async fn median_fee(&self, block: &Block) -> Result { let mut fees = vec![]; for tx_hash in &block.txs { - let tx = self.get_transaction(tx_hash).await?; + let tx = + self.rpc.get_transaction(*tx_hash).await.map_err(|_| NetworkError::ConnectionError)?; // Only consider fees from RCT transactions, else the fee property read wouldn't be accurate if tx.rct_signatures.rct_type() != RctType::Null { continue; @@ -454,6 +463,8 @@ impl Network for Monero { type Eventuality = Eventuality; type TransactionMachine = TransactionMachine; + type Scheduler = Scheduler; + type Address = Address; const NETWORK: NetworkId = NetworkId::Monero; @@ -461,11 +472,6 @@ impl Network for Monero { const ESTIMATED_BLOCK_TIME_IN_SECONDS: usize = 120; const CONFIRMATIONS: usize = 10; - // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction - // larger than 150kb. 
This fits within the 100kb mark - // Technically, it can be ~124, yet a small bit of buffer is appreciated - // TODO: Test creating a TX this big - const MAX_INPUTS: usize = 120; const MAX_OUTPUTS: usize = 16; // 0.01 XMR @@ -478,20 +484,21 @@ impl Network for Monero { // Monero doesn't require/benefit from tweaking fn tweak_keys(_: &mut ThresholdKeys) {} - fn external_address(key: EdwardsPoint) -> Address { + #[cfg(test)] + async fn external_address(&self, key: EdwardsPoint) -> Address { Self::address_internal(key, EXTERNAL_SUBADDRESS) } - fn branch_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, BRANCH_SUBADDRESS) + fn branch_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, BRANCH_SUBADDRESS)) } - fn change_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, CHANGE_SUBADDRESS) + fn change_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, CHANGE_SUBADDRESS)) } - fn forward_address(key: EdwardsPoint) -> Address { - Self::address_internal(key, FORWARD_SUBADDRESS) + fn forward_address(key: EdwardsPoint) -> Option
{ + Some(Self::address_internal(key, FORWARD_SUBADDRESS)) } async fn get_latest_block_number(&self) -> Result { @@ -558,7 +565,7 @@ impl Network for Monero { &self, eventualities: &mut EventualitiesTracker, block: &Block, - ) -> HashMap<[u8; 32], (usize, Transaction)> { + ) -> HashMap<[u8; 32], (usize, [u8; 32], Transaction)> { let mut res = HashMap::new(); if eventualities.map.is_empty() { return res; @@ -568,13 +575,13 @@ impl Network for Monero { network: &Monero, eventualities: &mut EventualitiesTracker, block: &Block, - res: &mut HashMap<[u8; 32], (usize, Transaction)>, + res: &mut HashMap<[u8; 32], (usize, [u8; 32], Transaction)>, ) { for hash in &block.txs { let tx = { let mut tx; while { - tx = network.get_transaction(hash).await; + tx = network.rpc.get_transaction(*hash).await; tx.is_err() } { log::error!("couldn't get transaction {}: {}", hex::encode(hash), tx.err().unwrap()); @@ -587,7 +594,7 @@ impl Network for Monero { if eventuality.matches(&tx) { res.insert( eventualities.map.remove(&tx.prefix.extra).unwrap().0, - (usize::try_from(block.number().unwrap()).unwrap(), tx), + (usize::try_from(block.number().unwrap()).unwrap(), tx.id(), tx), ); } } @@ -625,14 +632,13 @@ impl Network for Monero { async fn needed_fee( &self, block_number: usize, - plan_id: &[u8; 32], inputs: &[Output], payments: &[Payment], change: &Option
, ) -> Result, NetworkError> { Ok( self - .make_signable_transaction(block_number, plan_id, inputs, payments, change, true) + .make_signable_transaction(block_number, &[0; 32], inputs, payments, change, true) .await? .map(|(_, signable)| signable.fee()), ) @@ -642,9 +648,11 @@ impl Network for Monero { &self, block_number: usize, plan_id: &[u8; 32], + _key: EdwardsPoint, inputs: &[Output], payments: &[Payment], change: &Option
, + (): &(), ) -> Result, NetworkError> { Ok( self @@ -658,7 +666,7 @@ impl Network for Monero { ) } - async fn attempt_send( + async fn attempt_sign( &self, keys: ThresholdKeys, transaction: SignableTransaction, @@ -669,7 +677,7 @@ impl Network for Monero { } } - async fn publish_transaction(&self, tx: &Self::Transaction) -> Result<(), NetworkError> { + async fn publish_completion(&self, tx: &Transaction) -> Result<(), NetworkError> { match self.rpc.publish_transaction(tx).await { Ok(()) => Ok(()), Err(RpcError::ConnectionError(e)) => { @@ -682,12 +690,17 @@ impl Network for Monero { } } - async fn get_transaction(&self, id: &[u8; 32]) -> Result { - self.rpc.get_transaction(*id).await.map_err(map_rpc_err) - } - - fn confirm_completion(&self, eventuality: &Eventuality, tx: &Transaction) -> bool { - eventuality.matches(tx) + async fn confirm_completion( + &self, + eventuality: &Eventuality, + id: &[u8; 32], + ) -> Result, NetworkError> { + let tx = self.rpc.get_transaction(*id).await.map_err(map_rpc_err)?; + if eventuality.matches(&tx) { + Ok(Some(tx)) + } else { + Ok(None) + } } #[cfg(test)] @@ -695,6 +708,31 @@ impl Network for Monero { self.rpc.get_block(*id).await.unwrap().number().unwrap().try_into().unwrap() } + #[cfg(test)] + async fn check_eventuality_by_claim( + &self, + eventuality: &Self::Eventuality, + claim: &[u8; 32], + ) -> bool { + return eventuality.matches(&self.rpc.get_transaction(*claim).await.unwrap()); + } + + #[cfg(test)] + async fn get_transaction_by_eventuality( + &self, + block: usize, + eventuality: &Eventuality, + ) -> Transaction { + let block = self.rpc.get_block_by_number(block).await.unwrap(); + for tx in &block.txs { + let tx = self.rpc.get_transaction(*tx).await.unwrap(); + if eventuality.matches(&tx) { + return tx; + } + } + panic!("block didn't have a transaction for this eventuality") + } + #[cfg(test)] async fn mine_block(&self) { // https://github.com/serai-dex/serai/issues/198 @@ -775,3 +813,11 @@ impl Network for Monero { 
self.get_block(block).await.unwrap() } } + +impl UtxoNetwork for Monero { + // wallet2 will not create a transaction larger than 100kb, and Monero won't relay a transaction + // larger than 150kb. This fits within the 100kb mark + // Technically, it can be ~124, yet a small bit of buffer is appreciated + // TODO: Test creating a TX this big + const MAX_INPUTS: usize = 120; +} diff --git a/processor/src/plan.rs b/processor/src/plan.rs index 3e10c7d3..58a8a5e1 100644 --- a/processor/src/plan.rs +++ b/processor/src/plan.rs @@ -8,7 +8,10 @@ use frost::curve::Ciphersuite; use serai_client::primitives::Balance; -use crate::networks::{Output, Network}; +use crate::{ + networks::{Output, Network}, + multisigs::scheduler::{SchedulerAddendum, Scheduler}, +}; #[derive(Clone, PartialEq, Eq, Debug)] pub struct Payment { @@ -73,7 +76,7 @@ impl Payment { } } -#[derive(Clone, PartialEq, Eq)] +#[derive(Clone, PartialEq)] pub struct Plan { pub key: ::G, pub inputs: Vec, @@ -90,7 +93,11 @@ pub struct Plan { /// This MUST contain a Serai address. Operating costs may be deducted from the payments in this /// Plan on the premise that the change address is Serai's, and accordingly, Serai will recoup /// the operating costs. + // + // TODO: Consider moving to ::G? pub change: Option, + /// The scheduler's additional data. 
+ pub scheduler_addendum: >::Addendum, } impl core::fmt::Debug for Plan { fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> { @@ -100,6 +107,7 @@ impl core::fmt::Debug for Plan { .field("inputs", &self.inputs) .field("payments", &self.payments) .field("change", &self.change.as_ref().map(ToString::to_string)) + .field("scheduler_addendum", &self.scheduler_addendum) .finish() } } @@ -125,6 +133,10 @@ impl Plan { transcript.append_message(b"change", change.to_string()); } + let mut addendum_bytes = vec![]; + self.scheduler_addendum.write(&mut addendum_bytes).unwrap(); + transcript.append_message(b"scheduler_addendum", addendum_bytes); + transcript } @@ -161,7 +173,8 @@ impl Plan { }; assert!(serai_client::primitives::MAX_ADDRESS_LEN <= u8::MAX.into()); writer.write_all(&[u8::try_from(change.len()).unwrap()])?; - writer.write_all(&change) + writer.write_all(&change)?; + self.scheduler_addendum.write(writer) } pub fn read(reader: &mut R) -> io::Result { @@ -193,6 +206,7 @@ impl Plan { })?) 
}; - Ok(Plan { key, inputs, payments, change }) + let scheduler_addendum = >::Addendum::read(reader)?; + Ok(Plan { key, inputs, payments, change, scheduler_addendum }) } } diff --git a/processor/src/signer.rs b/processor/src/signer.rs index 7a4fcbed..cab0bceb 100644 --- a/processor/src/signer.rs +++ b/processor/src/signer.rs @@ -2,7 +2,6 @@ use core::{marker::PhantomData, fmt}; use std::collections::HashMap; use rand_core::OsRng; -use ciphersuite::group::GroupEncoding; use frost::{ ThresholdKeys, FrostError, sign::{Writable, PreprocessMachine, SignMachine, SignatureMachine}, @@ -17,7 +16,7 @@ pub use serai_db::*; use crate::{ Get, DbTxn, Db, - networks::{Transaction, Eventuality, Network}, + networks::{Eventuality, Network}, }; create_db!( @@ -25,7 +24,7 @@ create_db!( CompletionsDb: (id: [u8; 32]) -> Vec, EventualityDb: (id: [u8; 32]) -> Vec, AttemptDb: (id: &SignId) -> (), - TransactionDb: (id: &[u8]) -> Vec, + CompletionDb: (claim: &[u8]) -> Vec, ActiveSignsDb: () -> Vec<[u8; 32]>, CompletedOnChainDb: (id: &[u8; 32]) -> (), } @@ -59,12 +58,20 @@ impl CompletionsDb { fn completions( getter: &impl Get, id: [u8; 32], - ) -> Vec<>::Id> { - let completions = Self::get(getter, id).unwrap_or_default(); + ) -> Vec<::Claim> { + let Some(completions) = Self::get(getter, id) else { return vec![] }; + + // If this was set yet is empty, it's because it's the encoding of a claim with a length of 0 + if completions.is_empty() { + let default = ::Claim::default(); + assert_eq!(default.as_ref().len(), 0); + return vec![default]; + } + let mut completions_ref = completions.as_slice(); let mut res = vec![]; while !completions_ref.is_empty() { - let mut id = >::Id::default(); + let mut id = ::Claim::default(); let id_len = id.as_ref().len(); id.as_mut().copy_from_slice(&completions_ref[.. 
id_len]); completions_ref = &completions_ref[id_len ..]; @@ -73,25 +80,37 @@ impl CompletionsDb { res } - fn complete(txn: &mut impl DbTxn, id: [u8; 32], tx: &N::Transaction) { - let tx_id = tx.id(); - // Transactions can be completed by multiple signatures + fn complete( + txn: &mut impl DbTxn, + id: [u8; 32], + completion: &::Completion, + ) { + // Completions can be completed by multiple signatures // Save every solution in order to be robust - TransactionDb::save_transaction::(txn, tx); - let mut existing = Self::get(txn, id).unwrap_or_default(); - // Don't add this TX if it's already present - let tx_len = tx_id.as_ref().len(); - assert_eq!(existing.len() % tx_len, 0); + CompletionDb::save_completion::(txn, completion); - let mut i = 0; - while i < existing.len() { - if &existing[i .. (i + tx_len)] == tx_id.as_ref() { - return; - } - i += tx_len; + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + + // If claim has a 0-byte encoding, the set key, even if empty, is the claim + if claim.is_empty() { + Self::set(txn, id, &vec![]); + return; } - existing.extend(tx_id.as_ref()); + let mut existing = Self::get(txn, id).unwrap_or_default(); + assert_eq!(existing.len() % claim.len(), 0); + + // Don't add this completion if it's already present + let mut i = 0; + while i < existing.len() { + if &existing[i .. 
(i + claim.len())] == claim { + return; + } + i += claim.len(); + } + + existing.extend(claim); Self::set(txn, id, &existing); } } @@ -110,25 +129,33 @@ impl EventualityDb { } } -impl TransactionDb { - fn save_transaction(txn: &mut impl DbTxn, tx: &N::Transaction) { - Self::set(txn, tx.id().as_ref(), &tx.serialize()); +impl CompletionDb { + fn save_completion( + txn: &mut impl DbTxn, + completion: &::Completion, + ) { + let claim = N::Eventuality::claim(completion); + let claim: &[u8] = claim.as_ref(); + Self::set(txn, claim, &N::Eventuality::serialize_completion(completion)); } - fn transaction( + fn completion( getter: &impl Get, - id: &>::Id, - ) -> Option { - Self::get(getter, id.as_ref()).map(|tx| N::Transaction::read(&mut tx.as_slice()).unwrap()) + claim: &::Claim, + ) -> Option<::Completion> { + Self::get(getter, claim.as_ref()) + .map(|completion| N::Eventuality::read_completion::<&[u8]>(&mut completion.as_ref()).unwrap()) } } type PreprocessFor = <::TransactionMachine as PreprocessMachine>::Preprocess; type SignMachineFor = <::TransactionMachine as PreprocessMachine>::SignMachine; -type SignatureShareFor = - as SignMachine<::Transaction>>::SignatureShare; -type SignatureMachineFor = - as SignMachine<::Transaction>>::SignatureMachine; +type SignatureShareFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureShare; +type SignatureMachineFor = as SignMachine< + <::Eventuality as Eventuality>::Completion, +>>::SignatureMachine; pub struct Signer { db: PhantomData, @@ -164,12 +191,11 @@ impl Signer { log::info!("rebroadcasting transactions for plans whose completions yet to be confirmed..."); loop { for active in ActiveSignsDb::get(&db).unwrap_or_default() { - for completion in CompletionsDb::completions::(&db, active) { - log::info!("rebroadcasting {}", hex::encode(&completion)); + for claim in CompletionsDb::completions::(&db, active) { + log::info!("rebroadcasting completion with claim {}", hex::encode(claim.as_ref())); // TODO: 
Don't drop the error entirely. Check for invariants - let _ = network - .publish_transaction(&TransactionDb::transaction::(&db, &completion).unwrap()) - .await; + let _ = + network.publish_completion(&CompletionDb::completion::(&db, &claim).unwrap()).await; } } // Only run every five minutes so we aren't frequently loading tens to hundreds of KB from @@ -242,7 +268,7 @@ impl Signer { fn complete( &mut self, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> ProcessorMessage { // Assert we're actively signing for this TX assert!(self.signable.remove(&id).is_some(), "completed a TX we weren't signing for"); @@ -256,7 +282,7 @@ impl Signer { self.signing.remove(&id); // Emit the event for it - ProcessorMessage::Completed { session: self.session, id, tx: tx_id.as_ref().to_vec() } + ProcessorMessage::Completed { session: self.session, id, tx: claim.as_ref().to_vec() } } #[must_use] @@ -264,16 +290,16 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx: &N::Transaction, + completion: &::Completion, ) -> Option { let first_completion = !Self::already_completed(txn, id); // Save this completion to the DB CompletedOnChainDb::complete_on_chain(txn, &id); - CompletionsDb::complete::(txn, id, tx); + CompletionsDb::complete::(txn, id, completion); if first_completion { - Some(self.complete(id, &tx.id())) + Some(self.complete(id, &N::Eventuality::claim(completion))) } else { None } @@ -286,49 +312,50 @@ impl Signer { &mut self, txn: &mut D::Transaction<'_>, id: [u8; 32], - tx_id: &>::Id, + claim: &::Claim, ) -> Option { if let Some(eventuality) = EventualityDb::eventuality::(txn, id) { - // Transaction hasn't hit our mempool/was dropped for a different signature - // The latter can happen given certain latency conditions/a single malicious signer - // In the case of a single malicious signer, they can drag multiple honest validators down - // with them, so we unfortunately can't slash on this case - let Ok(tx) = 
self.network.get_transaction(tx_id).await else { - warn!( - "a validator claimed {} completed {} yet we didn't have that TX in our mempool {}", - hex::encode(tx_id), - hex::encode(id), - "(or had another connectivity issue)", - ); - return None; - }; + match self.network.confirm_completion(&eventuality, claim).await { + Ok(Some(completion)) => { + info!( + "signer eventuality for {} resolved in {}", + hex::encode(id), + hex::encode(claim.as_ref()) + ); - if self.network.confirm_completion(&eventuality, &tx) { - info!("signer eventuality for {} resolved in TX {}", hex::encode(id), hex::encode(tx_id)); + let first_completion = !Self::already_completed(txn, id); - let first_completion = !Self::already_completed(txn, id); + // Save this completion to the DB + CompletionsDb::complete::(txn, id, &completion); - // Save this completion to the DB - CompletionsDb::complete::(txn, id, &tx); - - if first_completion { - return Some(self.complete(id, &tx.id())); + if first_completion { + return Some(self.complete(id, claim)); + } + } + Ok(None) => { + warn!( + "a validator claimed {} completed {} when it did not", + hex::encode(claim.as_ref()), + hex::encode(id), + ); + } + Err(_) => { + // Transaction hasn't hit our mempool/was dropped for a different signature + // The latter can happen given certain latency conditions/a single malicious signer + // In the case of a single malicious signer, they can drag multiple honest validators down + // with them, so we unfortunately can't slash on this case + warn!( + "a validator claimed {} completed {} yet we couldn't check that claim", + hex::encode(claim.as_ref()), + hex::encode(id), + ); } - } else { - warn!( - "a validator claimed {} completed {} when it did not", - hex::encode(tx_id), - hex::encode(id) - ); } } else { - // If we don't have this in RAM, it should be because we already finished signing it - assert!(!CompletionsDb::completions::(txn, id).is_empty()); - info!( - "signer {} informed of the eventuality completion for 
plan {}, {}", - hex::encode(self.keys[0].group_key().to_bytes()), + warn!( + "informed of completion {} for eventuality {}, when we didn't have that eventuality", + hex::encode(claim.as_ref()), hex::encode(id), - "which we already marked as completed", ); } None @@ -405,7 +432,7 @@ impl Signer { let mut preprocesses = vec![]; let mut serialized_preprocesses = vec![]; for keys in &self.keys { - let machine = match self.network.attempt_send(keys.clone(), tx.clone()).await { + let machine = match self.network.attempt_sign(keys.clone(), tx.clone()).await { Err(e) => { error!("failed to attempt {}, #{}: {:?}", hex::encode(id.id), id.attempt, e); return None; @@ -572,7 +599,7 @@ impl Signer { assert!(shares.insert(self.keys[i].params().i(), our_share).is_none()); } - let tx = match machine.complete(shares) { + let completion = match machine.complete(shares) { Ok(res) => res, Err(e) => match e { FrostError::InternalError(_) | @@ -588,40 +615,39 @@ impl Signer { }, }; - // Save the transaction in case it's needed for recovery - CompletionsDb::complete::(txn, id.id, &tx); + // Save the completion in case it's needed for recovery + CompletionsDb::complete::(txn, id.id, &completion); // Publish it - let tx_id = tx.id(); - if let Err(e) = self.network.publish_transaction(&tx).await { - error!("couldn't publish {:?}: {:?}", tx, e); + if let Err(e) = self.network.publish_completion(&completion).await { + error!("couldn't publish completion for plan {}: {:?}", hex::encode(id.id), e); } else { - info!("published {} for plan {}", hex::encode(&tx_id), hex::encode(id.id)); + info!("published completion for plan {}", hex::encode(id.id)); } // Stop trying to sign for this TX - Some(self.complete(id.id, &tx_id)) + Some(self.complete(id.id, &N::Eventuality::claim(&completion))) } CoordinatorMessage::Reattempt { id } => self.attempt(txn, id.id, id.attempt).await, - CoordinatorMessage::Completed { session: _, id, tx: mut tx_vec } => { - let mut tx = >::Id::default(); - if tx.as_ref().len() 
!= tx_vec.len() { - let true_len = tx_vec.len(); - tx_vec.truncate(2 * tx.as_ref().len()); + CoordinatorMessage::Completed { session: _, id, tx: mut claim_vec } => { + let mut claim = ::Claim::default(); + if claim.as_ref().len() != claim_vec.len() { + let true_len = claim_vec.len(); + claim_vec.truncate(2 * claim.as_ref().len()); warn!( "a validator claimed {}... (actual length {}) completed {} yet {}", - hex::encode(&tx_vec), + hex::encode(&claim_vec), true_len, hex::encode(id), - "that's not a valid TX ID", + "that's not a valid Claim", ); return None; } - tx.as_mut().copy_from_slice(&tx_vec); + claim.as_mut().copy_from_slice(&claim_vec); - self.claimed_eventuality_completion(txn, id, &tx).await + self.claimed_eventuality_completion(txn, id, &claim).await } } } diff --git a/processor/src/tests/addresses.rs b/processor/src/tests/addresses.rs index da20091b..8f730dbd 100644 --- a/processor/src/tests/addresses.rs +++ b/processor/src/tests/addresses.rs @@ -13,18 +13,23 @@ use serai_db::{DbTxn, MemDb}; use crate::{ Plan, Db, - networks::{OutputType, Output, Block, Network}, - multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, + networks::{OutputType, Output, Block, UtxoNetwork}, + multisigs::{ + scheduler::Scheduler, + scanner::{ScannerEvent, Scanner, ScannerHandle}, + }, tests::sign, }; -async fn spend( +async fn spend( db: &mut D, network: &N, keys: &HashMap>, scanner: &mut ScannerHandle, outputs: Vec, -) { +) where + >::Addendum: From<()>, +{ let key = keys[&Participant::new(1).unwrap()].group_key(); let mut keys_txs = HashMap::new(); @@ -41,7 +46,8 @@ async fn spend( key, inputs: outputs.clone(), payments: vec![], - change: Some(N::change_address(key)), + change: Some(N::change_address(key).unwrap()), + scheduler_addendum: ().into(), }, 0, ) @@ -70,13 +76,16 @@ async fn spend( scanner.release_lock().await; txn.commit(); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality 
completion"); } } } -pub async fn test_addresses(network: N) { +pub async fn test_addresses(network: N) +where + >::Addendum: From<()>, +{ let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); @@ -101,10 +110,10 @@ pub async fn test_addresses(network: N) { // Receive funds to the various addresses and make sure they're properly identified let mut received_outputs = vec![]; for (kind, address) in [ - (OutputType::External, N::external_address(key)), - (OutputType::Branch, N::branch_address(key)), - (OutputType::Change, N::change_address(key)), - (OutputType::Forwarded, N::forward_address(key)), + (OutputType::External, N::external_address(&network, key).await), + (OutputType::Branch, N::branch_address(key).unwrap()), + (OutputType::Change, N::change_address(key).unwrap()), + (OutputType::Forwarded, N::forward_address(key).unwrap()), ] { let block_id = network.test_send(address).await.id(); @@ -123,7 +132,7 @@ pub async fn test_addresses(network: N) { txn.commit(); received_outputs.extend(outputs); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/literal/mod.rs b/processor/src/tests/literal/mod.rs index 192214eb..e2bfdc8a 100644 --- a/processor/src/tests/literal/mod.rs +++ b/processor/src/tests/literal/mod.rs @@ -65,7 +65,7 @@ mod bitcoin { .unwrap(); ::tweak_keys(&mut keys); let group_key = keys.group_key(); - let serai_btc_address = ::external_address(group_key); + let serai_btc_address = ::external_address(&btc, group_key).await; // btc key pair to send from let private_key = PrivateKey::new(SecretKey::new(&mut rand_core::OsRng), BNetwork::Regtest); diff --git a/processor/src/tests/scanner.rs b/processor/src/tests/scanner.rs index 5aad5bb5..42756d8b 100644 --- a/processor/src/tests/scanner.rs +++ b/processor/src/tests/scanner.rs @@ -11,11 +11,11 @@ use tokio::{sync::Mutex, 
time::timeout}; use serai_db::{DbTxn, Db, MemDb}; use crate::{ - networks::{OutputType, Output, Block, Network}, + networks::{OutputType, Output, Block, UtxoNetwork}, multisigs::scanner::{ScannerEvent, Scanner, ScannerHandle}, }; -pub async fn new_scanner( +pub async fn new_scanner( network: &N, db: &D, group_key: ::G, @@ -40,7 +40,7 @@ pub async fn new_scanner( scanner } -pub async fn test_scanner(network: N) { +pub async fn test_scanner(network: N) { let mut keys = frost::tests::key_gen::<_, N::Curve>(&mut OsRng).remove(&Participant::new(1).unwrap()).unwrap(); N::tweak_keys(&mut keys); @@ -56,7 +56,7 @@ pub async fn test_scanner(network: N) { let scanner = new_scanner(&network, &db, group_key, &first).await; // Receive funds - let block = network.test_send(N::external_address(keys.group_key())).await; + let block = network.test_send(N::external_address(&network, keys.group_key()).await).await; let block_id = block.id(); // Verify the Scanner picked them up @@ -71,7 +71,7 @@ pub async fn test_scanner(network: N) { assert_eq!(outputs[0].kind(), OutputType::External); outputs } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; @@ -101,7 +101,7 @@ pub async fn test_scanner(network: N) { .is_err()); } -pub async fn test_no_deadlock_in_multisig_completed(network: N) { +pub async fn test_no_deadlock_in_multisig_completed(network: N) { // Mine blocks so there's a confirmed block for _ in 0 .. N::CONFIRMATIONS { network.mine_block().await; @@ -142,14 +142,14 @@ pub async fn test_no_deadlock_in_multisig_completed(network: N) { assert!(!is_retirement_block); block } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { ScannerEvent::Block { .. 
} => {} - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } }; diff --git a/processor/src/tests/signer.rs b/processor/src/tests/signer.rs index 89d57bf3..524c5d29 100644 --- a/processor/src/tests/signer.rs +++ b/processor/src/tests/signer.rs @@ -17,19 +17,20 @@ use serai_client::{ use messages::sign::*; use crate::{ Payment, Plan, - networks::{Output, Transaction, Network}, + networks::{Output, Transaction, Eventuality, UtxoNetwork}, + multisigs::scheduler::Scheduler, signer::Signer, }; #[allow(clippy::type_complexity)] -pub async fn sign( +pub async fn sign( network: N, session: Session, mut keys_txs: HashMap< Participant, (ThresholdKeys, (N::SignableTransaction, N::Eventuality)), >, -) -> >::Id { +) -> ::Claim { let actual_id = SignId { session, id: [0xaa; 32], attempt: 0 }; let mut keys = HashMap::new(); @@ -65,14 +66,15 @@ pub async fn sign( let mut preprocesses = HashMap::new(); + let mut eventuality = None; for i in 1 ..= signers.len() { let i = Participant::new(u16::try_from(i).unwrap()).unwrap(); - let (tx, eventuality) = txs.remove(&i).unwrap(); + let (tx, this_eventuality) = txs.remove(&i).unwrap(); let mut txn = dbs.get_mut(&i).unwrap().txn(); match signers .get_mut(&i) .unwrap() - .sign_transaction(&mut txn, actual_id.id, tx, &eventuality) + .sign_transaction(&mut txn, actual_id.id, tx, &this_eventuality) .await { // All participants should emit a preprocess @@ -86,6 +88,11 @@ pub async fn sign( _ => panic!("didn't get preprocess back"), } txn.commit(); + + if eventuality.is_none() { + eventuality = Some(this_eventuality.clone()); + } + assert_eq!(eventuality, Some(this_eventuality)); } let mut shares = HashMap::new(); @@ -140,19 +147,25 @@ pub async fn sign( txn.commit(); } - let mut typed_tx_id = >::Id::default(); - typed_tx_id.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); - typed_tx_id + let mut typed_claim = ::Claim::default(); + 
typed_claim.as_mut().copy_from_slice(tx_id.unwrap().as_ref()); + assert!(network.check_eventuality_by_claim(&eventuality.unwrap(), &typed_claim).await); + typed_claim } -pub async fn test_signer(network: N) { +pub async fn test_signer(network: N) +where + >::Addendum: From<()>, +{ let mut keys = key_gen(&mut OsRng); for keys in keys.values_mut() { N::tweak_keys(keys); } let key = keys[&Participant::new(1).unwrap()].group_key(); - let outputs = network.get_outputs(&network.test_send(N::external_address(key)).await, key).await; + let outputs = network + .get_outputs(&network.test_send(N::external_address(&network, key).await).await, key) + .await; let sync_block = network.get_latest_block_number().await.unwrap() - N::CONFIRMATIONS; let amount = 2 * N::DUST; @@ -166,7 +179,7 @@ pub async fn test_signer(network: N) { key, inputs: outputs.clone(), payments: vec![Payment { - address: N::external_address(key), + address: N::external_address(&network, key).await, data: None, balance: Balance { coin: match N::NETWORK { @@ -178,7 +191,8 @@ pub async fn test_signer(network: N) { amount: Amount(amount), }, }], - change: Some(N::change_address(key)), + change: Some(N::change_address(key).unwrap()), + scheduler_addendum: ().into(), }, 0, ) @@ -191,13 +205,12 @@ pub async fn test_signer(network: N) { keys_txs.insert(i, (keys, (signable, eventuality))); } - // The signer may not publish the TX if it has a connection error - // It doesn't fail in this case - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); - assert_eq!(tx.id(), txid); + let claim = sign(network.clone(), Session(0), keys_txs).await; + // Mine a block, and scan it, to ensure that the TX actually made it on chain network.mine_block().await; + let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let outputs = network .get_outputs( 
&network.get_block(network.get_latest_block_number().await.unwrap()).await.unwrap(), @@ -212,6 +225,7 @@ pub async fn test_signer(network: N) { // Check the eventualities pass for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } } diff --git a/processor/src/tests/wallet.rs b/processor/src/tests/wallet.rs index c9cc6c66..4600fcbe 100644 --- a/processor/src/tests/wallet.rs +++ b/processor/src/tests/wallet.rs @@ -15,7 +15,7 @@ use serai_client::{ use crate::{ Payment, Plan, - networks::{Output, Transaction, Block, Network}, + networks::{Output, Transaction, Eventuality, Block, UtxoNetwork}, multisigs::{ scanner::{ScannerEvent, Scanner}, scheduler::Scheduler, @@ -24,7 +24,7 @@ use crate::{ }; // Tests the Scanner, Scheduler, and Signer together -pub async fn test_wallet(network: N) { +pub async fn test_wallet(network: N) { // Mine blocks so there's a confirmed block for _ in 0 .. 
N::CONFIRMATIONS { network.mine_block().await; @@ -47,7 +47,7 @@ pub async fn test_wallet(network: N) { network.mine_block().await; } - let block = network.test_send(N::external_address(key)).await; + let block = network.test_send(N::external_address(&network, key).await).await; let block_id = block.id(); match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() { @@ -58,7 +58,7 @@ pub async fn test_wallet(network: N) { assert_eq!(outputs.len(), 1); (block_id, outputs) } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } @@ -69,22 +69,13 @@ pub async fn test_wallet(network: N) { txn.commit(); let mut txn = db.txn(); - let mut scheduler = Scheduler::new::( - &mut txn, - key, - match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - ); + let mut scheduler = N::Scheduler::new::(&mut txn, key, N::NETWORK); let amount = 2 * N::DUST; let plans = scheduler.schedule::( &mut txn, outputs.clone(), vec![Payment { - address: N::external_address(key), + address: N::external_address(&network, key).await, data: None, balance: Balance { coin: match N::NETWORK { @@ -100,27 +91,26 @@ pub async fn test_wallet(network: N) { false, ); txn.commit(); + assert_eq!(plans.len(), 1); + assert_eq!(plans[0].key, key); + assert_eq!(plans[0].inputs, outputs); assert_eq!( - plans, - vec![Plan { - key, - inputs: outputs.clone(), - payments: vec![Payment { - address: N::external_address(key), - data: None, - balance: Balance { - coin: match N::NETWORK { - NetworkId::Serai => panic!("test_wallet called with Serai"), - NetworkId::Bitcoin => Coin::Bitcoin, - NetworkId::Ethereum => Coin::Ether, - NetworkId::Monero => Coin::Monero, - }, - amount: Amount(amount), - } - }], - change: Some(N::change_address(key)), + plans[0].payments, + 
vec![Payment { + address: N::external_address(&network, key).await, + data: None, + balance: Balance { + coin: match N::NETWORK { + NetworkId::Serai => panic!("test_wallet called with Serai"), + NetworkId::Bitcoin => Coin::Bitcoin, + NetworkId::Ethereum => Coin::Ether, + NetworkId::Monero => Coin::Monero, + }, + amount: Amount(amount), + } }] ); + assert_eq!(plans[0].change, Some(N::change_address(key).unwrap())); { let mut buf = vec![]; @@ -143,10 +133,10 @@ pub async fn test_wallet(network: N) { keys_txs.insert(i, (keys, (signable, eventuality))); } - let txid = sign(network.clone(), Session(0), keys_txs).await; - let tx = network.get_transaction(&txid).await.unwrap(); + let claim = sign(network.clone(), Session(0), keys_txs).await; network.mine_block().await; let block_number = network.get_latest_block_number().await.unwrap(); + let tx = network.get_transaction_by_eventuality(block_number, &eventualities[0]).await; let block = network.get_block(block_number).await.unwrap(); let outputs = network.get_outputs(&block, key).await; assert_eq!(outputs.len(), 2); @@ -154,7 +144,8 @@ pub async fn test_wallet(network: N) { assert!((outputs[0].balance().amount.0 == amount) || (outputs[1].balance().amount.0 == amount)); for eventuality in eventualities { - assert!(network.confirm_completion(&eventuality, &tx)); + let completion = network.confirm_completion(&eventuality, &claim).await.unwrap().unwrap(); + assert_eq!(N::Eventuality::claim(&completion), claim); } for _ in 1 .. 
N::CONFIRMATIONS { @@ -168,7 +159,7 @@ pub async fn test_wallet(network: N) { assert_eq!(block_id, block.id()); assert_eq!(these_outputs, outputs); } - ScannerEvent::Completed(_, _, _, _) => { + ScannerEvent::Completed(_, _, _, _, _) => { panic!("unexpectedly got eventuality completion"); } } diff --git a/spec/integrations/Ethereum.md b/spec/integrations/Ethereum.md index e66a1f5b..1e1f3ba1 100644 --- a/spec/integrations/Ethereum.md +++ b/spec/integrations/Ethereum.md @@ -2,37 +2,26 @@ ### Addresses -Ethereum addresses are 20-byte hashes. +Ethereum addresses are 20-byte hashes, identical to Ethereum proper. ### In Instructions -Ethereum In Instructions are present via being appended to the calldata -transferring funds to Serai. `origin` is automatically set to the party from -which funds are being transferred. For an ERC20, this is `from`. For ETH, this -is the caller. +In Instructions may be created in one of two ways. + +1) Have an EOA call `transfer` or `transferFrom` on an ERC20, appending the + encoded InInstruction directly after the calldata. `origin` defaults to the + party transferred from. +2) Call `inInstruction` on the Router. `origin` defaults to `msg.sender`. ### Out Instructions `data` is limited to 512 bytes. -If `data` is provided, the Ethereum Router will call a contract-calling child -contract in order to sandbox it. The first byte of `data` designates which child -child contract to call. After this byte is read, `data` is solely considered as -`data`, post its first byte. The child contract is sent the funds before this -call is performed. +If `data` isn't provided or is malformed, ETH transfers will execute with 5,000 +gas and token transfers with 100,000 gas. -##### Child Contract 0 - -This contract is intended to enable connecting with other protocols, and should -be used to convert withdrawn assets to other assets on Ethereum. - - 1) Transfers the asset to `destination`. - 2) Calls `destination` with `data`. 
- -##### Child Contract 1 - -This contract is intended to enable authenticated calls from Serai. - - 1) Transfers the asset to `destination`. - 2) Calls `destination` with `data[.. 4], serai_address, data[4 ..]`, where -`serai_address` is the address which triggered this Out Instruction. +If `data` is provided and well-formed, `destination` is ignored and the Ethereum +Router will construct and call a new contract to proxy the contained calls. The +transfer executes to the constructed contract as above, before the constructed +contract is called with the calls inside `data`. The sandboxed execution has a +gas limit of 350,000. diff --git a/substrate/abi/Cargo.toml b/substrate/abi/Cargo.toml index 04350486..ac294930 100644 --- a/substrate/abi/Cargo.toml +++ b/substrate/abi/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/abi" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true @@ -31,6 +31,7 @@ sp-consensus-grandpa = { git = "https://github.com/serai-dex/substrate" } serai-primitives = { path = "../primitives", version = "0.1" } serai-coins-primitives = { path = "../coins/primitives", version = "0.1" } serai-validator-sets-primitives = { path = "../validator-sets/primitives", version = "0.1" } +serai-genesis-liquidity-primitives = { path = "../genesis-liquidity/primitives", version = "0.1" } serai-in-instructions-primitives = { path = "../in-instructions/primitives", version = "0.1" } serai-signals-primitives = { path = "../signals/primitives", version = "0.1" } @@ -42,6 +43,7 @@ borsh = [ "serai-primitives/borsh", "serai-coins-primitives/borsh", "serai-validator-sets-primitives/borsh", + "serai-genesis-liquidity-primitives/borsh", "serai-in-instructions-primitives/borsh", "serai-signals-primitives/borsh", ] @@ -50,6 +52,7 @@ serde = [ "serai-primitives/serde", "serai-coins-primitives/serde", 
"serai-validator-sets-primitives/serde", + "serai-genesis-liquidity-primitives/serde", "serai-in-instructions-primitives/serde", "serai-signals-primitives/serde", ] diff --git a/substrate/abi/src/genesis_liquidity.rs b/substrate/abi/src/genesis_liquidity.rs index 39191223..2b0c208c 100644 --- a/substrate/abi/src/genesis_liquidity.rs +++ b/substrate/abi/src/genesis_liquidity.rs @@ -1,11 +1,13 @@ +pub use serai_genesis_liquidity_primitives as primitives; + use serai_primitives::*; +use primitives::*; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] -#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Call { - // This call is just a place holder so that abi works as expected. - empty_call, + remove_coin_liquidity { balance: Balance }, + set_initial_price { prices: Prices, signature: Signature }, } #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] diff --git a/substrate/abi/src/liquidity_tokens.rs b/substrate/abi/src/liquidity_tokens.rs new file mode 100644 index 00000000..6bdc651b --- /dev/null +++ b/substrate/abi/src/liquidity_tokens.rs @@ -0,0 +1,18 @@ +use serai_primitives::{Balance, SeraiAddress}; + +#[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] +#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum Call { + burn { balance: Balance }, + transfer { to: SeraiAddress, balance: Balance }, +} + +#[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode, scale_info::TypeInfo)] +#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize, borsh::BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] +pub enum Event { + Mint { to: SeraiAddress, balance: 
Balance }, + Burn { from: SeraiAddress, balance: Balance }, + Transfer { from: SeraiAddress, to: SeraiAddress, balance: Balance }, +} diff --git a/substrate/client/src/networks/bitcoin.rs b/substrate/client/src/networks/bitcoin.rs index 42cf41bf..5ea37898 100644 --- a/substrate/client/src/networks/bitcoin.rs +++ b/substrate/client/src/networks/bitcoin.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -35,9 +35,9 @@ impl FromStr for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) } } diff --git a/substrate/client/src/networks/monero.rs b/substrate/client/src/networks/monero.rs index e2268ec2..5b43860e 100644 --- a/substrate/client/src/networks/monero.rs +++ b/substrate/client/src/networks/monero.rs @@ -1,4 +1,4 @@ -use core::str::FromStr; +use core::{str::FromStr, fmt}; use scale::{Encode, Decode}; @@ -24,9 +24,9 @@ impl FromStr for Address { } } -impl ToString for Address { - fn to_string(&self) -> String { - self.0.to_string() +impl fmt::Display for Address { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + self.0.fmt(f) } } diff --git a/substrate/client/src/serai/dex.rs b/substrate/client/src/serai/dex.rs index 00108dfe..18341125 100644 --- a/substrate/client/src/serai/dex.rs +++ b/substrate/client/src/serai/dex.rs @@ -1,7 +1,9 @@ use sp_core::bounded_vec::BoundedVec; use serai_abi::primitives::{SeraiAddress, Amount, Coin}; -use crate::{SeraiError, TemporalSerai}; +use scale::{decode_from_bytes, Encode}; + +use crate::{SeraiError, hex_decode, TemporalSerai}; pub type DexEvent = serai_abi::dex::Event; @@ -57,4 +59,21 @@ impl<'a> SeraiDex<'a> { send_to: address, }) } + + pub async fn get_reserves( + &self, + coin1: Coin, + coin2: Coin, + ) -> Result, SeraiError> { + let hash = self + .0 + .serai + .call("state_call", 
["DexApi_get_reserves".to_string(), hex::encode((coin1, coin2).encode())]) + .await?; + let bytes = hex_decode(hash) + .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string()))?; + let resut = decode_from_bytes::>(bytes.into()) + .map_err(|e| SeraiError::ErrorInResponse(e.to_string()))?; + Ok(resut.map(|amounts| (Amount(amounts.0), Amount(amounts.1)))) + } } diff --git a/substrate/client/src/serai/genesis_liquidity.rs b/substrate/client/src/serai/genesis_liquidity.rs new file mode 100644 index 00000000..b8882bd7 --- /dev/null +++ b/substrate/client/src/serai/genesis_liquidity.rs @@ -0,0 +1,73 @@ +pub use serai_abi::genesis_liquidity::primitives; +use primitives::Prices; + +use serai_abi::primitives::*; + +use sp_core::sr25519::Signature; + +use scale::Encode; + +use crate::{Serai, SeraiError, TemporalSerai, Transaction}; + +pub type GenesisLiquidityEvent = serai_abi::genesis_liquidity::Event; + +const PALLET: &str = "GenesisLiquidity"; + +#[derive(Clone, Copy)] +pub struct SeraiGenesisLiquidity<'a>(pub(crate) &'a TemporalSerai<'a>); +impl<'a> SeraiGenesisLiquidity<'a> { + pub async fn events(&self) -> Result, SeraiError> { + self + .0 + .events(|event| { + if let serai_abi::Event::GenesisLiquidity(event) = event { + Some(event.clone()) + } else { + None + } + }) + .await + } + + pub async fn liquidity_tokens( + &self, + address: &SeraiAddress, + coin: Coin, + ) -> Result { + Ok( + self + .0 + .storage( + PALLET, + "LiquidityTokensPerAddress", + (coin, sp_core::hashing::blake2_128(&address.encode()), &address.0), + ) + .await? 
+ .unwrap_or(Amount(0)), + ) + } + + pub fn set_initial_price(prices: Prices, signature: Signature) -> Transaction { + Serai::unsigned(serai_abi::Call::GenesisLiquidity( + serai_abi::genesis_liquidity::Call::set_initial_price { prices, signature }, + )) + } + + pub fn remove_coin_liquidity(balance: Balance) -> serai_abi::Call { + serai_abi::Call::GenesisLiquidity(serai_abi::genesis_liquidity::Call::remove_coin_liquidity { + balance, + }) + } + + pub async fn liquidity(&self, address: &SeraiAddress, coin: Coin) -> Option { + self + .0 + .storage( + PALLET, + "Liquidity", + (coin, sp_core::hashing::blake2_128(&address.encode()), &address.0), + ) + .await + .unwrap() + } +} diff --git a/substrate/client/src/serai/liquidity_tokens.rs b/substrate/client/src/serai/liquidity_tokens.rs new file mode 100644 index 00000000..22fcd49e --- /dev/null +++ b/substrate/client/src/serai/liquidity_tokens.rs @@ -0,0 +1,41 @@ +use scale::Encode; + +use serai_abi::primitives::{SeraiAddress, Amount, Coin, Balance}; + +use crate::{TemporalSerai, SeraiError}; + +const PALLET: &str = "LiquidityTokens"; + +#[derive(Clone, Copy)] +pub struct SeraiLiquidityTokens<'a>(pub(crate) &'a TemporalSerai<'a>); +impl<'a> SeraiLiquidityTokens<'a> { + pub async fn token_supply(&self, coin: Coin) -> Result { + Ok(self.0.storage(PALLET, "Supply", coin).await?.unwrap_or(Amount(0))) + } + + pub async fn token_balance( + &self, + coin: Coin, + address: SeraiAddress, + ) -> Result { + Ok( + self + .0 + .storage( + PALLET, + "Balances", + (sp_core::hashing::blake2_128(&address.encode()), &address.0, coin), + ) + .await? 
+ .unwrap_or(Amount(0)), + ) + } + + pub fn transfer(to: SeraiAddress, balance: Balance) -> serai_abi::Call { + serai_abi::Call::Coins(serai_abi::coins::Call::transfer { to, balance }) + } + + pub fn burn(balance: Balance) -> serai_abi::Call { + serai_abi::Call::Coins(serai_abi::coins::Call::burn { balance }) + } +} diff --git a/substrate/client/src/serai/mod.rs b/substrate/client/src/serai/mod.rs index 1347fc05..fc4a9ea7 100644 --- a/substrate/client/src/serai/mod.rs +++ b/substrate/client/src/serai/mod.rs @@ -1,3 +1,4 @@ +use hex::FromHexError; use thiserror::Error; use async_lock::RwLock; @@ -26,6 +27,10 @@ pub mod in_instructions; pub use in_instructions::SeraiInInstructions; pub mod validator_sets; pub use validator_sets::SeraiValidatorSets; +pub mod genesis_liquidity; +pub use genesis_liquidity::SeraiGenesisLiquidity; +pub mod liquidity_tokens; +pub use liquidity_tokens::SeraiLiquidityTokens; #[derive(Clone, PartialEq, Eq, Debug, scale::Encode, scale::Decode)] pub struct Block { @@ -82,6 +87,14 @@ impl<'a> Clone for TemporalSerai<'a> { } } +pub fn hex_decode(str: String) -> Result, FromHexError> { + if let Some(stripped) = str.strip_prefix("0x") { + hex::decode(stripped) + } else { + hex::decode(str) + } +} + impl Serai { pub async fn call( &self, @@ -134,19 +147,11 @@ impl Serai { } } - fn hex_decode(str: String) -> Result, SeraiError> { - (if let Some(stripped) = str.strip_prefix("0x") { - hex::decode(stripped) - } else { - hex::decode(str) - }) - .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string())) - } - pub async fn block_hash(&self, number: u64) -> Result, SeraiError> { let hash: Option = self.call("chain_getBlockHash", [number]).await?; let Some(hash) = hash else { return Ok(None) }; - Self::hex_decode(hash)? + hex_decode(hash) + .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string()))? 
.try_into() .map_err(|_| SeraiError::InvalidNode("didn't respond to getBlockHash with hash".to_string())) .map(Some) @@ -195,11 +200,13 @@ impl Serai { Ok(()) } + // TODO: move this into substrate/client/src/validator_sets.rs async fn active_network_validators(&self, network: NetworkId) -> Result, SeraiError> { let hash: String = self .call("state_call", ["SeraiRuntimeApi_validators".to_string(), hex::encode(network.encode())]) .await?; - let bytes = Self::hex_decode(hash)?; + let bytes = hex_decode(hash) + .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string()))?; let r = Vec::::decode(&mut bytes.as_slice()) .map_err(|e| SeraiError::ErrorInResponse(e.to_string()))?; Ok(r) @@ -207,9 +214,12 @@ impl Serai { pub async fn latest_finalized_block_hash(&self) -> Result<[u8; 32], SeraiError> { let hash: String = self.call("chain_getFinalizedHead", ()).await?; - Self::hex_decode(hash)?.try_into().map_err(|_| { - SeraiError::InvalidNode("didn't respond to getFinalizedHead with hash".to_string()) - }) + hex_decode(hash) + .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string()))? + .try_into() + .map_err(|_| { + SeraiError::InvalidNode("didn't respond to getFinalizedHead with hash".to_string()) + }) } pub async fn header(&self, hash: [u8; 32]) -> Result, SeraiError> { @@ -219,7 +229,7 @@ impl Serai { pub async fn block(&self, hash: [u8; 32]) -> Result, SeraiError> { let block: Option = self.call("chain_getBlockBin", [hex::encode(hash)]).await?; let Some(block) = block else { return Ok(None) }; - let Ok(bytes) = Self::hex_decode(block) else { + let Ok(bytes) = hex_decode(block) else { Err(SeraiError::InvalidNode("didn't return a hex-encoded block".to_string()))? 
}; let Ok(block) = Block::decode(&mut bytes.as_slice()) else { @@ -365,7 +375,8 @@ impl<'a> TemporalSerai<'a> { let res: Option = self.serai.call("state_getStorage", [hex::encode(full_key), hex::encode(self.block)]).await?; let Some(res) = res else { return Ok(None) }; - let res = Serai::hex_decode(res)?; + let res = hex_decode(res) + .map_err(|_| SeraiError::InvalidNode("expected hex from node wasn't hex".to_string()))?; Ok(Some(R::decode(&mut res.as_slice()).map_err(|_| { SeraiError::InvalidRuntime("different type present at storage location".to_string()) })?)) @@ -386,4 +397,12 @@ impl<'a> TemporalSerai<'a> { pub fn validator_sets(&'a self) -> SeraiValidatorSets<'a> { SeraiValidatorSets(self) } + + pub fn genesis_liquidity(&'a self) -> SeraiGenesisLiquidity { + SeraiGenesisLiquidity(self) + } + + pub fn liquidity_tokens(&'a self) -> SeraiLiquidityTokens { + SeraiLiquidityTokens(self) + } } diff --git a/substrate/client/tests/common/mod.rs b/substrate/client/tests/common/mod.rs index d7e8436b..e9d88594 100644 --- a/substrate/client/tests/common/mod.rs +++ b/substrate/client/tests/common/mod.rs @@ -66,3 +66,67 @@ macro_rules! serai_test { )* } } + +#[macro_export] +macro_rules! 
serai_test_fast_epoch { + ($($name: ident: $test: expr)*) => { + $( + #[tokio::test] + async fn $name() { + use std::collections::HashMap; + use dockertest::{ + PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image, + TestBodySpecification, DockerTest, + }; + + serai_docker_tests::build("serai-fast-epoch".to_string()); + + let handle = concat!("serai_client-serai_node-", stringify!($name)); + + let composition = TestBodySpecification::with_image( + Image::with_repository("serai-dev-serai-fast-epoch").pull_policy(PullPolicy::Never), + ) + .replace_cmd(vec![ + "serai-node".to_string(), + "--dev".to_string(), + "--unsafe-rpc-external".to_string(), + "--rpc-cors".to_string(), + "all".to_string(), + ]) + .replace_env( + HashMap::from([ + ("RUST_LOG".to_string(), "runtime=debug".to_string()), + ("KEY".to_string(), " ".to_string()), + ]) + ) + .set_publish_all_ports(true) + .set_handle(handle) + .set_start_policy(StartPolicy::Strict) + .set_log_options(Some(LogOptions { + action: LogAction::Forward, + policy: LogPolicy::Always, + source: LogSource::Both, + })); + + let mut test = DockerTest::new().with_network(dockertest::Network::Isolated); + test.provide_container(composition); + test.run_async(|ops| async move { + // Sleep until the Substrate RPC starts + let serai_rpc = ops.handle(handle).host_port(9944).unwrap(); + let serai_rpc = format!("http://{}:{}", serai_rpc.0, serai_rpc.1); + // Bound execution to 60 seconds + for _ in 0 .. 
60 { + tokio::time::sleep(core::time::Duration::from_secs(1)).await; + let Ok(client) = Serai::new(serai_rpc.clone()).await else { continue }; + if client.latest_finalized_block_hash().await.is_err() { + continue; + } + break; + } + #[allow(clippy::redundant_closure_call)] + $test(Serai::new(serai_rpc).await.unwrap()).await; + }).await; + } + )* + } +} diff --git a/substrate/client/tests/genesis_liquidity.rs b/substrate/client/tests/genesis_liquidity.rs new file mode 100644 index 00000000..9efc8f09 --- /dev/null +++ b/substrate/client/tests/genesis_liquidity.rs @@ -0,0 +1,232 @@ +use std::{time::Duration, collections::HashMap}; + +use rand_core::{RngCore, OsRng}; +use zeroize::Zeroizing; + +use ciphersuite::{Ciphersuite, Ristretto}; +use frost::dkg::musig::musig; +use schnorrkel::Schnorrkel; + +use serai_client::{ + genesis_liquidity::{ + primitives::{GENESIS_LIQUIDITY_ACCOUNT, GENESIS_SRI}, + SeraiGenesisLiquidity, + }, + validator_sets::primitives::{musig_context, Session, ValidatorSet}, +}; + +use serai_abi::{ + genesis_liquidity::primitives::{set_initial_price_message, Prices}, + primitives::COINS, +}; + +use sp_core::{sr25519::Signature, Pair as PairTrait}; + +use serai_client::{ + primitives::{ + Amount, NetworkId, Coin, Balance, BlockHash, SeraiAddress, insecure_pair_from_name, + }, + in_instructions::primitives::{InInstruction, InInstructionWithBalance, Batch}, + Serai, +}; + +mod common; +use common::{in_instructions::provide_batch, tx::publish_tx}; + +serai_test_fast_epoch!( + genesis_liquidity: (|serai: Serai| async move { + test_genesis_liquidity(serai).await; + }) +); + +async fn test_genesis_liquidity(serai: Serai) { + // amounts + let amounts = vec![ + Amount(5_53246991), + Amount(3_14562819), + Amount(9_33648912), + Amount(150_873639000000), + Amount(248_665228000000), + Amount(451_765529000000), + ]; + + // addresses + let mut btc_addresses = vec![]; + let mut xmr_addresses = vec![]; + let addr_count = amounts.len(); + for (i, amount) in 
amounts.into_iter().enumerate() { + let mut address = SeraiAddress::new([0; 32]); + OsRng.fill_bytes(&mut address.0); + if i < addr_count / 2 { + btc_addresses.push((address, amount)); + } else { + xmr_addresses.push((address, amount)); + } + } + btc_addresses.sort_by(|a1, a2| a1.0.cmp(&a2.0)); + xmr_addresses.sort_by(|a1, a2| a1.0.cmp(&a2.0)); + + // btc batch + let mut block_hash = BlockHash([0; 32]); + OsRng.fill_bytes(&mut block_hash.0); + let btc_ins = btc_addresses + .iter() + .map(|(addr, amount)| InInstructionWithBalance { + instruction: InInstruction::GenesisLiquidity(*addr), + balance: Balance { coin: Coin::Bitcoin, amount: *amount }, + }) + .collect::>(); + let batch = + Batch { network: NetworkId::Bitcoin, id: 0, block: block_hash, instructions: btc_ins }; + provide_batch(&serai, batch).await; + + // xmr batch + let mut block_hash = BlockHash([0; 32]); + OsRng.fill_bytes(&mut block_hash.0); + let xmr_ins = xmr_addresses + .iter() + .map(|(addr, amount)| InInstructionWithBalance { + instruction: InInstruction::GenesisLiquidity(*addr), + balance: Balance { coin: Coin::Monero, amount: *amount }, + }) + .collect::>(); + let batch = Batch { network: NetworkId::Monero, id: 0, block: block_hash, instructions: xmr_ins }; + provide_batch(&serai, batch).await; + + // set prices + let prices = Prices { bitcoin: 10u64.pow(8), monero: 184100, ethereum: 4785000, dai: 1500 }; + set_prices(&serai, &prices).await; + + // wait until genesis ends.. + tokio::time::timeout(tokio::time::Duration::from_secs(300), async { + while serai.latest_finalized_block().await.unwrap().number() < 25 { + tokio::time::sleep(Duration::from_secs(6)).await; + } + }) + .await + .unwrap(); + + // check total SRI supply is +100M + let last_block = serai.latest_finalized_block().await.unwrap().hash(); + let serai = serai.as_of(last_block); + // Check balance instead of supply + let sri = serai.coins().coin_supply(Coin::Serai).await.unwrap(); + // there are 6 endowed accounts in dev-net. 
Take this into consideration when checking + // for the total sri minted at this time. + let endowed_amount: u64 = 1 << 60; + let total_sri = (6 * endowed_amount) + GENESIS_SRI; + assert_eq!(sri, Amount(total_sri)); + + // check genesis account has no coins, all transferred to pools. + for coin in COINS { + let amount = serai.coins().coin_balance(coin, GENESIS_LIQUIDITY_ACCOUNT).await.unwrap(); + assert_eq!(amount.0, 0); + } + + // check pools has proper liquidity + let pool_btc = btc_addresses.iter().fold(0u128, |acc, value| acc + u128::from(value.1 .0)); + let pool_xmr = xmr_addresses.iter().fold(0u128, |acc, value| acc + u128::from(value.1 .0)); + + let pool_btc_value = (pool_btc * u128::from(prices.bitcoin)) / 10u128.pow(8); + let pool_xmr_value = (pool_xmr * u128::from(prices.monero)) / 10u128.pow(12); + let total_value = pool_btc_value + pool_xmr_value; + + // calculated distributed SRI. We know that xmr is at the end of COINS array + // so it will be approximated to roof instead of floor after integer division. 
+ let btc_sri = (pool_btc_value * u128::from(GENESIS_SRI)) / total_value; + let xmr_sri = ((pool_xmr_value * u128::from(GENESIS_SRI)) / total_value) + 1; + + let btc_reserves = serai.dex().get_reserves(Coin::Bitcoin, Coin::Serai).await.unwrap().unwrap(); + assert_eq!(u128::from(btc_reserves.0 .0), pool_btc); + assert_eq!(u128::from(btc_reserves.1 .0), btc_sri); + + let xmr_reserves = serai.dex().get_reserves(Coin::Monero, Coin::Serai).await.unwrap().unwrap(); + assert_eq!(u128::from(xmr_reserves.0 .0), pool_xmr); + assert_eq!(u128::from(xmr_reserves.1 .0), xmr_sri); + + // check each btc liq provider got liq tokens proportional to their value + let btc_liq_token_supply = u128::from( + serai + .liquidity_tokens() + .token_balance(Coin::Bitcoin, GENESIS_LIQUIDITY_ACCOUNT) + .await + .unwrap() + .0, + ); + let mut total_tokens_this_coin: u128 = 0; + for (i, (addr, amount)) in btc_addresses.iter().enumerate() { + let addr_value = (u128::from(amount.0) * u128::from(prices.bitcoin)) / 10u128.pow(8); + let addr_liq_tokens = if i == btc_addresses.len() - 1 { + btc_liq_token_supply - total_tokens_this_coin + } else { + (addr_value * btc_liq_token_supply) / pool_btc_value + }; + + let addr_actual_token_amount = + serai.genesis_liquidity().liquidity_tokens(addr, Coin::Bitcoin).await.unwrap(); + + assert_eq!(addr_liq_tokens, addr_actual_token_amount.0.into()); + total_tokens_this_coin += addr_liq_tokens; + } + + // check each xmr liq provider got liq tokens proportional to their value + let xmr_liq_token_supply = u128::from( + serai + .liquidity_tokens() + .token_balance(Coin::Monero, GENESIS_LIQUIDITY_ACCOUNT) + .await + .unwrap() + .0, + ); + total_tokens_this_coin = 0; + for (i, (addr, amount)) in xmr_addresses.iter().enumerate() { + let addr_value = (u128::from(amount.0) * u128::from(prices.monero)) / 10u128.pow(12); + let addr_liq_tokens = if i == xmr_addresses.len() - 1 { + xmr_liq_token_supply - total_tokens_this_coin + } else { + (addr_value * xmr_liq_token_supply) / 
pool_xmr_value + }; + + let addr_actual_token_amount = + serai.genesis_liquidity().liquidity_tokens(addr, Coin::Monero).await.unwrap(); + + assert_eq!(addr_liq_tokens, addr_actual_token_amount.0.into()); + total_tokens_this_coin += addr_liq_tokens; + } + + // TODO: remove the liq before/after genesis ended. +} + +async fn set_prices(serai: &Serai, prices: &Prices) { + // prepare a Musig tx to set the initial prices + let pair = insecure_pair_from_name("Alice"); + let public = pair.public(); + let set = ValidatorSet { session: Session(0), network: NetworkId::Serai }; + + let public_key = ::read_G::<&[u8]>(&mut public.0.as_ref()).unwrap(); + let secret_key = ::read_F::<&[u8]>( + &mut pair.as_ref().secret.to_bytes()[.. 32].as_ref(), + ) + .unwrap(); + + assert_eq!(Ristretto::generator() * secret_key, public_key); + let threshold_keys = + musig::(&musig_context(set), &Zeroizing::new(secret_key), &[public_key]).unwrap(); + + let sig = frost::tests::sign_without_caching( + &mut OsRng, + frost::tests::algorithm_machines( + &mut OsRng, + &Schnorrkel::new(b"substrate"), + &HashMap::from([(threshold_keys.params().i(), threshold_keys.into())]), + ), + &set_initial_price_message(&set, prices), + ); + + // set initial prices + let _ = publish_tx( + serai, + &SeraiGenesisLiquidity::set_initial_price(*prices, Signature(sig.to_bytes())), + ) + .await; +} diff --git a/substrate/client/tests/validator_sets.rs b/substrate/client/tests/validator_sets.rs index 8ae150ec..a44a0ac4 100644 --- a/substrate/client/tests/validator_sets.rs +++ b/substrate/client/tests/validator_sets.rs @@ -14,7 +14,7 @@ use serai_client::{ mod common; use common::validator_sets::{set_keys, allocate_stake, deallocate_stake}; -const EPOCH_INTERVAL: u64 = 5; +const EPOCH_INTERVAL: u64 = 300; serai_test!( set_keys_test: (|serai: Serai| async move { diff --git a/substrate/coins/pallet/Cargo.toml b/substrate/coins/pallet/Cargo.toml index 75011add..da9a27f6 100644 --- a/substrate/coins/pallet/Cargo.toml +++ 
b/substrate/coins/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/coins/pallet" authors = ["Akil Demir "] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/coins/primitives/Cargo.toml b/substrate/coins/primitives/Cargo.toml index 322016da..ec906929 100644 --- a/substrate/coins/primitives/Cargo.toml +++ b/substrate/coins/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai coins primitives" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/Cargo.toml b/substrate/dex/pallet/Cargo.toml index 6a2eadb8..e2ed3928 100644 --- a/substrate/dex/pallet/Cargo.toml +++ b/substrate/dex/pallet/Cargo.toml @@ -6,7 +6,7 @@ license = "AGPL-3.0-only" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/dex/pallet" authors = ["Parity Technologies , Akil Demir "] edition = "2021" -rust-version = "1.70" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/dex/pallet/src/benchmarking.rs b/substrate/dex/pallet/src/benchmarking.rs index a00b6edc..fb23b12a 100644 --- a/substrate/dex/pallet/src/benchmarking.rs +++ b/substrate/dex/pallet/src/benchmarking.rs @@ -43,7 +43,7 @@ fn create_coin(coin: &Coin) -> (T::AccountId, AccountIdLookupOf) { let caller_lookup = T::Lookup::unlookup(caller); assert_ok!(Coins::::mint( caller, - Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::max_value().div(1000u64)) } + Balance { coin: Coin::native(), amount: Amount(SubstrateAmount::MAX.div(1000u64)) } )); assert_ok!(Coins::::mint( caller, diff --git a/substrate/genesis-liquidity/pallet/Cargo.toml b/substrate/genesis-liquidity/pallet/Cargo.toml index 873261d9..5ce41383 100644 --- a/substrate/genesis-liquidity/pallet/Cargo.toml +++ 
b/substrate/genesis-liquidity/pallet/Cargo.toml @@ -27,12 +27,15 @@ frame-system = { git = "https://github.com/serai-dex/substrate", default-feature frame-support = { git = "https://github.com/serai-dex/substrate", default-features = false } sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-core = { git = "https://github.com/serai-dex/substrate", default-features = false } +sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false } dex-pallet = { package = "serai-dex-pallet", path = "../../dex/pallet", default-features = false } coins-pallet = { package = "serai-coins-pallet", path = "../../coins/pallet", default-features = false } serai-primitives = { path = "../../primitives", default-features = false } genesis-liquidity-primitives = { package = "serai-genesis-liquidity-primitives", path = "../primitives", default-features = false } +validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../validator-sets/primitives", default-features = false } [features] std = [ @@ -43,12 +46,16 @@ std = [ "frame-support/std", "sp-std/std", + "sp-core/std", + "sp-application-crypto/std", "coins-pallet/std", "dex-pallet/std", "serai-primitives/std", "genesis-liquidity-primitives/std", + "validator-sets-primitives/std", ] +fast-epoch = [] -default = ["std"] \ No newline at end of file +default = ["std"] diff --git a/substrate/genesis-liquidity/pallet/src/lib.rs b/substrate/genesis-liquidity/pallet/src/lib.rs index a37b5d64..6e6f2a1f 100644 --- a/substrate/genesis-liquidity/pallet/src/lib.rs +++ b/substrate/genesis-liquidity/pallet/src/lib.rs @@ -5,17 +5,20 @@ pub mod pallet { use super::*; use frame_system::{pallet_prelude::*, RawOrigin}; - use frame_support::{pallet_prelude::*, sp_runtime::SaturatedConversion}; - - use sp_std::{vec, collections::btree_map::BTreeMap}; - - use dex_pallet::{Pallet as Dex, Config as DexConfig}; - use coins_pallet::{ - 
primitives::{OutInstructionWithBalance, OutInstruction}, - Config as CoinsConfig, Pallet as Coins, AllowMint, + use frame_support::{ + pallet_prelude::*, + sp_runtime::{self, SaturatedConversion}, }; + use sp_std::{vec, vec::Vec, collections::btree_map::BTreeMap}; + use sp_core::sr25519::Signature; + use sp_application_crypto::RuntimePublic; + + use dex_pallet::{Pallet as Dex, Config as DexConfig}; + use coins_pallet::{Config as CoinsConfig, Pallet as Coins, AllowMint}; + use serai_primitives::*; + use validator_sets_primitives::{ValidatorSet, Session, musig_key}; pub use genesis_liquidity_primitives as primitives; use primitives::*; @@ -29,6 +32,19 @@ pub mod pallet { type RuntimeEvent: From> + IsType<::RuntimeEvent>; } + #[pallet::genesis_config] + #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)] + pub struct GenesisConfig { + /// List of participants to place in the initial validator sets. + pub participants: Vec, + } + + impl Default for GenesisConfig { + fn default() -> Self { + GenesisConfig { participants: Default::default() } + } + } + #[pallet::error] pub enum Error { GenesisPeriodEnded, @@ -61,11 +77,31 @@ pub mod pallet { pub(crate) type EconomicSecurityReached = StorageMap<_, Identity, NetworkId, BlockNumberFor, ValueQuery>; + #[pallet::storage] + pub(crate) type Participants = + StorageMap<_, Identity, NetworkId, BoundedVec>, ValueQuery>; + + #[pallet::storage] + pub(crate) type Oracle = StorageMap<_, Identity, Coin, u64, ValueQuery>; + + #[pallet::genesis_build] + impl BuildGenesisConfig for GenesisConfig { + fn build(&self) { + Participants::::set(NetworkId::Serai, self.participants.clone().try_into().unwrap()); + } + } + #[pallet::hooks] impl Hooks> for Pallet { fn on_finalize(n: BlockNumberFor) { + #[cfg(feature = "fast-epoch")] + let final_block = 25u32; + + #[cfg(not(feature = "fast-epoch"))] + let final_block = BLOCKS_PER_MONTH; + // Distribute the genesis sri to pools after a month - if n == BLOCKS_PER_MONTH.into() { + if n == 
final_block.into() { // mint the SRI Coins::::mint( GENESIS_LIQUIDITY_ACCOUNT.into(), @@ -75,57 +111,97 @@ pub mod pallet { // get coin values & total let mut account_values = BTreeMap::new(); - let mut pool_values = BTreeMap::new(); - let mut total_value: u64 = 0; + let mut pool_values = vec![]; + let mut total_value: u128 = 0; for coin in COINS { - // TODO: following line is just a place holder till we get the actual coin value - // in terms of btc. - let value = Dex::::security_oracle_value(coin).unwrap_or(Amount(0)).0; - account_values.insert(coin, vec![]); - let mut pool_amount: u64 = 0; - for (account, amount) in Liquidity::::iter_prefix(coin) { - pool_amount = pool_amount.saturating_add(amount); - let value_this_addr = amount.saturating_mul(value); - account_values.get_mut(&coin).unwrap().push((account, value_this_addr)) + if coin == Coin::Serai { + continue; } - let pool_value = pool_amount.saturating_mul(value); + // initial coin value in terms of btc + let value = Oracle::::get(coin); + + // get the pool & individual address values + account_values.insert(coin, vec![]); + let mut pool_amount: u128 = 0; + for (account, amount) in Liquidity::::iter_prefix(coin) { + pool_amount = pool_amount.saturating_add(amount.into()); + let value_this_addr = + u128::from(amount).saturating_mul(value.into()) / 10u128.pow(coin.decimals()); + account_values.get_mut(&coin).unwrap().push((account, value_this_addr)) + } + // sort, so that everyone has a consistent accounts vector per coin + account_values.get_mut(&coin).unwrap().sort(); + + let pool_value = pool_amount.saturating_mul(value.into()) / 10u128.pow(coin.decimals()); total_value = total_value.saturating_add(pool_value); - pool_values.insert(coin, (pool_amount, pool_value)); + pool_values.push((coin, pool_amount, pool_value)); } // add the liquidity per pool - for (coin, (amount, value)) in &pool_values { - let sri_amount = GENESIS_SRI.saturating_mul(*value) / total_value; + let mut total_sri_distributed = 0; + let 
pool_values_len = pool_values.len(); + for (i, (coin, pool_amount, pool_value)) in pool_values.into_iter().enumerate() { + // whatever sri left for the last coin should be ~= it's ratio + let sri_amount = if i == (pool_values_len - 1) { + GENESIS_SRI - total_sri_distributed + } else { + u64::try_from(u128::from(GENESIS_SRI).saturating_mul(pool_value) / total_value).unwrap() + }; + total_sri_distributed += sri_amount; + + // we can't add 0 liquidity + if !(pool_amount > 0 && sri_amount > 0) { + continue; + } + + // actually add the liquidity to dex let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into()); Dex::::add_liquidity( origin.into(), - *coin, - *amount, + coin, + u64::try_from(pool_amount).unwrap(), sri_amount, - *amount, + u64::try_from(pool_amount).unwrap(), sri_amount, GENESIS_LIQUIDITY_ACCOUNT.into(), ) .unwrap(); + + // let everyone know about the event Self::deposit_event(Event::GenesisLiquidityAddedToPool { - coin1: Balance { coin: *coin, amount: Amount(*amount) }, + coin1: Balance { coin, amount: Amount(u64::try_from(pool_amount).unwrap()) }, coin2: Balance { coin: Coin::Serai, amount: Amount(sri_amount) }, }); // set liquidity tokens per account - let tokens = LiquidityTokens::::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), *coin).0; - let mut total_tokens_this_coin: u64 = 0; - for (acc, value) in account_values.get(coin).unwrap() { - let liq_tokens_this_acc = - tokens.saturating_mul(*value) / pool_values.get(coin).unwrap().1; + let tokens = + u128::from(LiquidityTokens::::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), coin).0); + let mut total_tokens_this_coin: u128 = 0; + + let accounts = account_values.get(&coin).unwrap(); + for (i, (acc, acc_value)) in accounts.iter().enumerate() { + // give whatever left to the last account not to have rounding errors. 
+ let liq_tokens_this_acc = if i == accounts.len() - 1 { + tokens - total_tokens_this_coin + } else { + tokens.saturating_mul(*acc_value) / pool_value + }; + total_tokens_this_coin = total_tokens_this_coin.saturating_add(liq_tokens_this_acc); - LiquidityTokensPerAddress::::set(coin, acc, Some(liq_tokens_this_acc)); + + LiquidityTokensPerAddress::::set( + coin, + acc, + Some(u64::try_from(liq_tokens_this_acc).unwrap()), + ); } assert_eq!(tokens, total_tokens_this_coin); } + assert_eq!(total_sri_distributed, GENESIS_SRI); - // we shouldn't have any coin left in our account at this moment, including SRI. + // we shouldn't have left any coin in genesis account at this moment, including SRI. + // All transferred to the pools. for coin in COINS { assert_eq!(Coins::::balance(GENESIS_LIQUIDITY_ACCOUNT.into(), coin), Amount(0)); } @@ -165,12 +241,37 @@ pub mod pallet { Ok(()) } - /// Remove the provided genesis liquidity for an account. If called pre-economic security era, - pub fn remove_coin_liquidity( - account: PublicKey, - balance: Balance, - out_address: ExternalAddress, - ) -> DispatchResult { + /// Returns the number of blocks since the coin's network reached economic security first time. + /// If the network is yet to be reached that threshold, 0 is returned. And maximum of + /// `GENESIS_SRI_TRICKLE_FEED` returned. + fn blocks_since_ec_security(coin: &Coin) -> u64 { + let ec_security_block = + EconomicSecurityReached::::get(coin.network()).saturated_into::(); + let current = >::block_number().saturated_into::(); + if ec_security_block > 0 { + let diff = current - ec_security_block; + if diff > GENESIS_SRI_TRICKLE_FEED { + return GENESIS_SRI_TRICKLE_FEED; + } + + return diff; + } + + 0 + } + + fn genesis_ended() -> bool { + >::block_number() >= BLOCKS_PER_MONTH.into() + } + } + + #[pallet::call] + impl Pallet { + /// Remove the provided genesis liquidity for an account. 
+ #[pallet::call_index(0)] + #[pallet::weight((0, DispatchClass::Operational))] // TODO + pub fn remove_coin_liquidity(origin: OriginFor, balance: Balance) -> DispatchResult { + let account = ensure_signed(origin)?; + let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into()); // check we are still in genesis period @@ -232,13 +333,9 @@ pub mod pallet { Err(Error::::CanOnlyRemoveFullAmount)?; } - // TODO: do internal transfer instead? - let origin = RawOrigin::Signed(GENESIS_LIQUIDITY_ACCOUNT.into()); - let instruction = OutInstructionWithBalance { - instruction: OutInstruction { address: out_address, data: None }, - balance, - }; - Coins::::burn_with_instruction(origin.into(), instruction)?; + // TODO: do external transfer instead for making it easier for the user? + // or do we even want to make it easier? + Coins::::transfer(origin.into(), account, balance)?; // save Liquidity::::set(balance.coin, account, None); @@ -248,27 +345,48 @@ pub mod pallet { Ok(()) } - /// Returns the number of blocks since the coin's network reached economic security first time. - /// If the network is yet to be reached that threshold, 0 is returned. And maximum of - /// `GENESIS_SRI_TRICKLE_FEED` returned. - fn blocks_since_ec_security(coin: &Coin) -> u64 { - let ec_security_block = - EconomicSecurityReached::::get(coin.network()).saturated_into::(); - let current = >::block_number().saturated_into::(); - if ec_security_block > 0 { - let diff = current - ec_security_block; - if diff > GENESIS_SRI_TRICKLE_FEED { - return GENESIS_SRI_TRICKLE_FEED; - } + /// A call to submit the initial coin values. 
+ #[pallet::call_index(1)] + #[pallet::weight((0, DispatchClass::Operational))] // TODO + pub fn set_initial_price( + origin: OriginFor, + prices: Prices, + _signature: Signature, + ) -> DispatchResult { + ensure_none(origin)?; - return diff; - } - - 0 + // set the prices + Oracle::::set(Coin::Bitcoin, prices.bitcoin); + Oracle::::set(Coin::Monero, prices.monero); + Oracle::::set(Coin::Ether, prices.ethereum); + Oracle::::set(Coin::Dai, prices.dai); + Ok(()) } + } - fn genesis_ended() -> bool { - >::block_number() >= BLOCKS_PER_MONTH.into() + #[pallet::validate_unsigned] + impl ValidateUnsigned for Pallet { + type Call = Call; + + fn validate_unsigned(_: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::set_initial_price { ref prices, ref signature } => { + let set = ValidatorSet { network: NetworkId::Serai, session: Session(0) }; + let signers = Participants::::get(NetworkId::Serai); + + if !musig_key(set, &signers).verify(&set_initial_price_message(&set, prices), signature) { + Err(InvalidTransaction::BadProof)?; + } + + ValidTransaction::with_tag_prefix("GenesisLiquidity") + .and_provides((0, set)) + .longevity(u64::MAX) + .propagate(true) + .build() + } + Call::remove_coin_liquidity { .. 
} => Err(InvalidTransaction::Call)?, + Call::__Ignore(_, _) => unreachable!(), + } } } } diff --git a/substrate/genesis-liquidity/primitives/Cargo.toml b/substrate/genesis-liquidity/primitives/Cargo.toml index 1e10e840..e795ff24 100644 --- a/substrate/genesis-liquidity/primitives/Cargo.toml +++ b/substrate/genesis-liquidity/primitives/Cargo.toml @@ -16,10 +16,30 @@ rustdoc-args = ["--cfg", "docsrs"] workspace = true [dependencies] +zeroize = { version = "^1.5", features = ["derive"], optional = true } + +borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true } +serde = { version = "1", default-features = false, features = ["derive", "alloc"], optional = true } + +scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["derive"] } +scale-info = { version = "2", default-features = false, features = ["derive"] } + +sp-std = { git = "https://github.com/serai-dex/substrate", default-features = false } + serai-primitives = { path = "../../primitives", default-features = false } +validator-sets-primitives = { package = "serai-validator-sets-primitives", path = "../../validator-sets/primitives", default-features = false } [features] std = [ + "zeroize", + "scale/std", + "borsh?/std", + "serde?/std", + "scale-info/std", + "serai-primitives/std", + "validator-sets-primitives/std", + + "sp-std/std" ] default = ["std"] diff --git a/substrate/genesis-liquidity/primitives/src/lib.rs b/substrate/genesis-liquidity/primitives/src/lib.rs index 0ce75824..f334ec74 100644 --- a/substrate/genesis-liquidity/primitives/src/lib.rs +++ b/substrate/genesis-liquidity/primitives/src/lib.rs @@ -2,7 +2,21 @@ #![cfg_attr(docsrs, feature(doc_auto_cfg))] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(feature = "std")] +use zeroize::Zeroize; + +#[cfg(feature = "borsh")] +use borsh::{BorshSerialize, BorshDeserialize}; +#[cfg(feature = "serde")] +use serde::{Serialize, Deserialize}; + +use sp_std::vec::Vec; 
+ +use scale::{Encode, Decode, MaxEncodedLen}; +use scale_info::TypeInfo; + use serai_primitives::*; +use validator_sets_primitives::ValidatorSet; // amount of blocks in 30 days for 6s per block. pub const BLOCKS_PER_MONTH: u32 = 10 * 60 * 24 * 30; @@ -15,3 +29,19 @@ pub const GENESIS_SRI: u64 = 100_000_000 * 10_u64.pow(8); // This is the account to hold and manage the genesis liquidity. pub const GENESIS_LIQUIDITY_ACCOUNT: SeraiAddress = system_address(b"Genesis-liquidity-account"); + +#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] +#[cfg_attr(feature = "std", derive(Zeroize))] +#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] +#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] +pub struct Prices { + pub bitcoin: u64, + pub monero: u64, + pub ethereum: u64, + pub dai: u64, +} + +/// The message for the set_initial_price signature. +pub fn set_initial_price_message(set: &ValidatorSet, prices: &Prices) -> Vec { + (b"GenesisLiquidity-set_initial_price", set, prices).encode() +} diff --git a/substrate/in-instructions/pallet/src/lib.rs b/substrate/in-instructions/pallet/src/lib.rs index 2e45cffa..007147d6 100644 --- a/substrate/in-instructions/pallet/src/lib.rs +++ b/substrate/in-instructions/pallet/src/lib.rs @@ -10,7 +10,7 @@ pub use in_instructions_primitives as primitives; use primitives::*; // TODO: Investigate why Substrate generates these -#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding)] +#[allow(clippy::cast_possible_truncation, clippy::no_effect_underscore_binding, clippy::empty_docs)] #[frame_support::pallet] pub mod pallet { use sp_std::vec; @@ -204,14 +204,9 @@ pub mod pallet { } } } - InInstruction::GenesisLiquidity(ops) => match ops { - GenesisLiquidityOperation::Add(address, balance) => { - GenesisLiq::::add_coin_liquidity(address.into(), balance)?; - } - GenesisLiquidityOperation::Remove(address, balance, out_address) => { - 
GenesisLiq::::remove_coin_liquidity(address.into(), balance, out_address)?; - } - }, + InInstruction::GenesisLiquidity(address) => { + GenesisLiq::::add_coin_liquidity(address.into(), instruction.balance)?; + } } Ok(()) } diff --git a/substrate/in-instructions/primitives/Cargo.toml b/substrate/in-instructions/primitives/Cargo.toml index f579f59d..54551134 100644 --- a/substrate/in-instructions/primitives/Cargo.toml +++ b/substrate/in-instructions/primitives/Cargo.toml @@ -5,7 +5,7 @@ description = "Serai instructions library, enabling encoding and decoding" license = "MIT" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/in-instructions/primitives/src/lib.rs b/substrate/in-instructions/primitives/src/lib.rs index fb2e9503..87d9ce37 100644 --- a/substrate/in-instructions/primitives/src/lib.rs +++ b/substrate/in-instructions/primitives/src/lib.rs @@ -71,15 +71,6 @@ pub enum DexCall { Swap(Balance, OutAddress), } -#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] -#[cfg_attr(feature = "std", derive(Zeroize))] -#[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] -#[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] -pub enum GenesisLiquidityOperation { - Add(SeraiAddress, Balance), - Remove(SeraiAddress, Balance, ExternalAddress), -} - #[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, MaxEncodedLen, TypeInfo)] #[cfg_attr(feature = "std", derive(Zeroize))] #[cfg_attr(feature = "borsh", derive(BorshSerialize, BorshDeserialize))] @@ -87,7 +78,7 @@ pub enum GenesisLiquidityOperation { pub enum InInstruction { Transfer(SeraiAddress), Dex(DexCall), - GenesisLiquidity(GenesisLiquidityOperation), + GenesisLiquidity(SeraiAddress), } #[derive(Clone, PartialEq, Eq, Encode, Decode, MaxEncodedLen, TypeInfo, RuntimeDebug)] diff --git a/substrate/node/Cargo.toml b/substrate/node/Cargo.toml index 
12ba4d17..60f7dc0f 100644 --- a/substrate/node/Cargo.toml +++ b/substrate/node/Cargo.toml @@ -26,6 +26,8 @@ hex = "0.4" rand_core = "0.6" schnorrkel = "0.11" +libp2p = "0.52" + sp-core = { git = "https://github.com/serai-dex/substrate" } sp-keystore = { git = "https://github.com/serai-dex/substrate" } sp-timestamp = { git = "https://github.com/serai-dex/substrate" } diff --git a/substrate/node/src/chain_spec.rs b/substrate/node/src/chain_spec.rs index 9c549604..73bcbbc1 100644 --- a/substrate/node/src/chain_spec.rs +++ b/substrate/node/src/chain_spec.rs @@ -1,13 +1,14 @@ use core::marker::PhantomData; +use std::collections::HashSet; -use sp_core::Pair as PairTrait; +use sp_core::{Decode, Pair as PairTrait, sr25519::Public}; use sc_service::ChainType; use serai_runtime::{ primitives::*, WASM_BINARY, BABE_GENESIS_EPOCH_CONFIG, RuntimeGenesisConfig, SystemConfig, CoinsConfig, DexConfig, ValidatorSetsConfig, SignalsConfig, BabeConfig, GrandpaConfig, - EmissionsConfig, + EmissionsConfig, GenesisLiquidityConfig, }; pub type ChainSpec = sc_service::GenericChainSpec; @@ -24,7 +25,7 @@ fn wasm_binary() -> Vec { WASM_BINARY.ok_or("compiled in wasm not available").unwrap().to_vec() } -fn testnet_genesis( +fn devnet_genesis( wasm_binary: &[u8], validators: &[&'static str], endowed_accounts: Vec, @@ -60,6 +61,63 @@ fn testnet_genesis( .collect(), participants: validators.clone(), }, + genesis_liquidity: GenesisLiquidityConfig { participants: validators.clone() }, + emissions: EmissionsConfig { + networks: serai_runtime::primitives::NETWORKS.to_vec(), + participants: validators.clone(), + }, + signals: SignalsConfig::default(), + babe: BabeConfig { + authorities: validators.iter().map(|validator| ((*validator).into(), 1)).collect(), + epoch_config: Some(BABE_GENESIS_EPOCH_CONFIG), + _config: PhantomData, + }, + grandpa: GrandpaConfig { + authorities: validators.into_iter().map(|validator| (validator.into(), 1)).collect(), + _config: PhantomData, + }, + } +} + +fn 
testnet_genesis(wasm_binary: &[u8], validators: Vec<&'static str>) -> RuntimeGenesisConfig { + let validators = validators + .into_iter() + .map(|validator| Public::decode(&mut hex::decode(validator).unwrap().as_slice()).unwrap()) + .collect::>(); + + assert_eq!(validators.iter().collect::>().len(), validators.len()); + + RuntimeGenesisConfig { + system: SystemConfig { code: wasm_binary.to_vec(), _config: PhantomData }, + + transaction_payment: Default::default(), + + coins: CoinsConfig { + accounts: validators + .iter() + .map(|a| (*a, Balance { coin: Coin::Serai, amount: Amount(5_000_000 * 10_u64.pow(8)) })) + .collect(), + _ignore: Default::default(), + }, + + dex: DexConfig { + pools: vec![Coin::Bitcoin, Coin::Ether, Coin::Dai, Coin::Monero], + _ignore: Default::default(), + }, + + validator_sets: ValidatorSetsConfig { + networks: serai_runtime::primitives::NETWORKS + .iter() + .map(|network| match network { + NetworkId::Serai => (NetworkId::Serai, Amount(50_000 * 10_u64.pow(8))), + NetworkId::Bitcoin => (NetworkId::Bitcoin, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Ethereum => (NetworkId::Ethereum, Amount(1_000_000 * 10_u64.pow(8))), + NetworkId::Monero => (NetworkId::Monero, Amount(100_000 * 10_u64.pow(8))), + }) + .collect(), + participants: validators.clone(), + }, + genesis_liquidity: GenesisLiquidityConfig { participants: validators.clone() }, emissions: EmissionsConfig { networks: serai_runtime::primitives::NETWORKS.to_vec(), participants: validators.clone(), @@ -87,7 +145,7 @@ pub fn development_config() -> ChainSpec { "devnet", ChainType::Development, move || { - testnet_genesis( + devnet_genesis( &wasm_binary, &["Alice"], vec![ @@ -105,7 +163,7 @@ pub fn development_config() -> ChainSpec { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-devnet"), // Fork ID None, // Properties @@ -115,7 +173,7 @@ pub fn development_config() -> ChainSpec { ) } -pub fn testnet_config() -> ChainSpec { +pub fn local_config() -> ChainSpec { let 
wasm_binary = wasm_binary(); ChainSpec::from_genesis( @@ -125,7 +183,7 @@ pub fn testnet_config() -> ChainSpec { "local", ChainType::Local, move || { - testnet_genesis( + devnet_genesis( &wasm_binary, &["Alice", "Bob", "Charlie", "Dave"], vec![ @@ -143,7 +201,7 @@ pub fn testnet_config() -> ChainSpec { // Telemetry None, // Protocol ID - Some("serai"), + Some("serai-local"), // Fork ID None, // Properties @@ -152,3 +210,39 @@ pub fn testnet_config() -> ChainSpec { None, ) } + +pub fn testnet_config() -> ChainSpec { + let wasm_binary = wasm_binary(); + + ChainSpec::from_genesis( + // Name + "Test Network 2", + // ID + "testnet-2", + ChainType::Live, + move || { + let _ = testnet_genesis(&wasm_binary, vec![]); + todo!() + }, + // Bootnodes + vec![], + // Telemetry + None, + // Protocol ID + Some("serai-testnet-2"), + // Fork ID + None, + // Properties + None, + // Extensions + None, + ) +} + +pub fn bootnode_multiaddrs(id: &str) -> Vec { + match id { + "devnet" | "local" => vec![], + "testnet-2" => todo!(), + _ => panic!("unrecognized network ID"), + } +} diff --git a/substrate/node/src/command.rs b/substrate/node/src/command.rs index 2f7ea0f7..71eee047 100644 --- a/substrate/node/src/command.rs +++ b/substrate/node/src/command.rs @@ -40,7 +40,8 @@ impl SubstrateCli for Cli { fn load_spec(&self, id: &str) -> Result, String> { match id { "dev" | "devnet" => Ok(Box::new(chain_spec::development_config())), - "local" => Ok(Box::new(chain_spec::testnet_config())), + "local" => Ok(Box::new(chain_spec::local_config())), + "testnet" => Ok(Box::new(chain_spec::testnet_config())), _ => panic!("Unknown network ID"), } } diff --git a/substrate/node/src/rpc.rs b/substrate/node/src/rpc.rs index d07778cc..b818c798 100644 --- a/substrate/node/src/rpc.rs +++ b/substrate/node/src/rpc.rs @@ -19,6 +19,7 @@ pub use sc_rpc_api::DenyUnsafe; use sc_transaction_pool_api::TransactionPool; pub struct FullDeps { + pub id: String, pub client: Arc, pub pool: Arc

, pub deny_unsafe: DenyUnsafe, @@ -46,18 +47,19 @@ where use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApiServer}; let mut module = RpcModule::new(()); - let FullDeps { client, pool, deny_unsafe, authority_discovery } = deps; + let FullDeps { id, client, pool, deny_unsafe, authority_discovery } = deps; module.merge(System::new(client.clone(), pool, deny_unsafe).into_rpc())?; module.merge(TransactionPayment::new(client.clone()).into_rpc())?; if let Some(authority_discovery) = authority_discovery { - let mut authority_discovery_module = RpcModule::new((client, RwLock::new(authority_discovery))); + let mut authority_discovery_module = + RpcModule::new((id, client, RwLock::new(authority_discovery))); authority_discovery_module.register_async_method( "p2p_validators", |params, context| async move { let network: NetworkId = params.parse()?; - let (client, authority_discovery) = &*context; + let (id, client, authority_discovery) = &*context; let latest_block = client.info().best_hash; let validators = client.runtime_api().validators(latest_block, network).map_err(|_| { @@ -66,7 +68,9 @@ where "please report this at https://github.com/serai-dex/serai", ))) })?; - let mut all_p2p_addresses = vec![]; + // Always return the protocol's bootnodes + let mut all_p2p_addresses = crate::chain_spec::bootnode_multiaddrs(id); + // Additionally returns validators found over the DHT for validator in validators { let mut returned_addresses = authority_discovery .write() diff --git a/substrate/node/src/service.rs b/substrate/node/src/service.rs index 686e4c39..5f76decf 100644 --- a/substrate/node/src/service.rs +++ b/substrate/node/src/service.rs @@ -161,7 +161,7 @@ pub fn new_partial( )) } -pub fn new_full(config: Configuration) -> Result { +pub fn new_full(mut config: Configuration) -> Result { let ( sc_service::PartialComponents { client, @@ -176,6 +176,11 @@ pub fn new_full(config: Configuration) -> Result { keystore_container, ) = new_partial(&config)?; + 
config.network.node_name = "serai".to_string(); + config.network.client_version = "0.1.0".to_string(); + config.network.listen_addresses = + vec!["/ip4/0.0.0.0/tcp/30333".parse().unwrap(), "/ip6/::/tcp/30333".parse().unwrap()]; + let mut net_config = sc_network::config::FullNetworkConfiguration::new(&config.network); let grandpa_protocol_name = grandpa::protocol_standard_name(&client.block_hash(0).unwrap().unwrap(), &config.chain_spec); @@ -203,6 +208,59 @@ pub fn new_full(config: Configuration) -> Result { warp_sync_params: Some(WarpSyncParams::WithProvider(warp_sync)), })?; + task_manager.spawn_handle().spawn("bootnodes", "bootnodes", { + let network = network.clone(); + let id = config.chain_spec.id().to_string(); + + async move { + // Transforms the above Multiaddrs into MultiaddrWithPeerIds + // While the PeerIds *should* be known in advance and hardcoded, that data wasn't collected in + // time and this fine for a testnet + let bootnodes = || async { + use libp2p::{Transport as TransportTrait, tcp::tokio::Transport, noise::Config}; + + let bootnode_multiaddrs = crate::chain_spec::bootnode_multiaddrs(&id); + + let mut tasks = vec![]; + for multiaddr in bootnode_multiaddrs { + tasks.push(tokio::time::timeout( + core::time::Duration::from_secs(10), + tokio::task::spawn(async move { + let Ok(noise) = Config::new(&sc_network::Keypair::generate_ed25519()) else { None? }; + let mut transport = Transport::default() + .upgrade(libp2p::core::upgrade::Version::V1) + .authenticate(noise) + .multiplex(libp2p::yamux::Config::default()); + let Ok(transport) = transport.dial(multiaddr.clone()) else { None? }; + let Ok((peer_id, _)) = transport.await else { None? 
}; + Some(sc_network::config::MultiaddrWithPeerId { multiaddr, peer_id }) + }), + )); + } + + let mut res = vec![]; + for task in tasks { + if let Ok(Ok(Some(bootnode))) = task.await { + res.push(bootnode); + } + } + res + }; + + use sc_network::{NetworkStatusProvider, NetworkPeers}; + loop { + if let Ok(status) = network.status().await { + if status.num_connected_peers < 3 { + for bootnode in bootnodes().await { + let _ = network.add_reserved_peer(bootnode); + } + } + } + tokio::time::sleep(core::time::Duration::from_secs(60)).await; + } + } + }); + if config.offchain_worker.enabled { task_manager.spawn_handle().spawn( "offchain-workers-runner", @@ -258,11 +316,13 @@ pub fn new_full(config: Configuration) -> Result { }; let rpc_builder = { + let id = config.chain_spec.id().to_string(); let client = client.clone(); let pool = transaction_pool.clone(); Box::new(move |deny_unsafe, _| { crate::rpc::create_full(crate::rpc::FullDeps { + id: id.clone(), client: client.clone(), pool: pool.clone(), deny_unsafe, diff --git a/substrate/primitives/Cargo.toml b/substrate/primitives/Cargo.toml index 22fc4709..0e1e8f38 100644 --- a/substrate/primitives/Cargo.toml +++ b/substrate/primitives/Cargo.toml @@ -6,7 +6,7 @@ license = "MIT" repository = "https://github.com/serai-dex/serai/tree/develop/substrate/primitives" authors = ["Luke Parker "] edition = "2021" -rust-version = "1.69" +rust-version = "1.74" [package.metadata.docs.rs] all-features = true diff --git a/substrate/runtime/Cargo.toml b/substrate/runtime/Cargo.toml index d159725a..208274bc 100644 --- a/substrate/runtime/Cargo.toml +++ b/substrate/runtime/Cargo.toml @@ -128,7 +128,7 @@ std = [ "pallet-transaction-payment-rpc-runtime-api/std", ] -fast-epoch = [] +fast-epoch = ["genesis-liquidity-pallet/fast-epoch"] runtime-benchmarks = [ "sp-runtime/runtime-benchmarks", diff --git a/substrate/runtime/src/lib.rs b/substrate/runtime/src/lib.rs index af3bc917..8d70c9fa 100644 --- a/substrate/runtime/src/lib.rs +++ 
b/substrate/runtime/src/lib.rs @@ -11,7 +11,6 @@ use core::marker::PhantomData; // Re-export all components pub use serai_primitives as primitives; pub use primitives::{BlockNumber, Header}; -use primitives::{NetworkId, NETWORKS}; pub use frame_system as system; pub use frame_support as support; @@ -50,7 +49,7 @@ use sp_runtime::{ BoundedVec, Perbill, ApplyExtrinsicResult, }; -use primitives::{PublicKey, AccountLookup, SubstrateAmount}; +use primitives::{NetworkId, PublicKey, AccountLookup, SubstrateAmount, Coin, NETWORKS}; use support::{ traits::{ConstU8, ConstU16, ConstU32, ConstU64, Contains}, @@ -178,6 +177,9 @@ impl Contains for CallFilter { }, RuntimeCall::Dex(call) => !matches!(call, dex::Call::__Ignore(_, _)), RuntimeCall::ValidatorSets(call) => !matches!(call, validator_sets::Call::__Ignore(_, _)), + RuntimeCall::GenesisLiquidity(call) => { + !matches!(call, genesis_liquidity::Call::__Ignore(_, _)) + } RuntimeCall::InInstructions(call) => !matches!(call, in_instructions::Call::__Ignore(_, _)), RuntimeCall::Signals(call) => !matches!(call, signals::Call::__Ignore(_, _)), @@ -325,7 +327,7 @@ pub type ReportLongevity = ::EpochDuration; impl babe::Config for Runtime { #[cfg(feature = "fast-epoch")] - type EpochDuration = ConstU64<{ MINUTES / 2 }>; // 30 seconds + type EpochDuration = ConstU64<{ HOURS / 2 }>; // 30 minutes #[cfg(not(feature = "fast-epoch"))] type EpochDuration = ConstU64<{ 4 * 7 * DAYS }>; @@ -641,4 +643,28 @@ sp_api::impl_runtime_apis! 
{ } } } + + impl dex::DexApi for Runtime { + fn quote_price_exact_tokens_for_tokens( + asset1: Coin, + asset2: Coin, + amount: SubstrateAmount, + include_fee: bool + ) -> Option { + Dex::quote_price_exact_tokens_for_tokens(asset1, asset2, amount, include_fee) + } + + fn quote_price_tokens_for_exact_tokens( + asset1: Coin, + asset2: Coin, + amount: SubstrateAmount, + include_fee: bool + ) -> Option { + Dex::quote_price_tokens_for_exact_tokens(asset1, asset2, amount, include_fee) + } + + fn get_reserves(asset1: Coin, asset2: Coin) -> Option<(SubstrateAmount, SubstrateAmount)> { + Dex::get_reserves(&asset1, &asset2).ok() + } + } } diff --git a/substrate/validator-sets/pallet/src/lib.rs b/substrate/validator-sets/pallet/src/lib.rs index f712cfbf..72ca67d5 100644 --- a/substrate/validator-sets/pallet/src/lib.rs +++ b/substrate/validator-sets/pallet/src/lib.rs @@ -477,7 +477,7 @@ pub mod pallet { let Some(top) = top else { return false }; - // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause an off-chain reduction of + // key_shares may be over MAX_KEY_SHARES_PER_SET, which will cause a round robin reduction of // each validator's key shares until their sum is MAX_KEY_SHARES_PER_SET // post_amortization_key_shares_for_top_validator yields what the top validator's key shares // would be after such a reduction, letting us evaluate this correctly diff --git a/tests/processor/src/lib.rs b/tests/processor/src/lib.rs index 511382ab..e400057a 100644 --- a/tests/processor/src/lib.rs +++ b/tests/processor/src/lib.rs @@ -416,7 +416,11 @@ impl Coordinator { } } - pub async fn get_transaction(&self, ops: &DockerOperations, tx: &[u8]) -> Option> { + pub async fn get_published_transaction( + &self, + ops: &DockerOperations, + tx: &[u8], + ) -> Option> { let rpc_url = network_rpc(self.network, ops, &self.network_handle); match self.network { NetworkId::Bitcoin => { @@ -424,8 +428,15 @@ impl Coordinator { let rpc = Rpc::new(rpc_url).await.expect("couldn't connect to the 
coordinator's Bitcoin RPC"); + + // Bitcoin publishes a 0-byte TX ID to reduce variables + // Accordingly, read the mempool to find the (presumed relevant) TX + let entries: Vec = + rpc.rpc_call("getrawmempool", serde_json::json!([false])).await.unwrap(); + assert_eq!(entries.len(), 1, "more than one entry in the mempool, so unclear which to get"); + let mut hash = [0; 32]; - hash.copy_from_slice(tx); + hash.copy_from_slice(&hex::decode(&entries[0]).unwrap()); if let Ok(tx) = rpc.get_transaction(&hash).await { let mut buf = vec![]; tx.consensus_encode(&mut buf).unwrap(); diff --git a/tests/processor/src/tests/send.rs b/tests/processor/src/tests/send.rs index 8685af04..4d0d3cd6 100644 --- a/tests/processor/src/tests/send.rs +++ b/tests/processor/src/tests/send.rs @@ -261,12 +261,12 @@ fn send_test() { let participating = participating.iter().map(|p| usize::from(u16::from(*p) - 1)).collect::>(); for participant in &participating { - assert!(coordinators[*participant].get_transaction(&ops, &tx_id).await.is_some()); + assert!(coordinators[*participant].get_published_transaction(&ops, &tx_id).await.is_some()); } // Publish this transaction to the left out nodes let tx = coordinators[*participating.iter().next().unwrap()] - .get_transaction(&ops, &tx_id) + .get_published_transaction(&ops, &tx_id) .await .unwrap(); for (i, coordinator) in coordinators.iter_mut().enumerate() {