Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-14 23:19:24 +00:00)

Compare commits: e4e4245ee3...2c8af04781 (179 commits)
Commits (newest first):

2c8af04781 a0ed043372 2984d2f8cf 554c5778e4 7e4c59a0a3 294462641e
ae76749513 1e1b821d34 702b4c860c bc1bbf9951 ec9211fd84 4292660eda
8ea5acbacb 1b1aa74770 861a8352e5 e64827b6d7 c27aaf8658 53567e91c8
1a08d50e16 855e53164e 1367e41510 a691be21c8 673cf8fd47 118d81bc90
e75c4ec6ed 9e628d217f a717ae9ea7 98c3f75fa2 18178f3764 bdc3bda04a
433beac93a 8f2a9301cf d21034c349 381495618c ee0efe7cde 7feb7aed22
cc75a92641 a7d5640642 ae61f3d359 4bcea31c2a eb9bce6862 39be23d807
3f0f4d520d 80ca2b780a 0813351f1f a38d135059 67f9f76fdf 1c5bc2259e
bdf89f5350 239127aae5 d9543bee40 8746b54a43 7761798a78 72a18bf8bb
0616085109 e23176deeb 5551521e58 a2d9aeaed7 e1ad897f7e 2edc2f3612
e56af7fc51 947e1067d9 b4e94f3d51 1b39138472 e78236276a 2c4c33e632
02409c5735 f2cf03cedf 0d4c8cf032 b6811f9015 fcd5fb85df 3ac0265f07
9b8c8f8231 59fa49f750 723f529659 73af09effb 4054e44471 a8159e9070
b61ba9d1bb 776cbbb9a4 76a3f3ec4b 93c7d06684 4cb838e248 c988b7cdb0
017aab2258 ba3a6f9e91 e36b671f37 2d4b775b6e 247cc8f0cc 0ccf71df1e
8aba71b9c4 46c12c0e66 3cc7b49492 0078858c1c a3cb514400 ed0221d804
4152bcacb2 f07ec7bee0 7484eadbbb 59ff944152 8f848b1abc 100c80be9f
a353f9e2da b62fc3a1fa 8380653855 b50b889918 d570c1d277 2da24506a2
6e9cb74022 0c1aec29bb 653ead1e8c 8ff019265f 0601d47789 ebef38d93b
75b4707002 3c787e005f f11a6b4ff1 fadc88d2ad c88ebe985e 6deb60513c
bd277e7032 fc765bb9e0 13b74195f7 f21838e0d5 76cbe6cf1e 5999f5d65a
d429a0bae6 775824f373 41a74cb513 e26da1ec34 7266e7f7ea a8b9b7bad3
2ca7fccb08 4f6d91037e 8db76ed67c 920303e1b4 9f4b28e5ae f9d02d43c2
8ac501028d 612c67c537 04a971a024 738636c238 65f3f48517 7cc07d64d1
fdfe520f9d 77ef25416b 7c1025dbcb a771fbe1c6 9cebdf7c68 75251f04b4
6196642beb 2bddf00222 9ab8ba0215 33e0c85f34 1e8f4e6156 66f3428051
7e71840822 b65dbacd6a 2fcd9530dd 379780a3c9 945f31dfc7 d5d1fc3eea
fd12cc0213 ce805c8cc8 bc0cc5a754 f2ee4daf43 4e29678799 74d3075dae
155ad48f4c 951872b026 2b47feafed a2717d73f0 8763ef23ed 57a0ba966b
e843b4a2a0 2f3bd7a02a 1e8a9ec5bd 2f29c91d30 f3b91bd44f
.github/actions/bitcoin/action.yml (vendored):

@@ -37,4 +37,4 @@ runs:
     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
@@ -42,8 +42,8 @@ runs:
       shell: bash
       run: |
         cargo install svm-rs
-        svm install 0.8.25
-        svm use 0.8.25
+        svm install 0.8.26
+        svm use 0.8.26

     # - name: Cache Rust
     #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/workflows/lint.yml (vendored):

@@ -73,6 +73,15 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+        with:
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          cache: false
+
+      - name: Run forge fmt
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+
   machete:
     runs-on: ubuntu-latest
     steps:
.github/workflows/networks-tests.yml (vendored):

@@ -30,8 +30,9 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p bitcoin-serai \
+            -p build-solidity-contracts \
+            -p ethereum-schnorr-contract \
             -p alloy-simple-request-transport \
-            -p ethereum-serai \
             -p serai-ethereum-relayer \
             -p monero-io \
             -p monero-generators \
.github/workflows/tests.yml (vendored):

@@ -39,7 +39,25 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-message-queue \
             -p serai-processor-messages \
-            -p serai-processor \
+            -p serai-processor-key-gen \
+            -p serai-processor-view-keys \
+            -p serai-processor-frost-attempt-manager \
+            -p serai-processor-primitives \
+            -p serai-processor-scanner \
+            -p serai-processor-scheduler-primitives \
+            -p serai-processor-utxo-scheduler-primitives \
+            -p serai-processor-utxo-scheduler \
+            -p serai-processor-transaction-chaining-scheduler \
+            -p serai-processor-smart-contract-scheduler \
+            -p serai-processor-signers \
+            -p serai-processor-bin \
+            -p serai-bitcoin-processor \
+            -p serai-processor-ethereum-primitives \
+            -p serai-processor-ethereum-deployer \
+            -p serai-processor-ethereum-router \
+            -p serai-processor-ethereum-erc20 \
+            -p serai-ethereum-processor \
+            -p serai-monero-processor \
             -p tendermint-machine \
             -p tributary-chain \
             -p serai-coordinator \
Cargo.lock (generated; 400 changes):

@@ -184,17 +184,6 @@ dependencies = [
  "serde",
 ]

-[[package]]
-name = "alloy-json-abi"
-version = "0.8.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "299d2a937b6c60968df3dad2a988b0f0e03277b344639a4f7a31bd68e6285e59"
-dependencies = [
- "alloy-primitives",
- "alloy-sol-type-parser",
- "serde",
-]
-
 [[package]]
 name = "alloy-json-rpc"
 version = "0.3.1"
@@ -426,7 +415,6 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "71c4d842beb7a6686d04125603bc57614d5ed78bf95e4753274db3db4ba95214"
 dependencies = [
- "alloy-json-abi",
  "alloy-sol-macro-input",
  "const-hex",
  "heck 0.5.0",
@@ -445,33 +433,21 @@ version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "1306e8d3c9e6e6ecf7a39ffaf7291e73a5f655a2defd366ee92c2efebcdf7fee"
 dependencies = [
- "alloy-json-abi",
  "const-hex",
  "dunce",
  "heck 0.5.0",
  "proc-macro2",
  "quote",
  "serde_json",
  "syn 2.0.77",
  "syn-solidity",
 ]

 [[package]]
 name = "alloy-sol-type-parser"
 version = "0.8.0"
-source = "git+https://github.com/alloy-rs/core?rev=446b9d2fbce12b88456152170709a3eaac929af0#446b9d2fbce12b88456152170709a3eaac929af0"
 dependencies = [
  "serde",
  "winnow 0.6.18",
 ]

 [[package]]
 name = "alloy-sol-types"
 version = "0.8.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "577e262966e92112edbd15b1b2c0947cc434d6e8311df96d3329793fe8047da9"
 dependencies = [
- "alloy-json-abi",
  "alloy-primitives",
  "alloy-sol-macro",
  "const-hex",
@@ -1318,6 +1294,10 @@ dependencies = [
  "semver 0.6.0",
 ]

+[[package]]
+name = "build-solidity-contracts"
+version = "0.1.1"
+
 [[package]]
 name = "bumpalo"
 version = "3.16.0"
@@ -2480,24 +2460,22 @@ dependencies = [
 ]

 [[package]]
-name = "ethereum-serai"
+name = "ethereum-schnorr-contract"
 version = "0.1.0"
 dependencies = [
- "alloy-consensus",
  "alloy-core",
- "alloy-network",
  "alloy-node-bindings",
  "alloy-provider",
  "alloy-rpc-client",
  "alloy-rpc-types-eth",
  "alloy-simple-request-transport",
  "alloy-sol-types",
- "flexible-transcript",
+ "build-solidity-contracts",
  "group",
  "k256",
- "modular-frost",
  "rand_core",
- "thiserror",
+ "sha3",
+ "subtle",
  "tokio",
 ]

@@ -8120,6 +8098,34 @@ dependencies = [
 "sp-runtime",
 ]

+[[package]]
+name = "serai-bitcoin-processor"
+version = "0.1.0"
+dependencies = [
+ "bitcoin-serai",
+ "borsh",
+ "ciphersuite",
+ "dkg",
+ "hex",
+ "log",
+ "modular-frost",
+ "parity-scale-codec",
+ "rand_core",
+ "secp256k1",
+ "serai-client",
+ "serai-db",
+ "serai-processor-bin",
+ "serai-processor-key-gen",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-signers",
+ "serai-processor-transaction-chaining-scheduler",
+ "serai-processor-utxo-scheduler-primitives",
+ "tokio",
+ "zalloc",
+]
+
 [[package]]
 name = "serai-client"
 version = "0.1.0"
@@ -8128,13 +8134,14 @@ dependencies = [
  "bitcoin",
  "bitvec",
  "blake2",
  "borsh",
  "ciphersuite",
  "dockertest",
  "frame-system",
  "frost-schnorrkel",
  "hex",
  "modular-frost",
  "monero-wallet",
+ "monero-address",
  "multiaddr",
  "parity-scale-codec",
  "rand_core",
@@ -8315,6 +8322,46 @@ dependencies = [
 name = "serai-env"
 version = "0.1.0"

+[[package]]
+name = "serai-ethereum-processor"
+version = "0.1.0"
+dependencies = [
+ "alloy-core",
+ "alloy-provider",
+ "alloy-rlp",
+ "alloy-rpc-client",
+ "alloy-rpc-types-eth",
+ "alloy-simple-request-transport",
+ "alloy-transport",
+ "borsh",
+ "ciphersuite",
+ "const-hex",
+ "dkg",
+ "ethereum-schnorr-contract",
+ "hex",
+ "k256",
+ "log",
+ "modular-frost",
+ "parity-scale-codec",
+ "rand_core",
+ "serai-client",
+ "serai-db",
+ "serai-env",
+ "serai-processor-bin",
+ "serai-processor-ethereum-erc20",
+ "serai-processor-ethereum-primitives",
+ "serai-processor-ethereum-router",
+ "serai-processor-key-gen",
+ "serai-processor-messages",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-signers",
+ "serai-processor-smart-contract-scheduler",
+ "tokio",
+ "zalloc",
+]
+
 [[package]]
 name = "serai-ethereum-relayer"
 version = "0.1.0"
@@ -8343,7 +8390,6 @@ dependencies = [
  "serai-coordinator-tests",
  "serai-docker-tests",
  "serai-message-queue-tests",
- "serai-processor",
  "serai-processor-tests",
  "serde",
  "serde_json",
@@ -8459,6 +8505,36 @@ dependencies = [
  "zeroize",
 ]

+[[package]]
+name = "serai-monero-processor"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "ciphersuite",
+ "dalek-ff-group",
+ "dkg",
+ "log",
+ "modular-frost",
+ "monero-simple-request-rpc",
+ "monero-wallet",
+ "parity-scale-codec",
+ "rand_chacha",
+ "rand_core",
+ "serai-client",
+ "serai-processor-bin",
+ "serai-processor-key-gen",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-signers",
+ "serai-processor-utxo-scheduler",
+ "serai-processor-utxo-scheduler-primitives",
+ "serai-processor-view-keys",
+ "tokio",
+ "zalloc",
+ "zeroize",
+]
+
 [[package]]
 name = "serai-no-std-tests"
 version = "0.1.0"
@@ -8559,44 +8635,125 @@ dependencies = [
 ]

 [[package]]
-name = "serai-processor"
+name = "serai-processor-bin"
 version = "0.1.0"
 dependencies = [
+ "borsh",
+ "ciphersuite",
+ "dkg",
+ "env_logger",
+ "hex",
+ "log",
+ "parity-scale-codec",
+ "serai-client",
+ "serai-db",
+ "serai-env",
+ "serai-message-queue",
+ "serai-processor-key-gen",
+ "serai-processor-messages",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-signers",
+ "tokio",
+ "zeroize",
 ]

+[[package]]
+name = "serai-processor-ethereum-deployer"
+version = "0.1.0"
+dependencies = [
+ "alloy-consensus",
+ "alloy-core",
+ "alloy-provider",
+ "alloy-rpc-types-eth",
+ "alloy-simple-request-transport",
+ "alloy-sol-macro",
+ "alloy-sol-types",
+ "alloy-transport",
+ "build-solidity-contracts",
+ "serai-processor-ethereum-primitives",
+]
+
+[[package]]
+name = "serai-processor-ethereum-erc20"
+version = "0.1.0"
+dependencies = [
+ "alloy-core",
+ "alloy-provider",
+ "alloy-rpc-types-eth",
+ "alloy-simple-request-transport",
+ "alloy-sol-macro",
+ "alloy-sol-types",
+ "alloy-transport",
+ "tokio",
+]
+
+[[package]]
+name = "serai-processor-ethereum-primitives"
+version = "0.1.0"
+dependencies = [
+ "alloy-consensus",
+ "alloy-core",
+ "group",
+ "k256",
+]
+
+[[package]]
+name = "serai-processor-ethereum-router"
+version = "0.1.0"
+dependencies = [
+ "alloy-consensus",
+ "alloy-core",
+ "alloy-provider",
+ "alloy-rpc-types-eth",
+ "alloy-simple-request-transport",
+ "alloy-sol-macro-expander",
+ "alloy-sol-macro-input",
+ "alloy-sol-types",
+ "alloy-transport",
+ "build-solidity-contracts",
+ "ethereum-schnorr-contract",
+ "group",
+ "serai-client",
+ "serai-processor-ethereum-deployer",
+ "serai-processor-ethereum-erc20",
+ "serai-processor-ethereum-primitives",
+ "syn 2.0.77",
+ "syn-solidity",
+]
+
+[[package]]
+name = "serai-processor-frost-attempt-manager"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "log",
+ "modular-frost",
+ "parity-scale-codec",
+ "rand_core",
+ "serai-db",
+ "serai-processor-messages",
+ "serai-validator-sets-primitives",
+]
+
+[[package]]
+name = "serai-processor-key-gen"
+version = "0.1.0"
 dependencies = [
  "async-trait",
  "bitcoin-serai",
  "blake2",
  "borsh",
  "ciphersuite",
  "const-hex",
  "dalek-ff-group",
  "dkg",
  "dockertest",
  "ec-divisors",
  "env_logger",
  "ethereum-serai",
  "flexible-transcript",
  "frost-schnorrkel",
  "hex",
  "k256",
  "log",
  "modular-frost",
  "monero-simple-request-rpc",
  "monero-wallet",
  "parity-scale-codec",
  "rand_chacha",
  "rand_core",
  "secp256k1",
  "serai-client",
  "serai-db",
  "serai-docker-tests",
  "serai-env",
  "serai-message-queue",
  "serai-processor-messages",
  "serde_json",
  "sp-application-crypto",
  "thiserror",
  "tokio",
  "zalloc",
  "serai-validator-sets-primitives",
  "zeroize",
 ]

@@ -8606,6 +8763,7 @@ version = "0.1.0"
 dependencies = [
  "borsh",
+ "dkg",
  "hex",
  "parity-scale-codec",
  "serai-coins-primitives",
  "serai-in-instructions-primitives",
@@ -8613,6 +8771,86 @@ dependencies = [
  "serai-validator-sets-primitives",
 ]

+[[package]]
+name = "serai-processor-primitives"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "log",
+ "parity-scale-codec",
+ "serai-coins-primitives",
+ "serai-primitives",
+ "tokio",
+]
+
+[[package]]
+name = "serai-processor-scanner"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "hex",
+ "log",
+ "parity-scale-codec",
+ "serai-coins-primitives",
+ "serai-db",
+ "serai-in-instructions-primitives",
+ "serai-primitives",
+ "serai-processor-messages",
+ "serai-processor-primitives",
+ "serai-processor-scheduler-primitives",
+ "tokio",
+]
+
+[[package]]
+name = "serai-processor-scheduler-primitives"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "ciphersuite",
+ "modular-frost",
+ "parity-scale-codec",
+ "serai-db",
+]
+
+[[package]]
+name = "serai-processor-signers"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "ciphersuite",
+ "frost-schnorrkel",
+ "log",
+ "modular-frost",
+ "parity-scale-codec",
+ "rand_core",
+ "serai-db",
+ "serai-in-instructions-primitives",
+ "serai-primitives",
+ "serai-processor-frost-attempt-manager",
+ "serai-processor-messages",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-validator-sets-primitives",
+ "tokio",
+ "zeroize",
+]
+
+[[package]]
+name = "serai-processor-smart-contract-scheduler"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "parity-scale-codec",
+ "serai-db",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+]
+
 [[package]]
 name = "serai-processor-tests"
 version = "0.1.0"
@@ -8623,7 +8861,6 @@ dependencies = [
  "curve25519-dalek",
  "dkg",
  "dockertest",
- "ethereum-serai",
  "hex",
  "k256",
  "monero-simple-request-rpc",
@@ -8635,13 +8872,60 @@ dependencies = [
  "serai-docker-tests",
  "serai-message-queue",
  "serai-message-queue-tests",
- "serai-processor",
  "serai-processor-messages",
  "serde_json",
  "tokio",
  "zeroize",
 ]

+[[package]]
+name = "serai-processor-transaction-chaining-scheduler"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "parity-scale-codec",
+ "serai-db",
+ "serai-primitives",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-utxo-scheduler-primitives",
+]
+
+[[package]]
+name = "serai-processor-utxo-scheduler"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "group",
+ "parity-scale-codec",
+ "serai-db",
+ "serai-primitives",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+ "serai-processor-utxo-scheduler-primitives",
+]
+
+[[package]]
+name = "serai-processor-utxo-scheduler-primitives"
+version = "0.1.0"
+dependencies = [
+ "borsh",
+ "serai-primitives",
+ "serai-processor-primitives",
+ "serai-processor-scanner",
+ "serai-processor-scheduler-primitives",
+]
+
+[[package]]
+name = "serai-processor-view-keys"
+version = "0.1.0"
+dependencies = [
+ "ciphersuite",
+]
+
 [[package]]
 name = "serai-reproducible-runtime-tests"
 version = "0.1.0"
Cargo.toml:

@@ -46,8 +46,9 @@ members = [

   "networks/bitcoin",

+  "networks/ethereum/build-contracts",
+  "networks/ethereum/schnorr",
   "networks/ethereum/alloy-simple-request-transport",
-  "networks/ethereum",
   "networks/ethereum/relayer",

   "networks/monero/io",
@@ -70,7 +71,28 @@ members = [
   "message-queue",

   "processor/messages",
-  "processor",
+
+  "processor/key-gen",
+  "processor/view-keys",
+  "processor/frost-attempt-manager",
+
+  "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
+  "processor/scheduler/utxo/primitives",
+  "processor/scheduler/utxo/standard",
+  "processor/scheduler/utxo/transaction-chaining",
+  "processor/scheduler/smart-contract",
+  "processor/signers",
+
+  "processor/bin",
+  "processor/bitcoin",
+  "processor/ethereum/primitives",
+  "processor/ethereum/deployer",
+  "processor/ethereum/router",
+  "processor/ethereum/erc20",
+  "processor/ethereum",
+  "processor/monero",

   "coordinator/tributary/tendermint",
   "coordinator/tributary",
@@ -182,9 +204,6 @@ directories-next = { path = "patches/directories-next" }
 # The official pasta_curves repo doesn't support Zeroize
 pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }

-# https://github.com/alloy-rs/core/issues/717
-alloy-sol-type-parser = { git = "https://github.com/alloy-rs/core", rev = "446b9d2fbce12b88456152170709a3eaac929af0" }
-
 [workspace.lints.clippy]
 unwrap_or_default = "allow"
 borrow_as_ptr = "deny"
common/db/src/create_db.rs:

@@ -38,12 +38,21 @@ pub fn serai_db_key(
 #[macro_export]
 macro_rules! create_db {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $(
+      $field_name: ident:
+        $(<$($generic_name: tt: $generic_type: tt),+>)?(
+          $($arg: ident: $arg_type: ty),*
+        ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       #[derive(Clone, Debug)]
-      pub(crate) struct $field_name;
-      impl $field_name {
+      pub(crate) struct $field_name$(
+        <$($generic_name: $generic_type),+>
+      )?$(
+        (core::marker::PhantomData<($($generic_name),+)>)
+      )?;
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
         pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
           use scale::Encode;
           $crate::serai_db_key(
@@ -52,18 +61,43 @@ macro_rules! create_db {
             ($($arg),*).encode()
           )
         }
-        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
-          let key = $field_name::key($($arg),*);
+        pub(crate) fn set(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*,
+          data: &$field_type
+        ) {
+          let key = Self::key($($arg),*);
           txn.put(&key, borsh::to_vec(data).unwrap());
         }
-        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
-          getter.get($field_name::key($($arg),*)).map(|data| {
+        pub(crate) fn get(
+          getter: &impl Get,
+          $($arg: $arg_type),*
+        ) -> Option<$field_type> {
+          getter.get(Self::key($($arg),*)).map(|data| {
             borsh::from_slice(data.as_ref()).unwrap()
           })
         }
+        // Returns a PhantomData of all generic types so if the generic was only used in the value,
+        // not the keys, this doesn't have unused generic types
         #[allow(dead_code)]
-        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
-          txn.del(&$field_name::key($($arg),*))
+        pub(crate) fn del(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> core::marker::PhantomData<($($($generic_name),+)?)> {
+          txn.del(&Self::key($($arg),*));
+          core::marker::PhantomData
+        }
+
+        pub(crate) fn take(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let key = Self::key($($arg),*);
+          let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
+          if res.is_some() {
+            txn.del(key);
+          }
+          res
         }
       }
     )*
@@ -73,19 +107,30 @@ macro_rules! create_db {
 #[macro_export]
 macro_rules! db_channel {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $($field_name: ident:
+      $(<$($generic_name: tt: $generic_type: tt),+>)?(
+        $($arg: ident: $arg_type: ty),*
+      ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       create_db! {
         $db_name {
-          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
+          $field_name: $(<$($generic_name: $generic_type),+>)?(
+            $($arg: $arg_type,)*
+            index: u32
+          ) -> $field_type
         }
       }

-      impl $field_name {
-        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
+        pub(crate) fn send(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+          , value: &$field_type
+        ) {
           // Use index 0 to store the amount of messages
-          let messages_sent_key = $field_name::key($($arg),*, 0);
+          let messages_sent_key = Self::key($($arg,)* 0);
           let messages_sent = txn.get(&messages_sent_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);
@@ -96,19 +141,22 @@ macro_rules! db_channel {
           // at the same time
           let index_to_use = messages_sent + 2;

-          $field_name::set(txn, $($arg),*, index_to_use, value);
+          Self::set(txn, $($arg,)* index_to_use, value);
         }
-        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
-          let messages_recvd_key = $field_name::key($($arg),*, 1);
+        pub(crate) fn try_recv(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let messages_recvd_key = Self::key($($arg,)* 1);
           let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);

           let index_to_read = messages_recvd + 2;

-          let res = $field_name::get(txn, $($arg),*, index_to_read);
+          let res = Self::get(txn, $($arg,)* index_to_read);
           if res.is_some() {
-            $field_name::del(txn, $($arg),*, index_to_read);
+            Self::del(txn, $($arg,)* index_to_read);
             txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
           }
           res
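For orientation, a minimal sketch of what the reworked macros accept. The database, entry, and trait names here are hypothetical; `serai_db` is assumed to re-export `Get`/`DbTxn` alongside the macros, and `Borshy` stands in for whatever single-token Borsh bound a caller supplies (the macro takes `$generic_type: tt`):

use serai_db::{Get, DbTxn, create_db, db_channel};

// Hypothetical single-token trait bound for the generic parameter
trait Borshy: borsh::BorshSerialize + borsh::BorshDeserialize {}

create_db!(ExampleDb {
  // Non-generic entries keep the old shape
  LastBlock: (chain: &[u8]) -> u64,
  // Entries may now be generic; `del` returns a PhantomData of the generics,
  // so a type parameter used only in the value type still counts as used
  CachedValue: <V: Borshy>(id: [u8; 32]) -> V,
});

db_channel!(ExampleChannel {
  // Channels gain the same generic syntax; an internal u32 index is appended
  // to these arguments to key each message
  Pending: <V: Borshy>(chain: &[u8]) -> V,
});

fn prune(txn: &mut impl DbTxn) {
  // The new `take` is a get-then-delete in a single call
  let _last: Option<u64> = LastBlock::take(txn, b"btc");
}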
deny.toml:

@@ -40,13 +40,31 @@ allow = [
 exceptions = [
   { allow = ["AGPL-3.0"], name = "serai-env" },

-  { allow = ["AGPL-3.0"], name = "ethereum-serai" },
+  { allow = ["AGPL-3.0"], name = "ethereum-schnorr-contract" },
   { allow = ["AGPL-3.0"], name = "serai-ethereum-relayer" },

   { allow = ["AGPL-3.0"], name = "serai-message-queue" },

   { allow = ["AGPL-3.0"], name = "serai-processor-messages" },
-  { allow = ["AGPL-3.0"], name = "serai-processor" },
+
+  { allow = ["AGPL-3.0"], name = "serai-processor-key-gen" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-frost-attempt-manager" },
+
+  { allow = ["AGPL-3.0"], name = "serai-processor-scanner" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-utxo-scheduler-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-standard-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-transaction-chaining-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-smart-contract-scheduler" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-signers" },
+
+  { allow = ["AGPL-3.0"], name = "serai-bitcoin-processor" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-primitives" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-deployer" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-router" },
+  { allow = ["AGPL-3.0"], name = "serai-processor-ethereum-erc20" },
+  { allow = ["AGPL-3.0"], name = "serai-ethereum-processor" },
+  { allow = ["AGPL-3.0"], name = "serai-monero-processor" },

   { allow = ["AGPL-3.0"], name = "tributary-chain" },
   { allow = ["AGPL-3.0"], name = "serai-coordinator" },
message-queue/src/main.rs:

@@ -72,6 +72,9 @@ pub(crate) fn queue_message(
   // Assert one, and only one of these, is the coordinator
   assert!(matches!(meta.from, Service::Coordinator) ^ matches!(meta.to, Service::Coordinator));

+  // Lock the queue
+  let queue_lock = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap();
+
   // Verify (from, to, intent) hasn't been prior seen
   fn key(domain: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
     [&[u8::try_from(domain.len()).unwrap()], domain, key.as_ref()].concat()
@@ -93,7 +96,7 @@ pub(crate) fn queue_message(
   DbTxn::put(&mut txn, intent_key, []);

   // Queue it
-  let id = QUEUES.read().unwrap()[&(meta.from, meta.to)].write().unwrap().queue_message(
+  let id = queue_lock.queue_message(
     &mut txn,
     QueuedMessage {
       from: meta.from,
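The first hunk takes the per-queue write lock up front and the second reuses it, so the intent-dedup check and the enqueue happen under a single guard instead of re-acquiring the lock at the enqueue site. A minimal, standalone sketch of that pattern with hypothetical types:

use std::{collections::HashSet, sync::Mutex};

struct Queue {
  seen_intents: HashSet<[u8; 32]>,
  messages: Vec<Vec<u8>>,
}

// Acquire the lock once and hold it across check-then-act: no other producer
// can slip a message in between the duplicate check and the push.
fn queue_once(queue: &Mutex<Queue>, intent: [u8; 32], msg: Vec<u8>) -> Option<u64> {
  let mut queue = queue.lock().unwrap();
  if !queue.seen_intents.insert(intent) {
    // Already queued under this intent
    return None;
  }
  queue.messages.push(msg);
  Some(u64::try_from(queue.messages.len() - 1).unwrap())
}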
networks/bitcoin/src/wallet/send.rs:

@@ -44,7 +44,7 @@ pub enum TransactionError {
   #[error("fee was too low to pass the default minimum fee rate")]
   TooLowFee,
   #[error("not enough funds for these payments")]
-  NotEnoughFunds,
+  NotEnoughFunds { inputs: u64, payments: u64, fee: u64 },
   #[error("transaction was too large")]
   TooLargeTransaction,
 }
@@ -213,7 +213,11 @@ impl SignableTransaction {
     }

     if input_sat < (payment_sat + needed_fee) {
-      Err(TransactionError::NotEnoughFunds)?;
+      Err(TransactionError::NotEnoughFunds {
+        inputs: input_sat,
+        payments: payment_sat,
+        fee: needed_fee,
+      })?;
     }

     // If there's a change address, check if there's change to give it
@@ -258,9 +262,9 @@ impl SignableTransaction {
     res
   }

-  /// Returns the outputs this transaction will create.
-  pub fn outputs(&self) -> &[TxOut] {
-    &self.tx.output
+  /// Returns the transaction, sans witness, this will create if signed.
+  pub fn transaction(&self) -> &Transaction {
+    &self.tx
   }

   /// Create a multisig machine for this transaction.
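Since `NotEnoughFunds` now carries the totals the funds check compared, callers can report the shortfall instead of a bare error. A sketch, assuming the error type is reachable at `bitcoin_serai::wallet::TransactionError`:

use bitcoin_serai::wallet::TransactionError;

fn report(err: &TransactionError) {
  // Inputs must cover payments plus the needed fee; all three are in satoshis
  if let TransactionError::NotEnoughFunds { inputs, payments, fee } = err {
    let shortfall = (payments + fee).saturating_sub(*inputs);
    eprintln!("not enough funds: {inputs} in vs {payments} + {fee} fee ({shortfall} short)");
  }
}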
networks/bitcoin/src/tests/wallet.rs:

@@ -195,10 +195,10 @@ async_sequential! {
     Err(TransactionError::TooLowFee),
   );

-  assert_eq!(
+  assert!(matches!(
     SignableTransaction::new(inputs.clone(), &[(addr(), inputs[0].value() * 2)], None, None, FEE),
-    Err(TransactionError::NotEnoughFunds),
-  );
+    Err(TransactionError::NotEnoughFunds { .. }),
+  ));

   assert_eq!(
     SignableTransaction::new(inputs, &vec![(addr(), 1000); 10000], None, None, FEE),
networks/ethereum/.gitignore (vendored, deleted):

@@ -1,3 +0,0 @@
-# Solidity build outputs
-cache
-artifacts
networks/ethereum/Cargo.toml (deleted):

@@ -1,49 +0,0 @@
-[package]
-name = "ethereum-serai"
-version = "0.1.0"
-description = "An Ethereum library supporting Schnorr signing and on-chain verification"
-license = "AGPL-3.0-only"
-repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum"
-authors = ["Luke Parker <lukeparker5132@gmail.com>", "Elizabeth Binks <elizabethjbinks@gmail.com>"]
-edition = "2021"
-publish = false
-rust-version = "1.79"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-thiserror = { version = "1", default-features = false }
-
-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-
-transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
-
-group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
-
-alloy-core = { version = "0.8", default-features = false }
-alloy-sol-types = { version = "0.8", default-features = false, features = ["json"] }
-alloy-consensus = { version = "0.3", default-features = false, features = ["k256"] }
-alloy-network = { version = "0.3", default-features = false }
-alloy-rpc-types-eth = { version = "0.3", default-features = false }
-alloy-rpc-client = { version = "0.3", default-features = false }
-alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
-alloy-provider = { version = "0.3", default-features = false }
-
-alloy-node-bindings = { version = "0.3", default-features = false, optional = true }
-
-[dev-dependencies]
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
-
-tokio = { version = "1", features = ["macros"] }
-
-alloy-node-bindings = { version = "0.3", default-features = false }
-
-[features]
-tests = ["alloy-node-bindings", "frost/tests"]
networks/ethereum/README.md (deleted):

@@ -1,15 +0,0 @@
-# Ethereum
-
-This package contains Ethereum-related functionality, specifically deploying and
-interacting with Serai contracts.
-
-While `monero-serai` and `bitcoin-serai` are general purpose libraries,
-`ethereum-serai` is Serai specific. If any of the utilities are generally
-desired, please fork and maintain your own copy to ensure the desired
-functionality is preserved, or open an issue to request we make this library
-general purpose.
-
-### Dependencies
-
-- solc
-- [Foundry](https://github.com/foundry-rs/foundry)
networks/ethereum/build-contracts/Cargo.toml (new file):

@@ -0,0 +1,15 @@
+[package]
+name = "build-solidity-contracts"
+version = "0.1.1"
+description = "A helper function to build Solidity contracts"
+license = "MIT"
+repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/build-contracts"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+edition = "2021"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
LICENSE:

@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2024 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
networks/ethereum/build-contracts/README.md (new file):

@@ -0,0 +1,4 @@
+# Build Solidity Contracts
+
+A helper function to build Solidity contracts. This is intended to be called
+from within build scripts.
networks/ethereum/build-contracts/src/lib.rs (new file):

@@ -0,0 +1,103 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use std::{path::PathBuf, fs, process::Command};
+
+/// Build contracts from the specified path, outputting the artifacts to the specified path.
+///
+/// Requires solc 0.8.26.
+pub fn build(
+  include_paths: &[&str],
+  contracts_path: &str,
+  artifacts_path: &str,
+) -> Result<(), String> {
+  if !fs::exists(artifacts_path)
+    .map_err(|e| format!("couldn't check if artifacts directory already exists: {e:?}"))?
+  {
+    fs::create_dir(artifacts_path)
+      .map_err(|e| format!("couldn't create the non-existent artifacts directory: {e:?}"))?;
+  }
+
+  println!("cargo:rerun-if-changed={contracts_path}/*");
+  println!("cargo:rerun-if-changed={artifacts_path}/*");
+
+  for line in String::from_utf8(
+    Command::new("solc")
+      .args(["--version"])
+      .output()
+      .map_err(|_| "couldn't fetch solc output".to_string())?
+      .stdout,
+  )
+  .map_err(|_| "solc stdout wasn't UTF-8")?
+  .lines()
+  {
+    if let Some(version) = line.strip_prefix("Version: ") {
+      let version =
+        version.split('+').next().ok_or_else(|| "no value present on line".to_string())?;
+      if version != "0.8.26" {
+        Err(format!("version was {version}, 0.8.26 required"))?
+      }
+    }
+  }
+
+  #[rustfmt::skip]
+  let mut args = vec![
+    "--base-path", ".",
+    "-o", artifacts_path, "--overwrite",
+    "--bin", "--bin-runtime", "--abi",
+    "--via-ir", "--optimize",
+    "--no-color",
+  ];
+  for include_path in include_paths {
+    args.push("--include-path");
+    args.push(include_path);
+  }
+  let mut args = args.into_iter().map(str::to_string).collect::<Vec<_>>();
+
+  let mut queue = vec![PathBuf::from(contracts_path)];
+  while let Some(folder) = queue.pop() {
+    for entry in fs::read_dir(folder).map_err(|e| format!("couldn't read directory: {e:?}"))? {
+      let entry = entry.map_err(|e| format!("couldn't read directory in entry: {e:?}"))?;
+      let kind = entry.file_type().map_err(|e| format!("couldn't fetch file type: {e:?}"))?;
+      if kind.is_dir() {
+        queue.push(entry.path());
+      }
+
+      if kind.is_file() &&
+        entry
+          .file_name()
+          .into_string()
+          .map_err(|_| "file name wasn't a valid UTF-8 string".to_string())?
+          .ends_with(".sol")
+      {
+        args.push(
+          entry
+            .path()
+            .into_os_string()
+            .into_string()
+            .map_err(|_| "file path wasn't a valid UTF-8 string".to_string())?,
+        );
+      }
+
+      // We purposely ignore symlinks to avoid recursive structures
+    }
+  }
+
+  let solc = Command::new("solc")
+    .args(args.clone())
+    .output()
+    .map_err(|_| "couldn't fetch solc output".to_string())?;
+  let stderr =
+    String::from_utf8(solc.stderr).map_err(|_| "solc stderr wasn't UTF-8".to_string())?;
+  if !solc.status.success() {
+    Err(format!("solc (`{}`) didn't successfully execute: {stderr}", args.join(" ")))?;
+  }
+  for line in stderr.lines() {
+    if line.contains("Error:") {
+      Err(format!("solc (`{}`) output had error: {stderr}", args.join(" ")))?;
+    }
+  }
+
+  Ok(())
+}
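A sketch of how a downstream crate's build script might call this helper; the contract and artifact paths here are illustrative:

// build.rs of a crate bundling Solidity contracts
fn main() {
  // Recursively compiles every .sol file under ./contracts into ./artifacts
  // (symlinks ignored), erroring if solc is missing, isn't 0.8.26, or fails
  if let Err(e) = build_solidity_contracts::build(&[], "./contracts", "./artifacts") {
    panic!("failed to build contracts: {e}");
  }
}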
networks/ethereum/build.rs (deleted):

@@ -1,41 +0,0 @@
-use std::process::Command;
-
-fn main() {
-  println!("cargo:rerun-if-changed=contracts/*");
-  println!("cargo:rerun-if-changed=artifacts/*");
-
-  for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
-    .unwrap()
-    .lines()
-  {
-    if let Some(version) = line.strip_prefix("Version: ") {
-      let version = version.split('+').next().unwrap();
-      assert_eq!(version, "0.8.25");
-    }
-  }
-
-  #[rustfmt::skip]
-  let args = [
-    "--base-path", ".",
-    "-o", "./artifacts", "--overwrite",
-    "--bin", "--abi",
-    "--via-ir", "--optimize",
-
-    "./contracts/IERC20.sol",
-
-    "./contracts/Schnorr.sol",
-    "./contracts/Deployer.sol",
-    "./contracts/Sandbox.sol",
-    "./contracts/Router.sol",
-
-    "./src/tests/contracts/Schnorr.sol",
-    "./src/tests/contracts/ERC20.sol",
-
-    "--no-color",
-  ];
-  let solc = Command::new("solc").args(args).output().unwrap();
-  assert!(solc.status.success());
-  for line in String::from_utf8(solc.stderr).unwrap().lines() {
-    assert!(!line.starts_with("Error:"));
-  }
-}
networks/ethereum/contracts/Deployer.sol (deleted):

@@ -1,52 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-/*
-  The expected deployment process of the Router is as follows:
-
-  1) A transaction deploying Deployer is made. Then, a deterministic signature is
-     created such that an account with an unknown private key is the creator of
-     the contract. Anyone can fund this address, and once anyone does, the
-     transaction deploying Deployer can be published by anyone. No other
-     transaction may be made from that account.
-
-  2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
-     such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
-     While such attacks would still be feasible if the Deployer's address was
-     controllable, the usage of a deterministic signature with a NUMS method
-     prevents that.
-
-  This doesn't have any denial-of-service risks and will resolve once anyone steps
-  forward as deployer. This does fail to guarantee an identical address across
-  every chain, though it enables letting anyone efficiently ask the Deployer for
-  the address (with the Deployer having an identical address on every chain).
-
-  Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
-  Deployer contract to use a consistent salt for the Router, yet the Router must
-  be deployed with a specific public key for Serai. Since Ethereum isn't able to
-  determine a valid public key (one the result of a Serai DKG) from a dishonest
-  public key, we have to allow multiple deployments with Serai being the one to
-  determine which to use.
-
-  The alternative would be to have a council publish the Serai key on-Ethereum,
-  with Serai verifying the published result. This would introduce a DoS risk in
-  the council not publishing the correct key/not publishing any key.
-*/
-
-contract Deployer {
-  event Deployment(bytes32 indexed init_code_hash, address created);
-
-  error DeploymentFailed();
-
-  function deploy(bytes memory init_code) external {
-    address created;
-    assembly {
-      created := create(0, add(init_code, 0x20), mload(init_code))
-    }
-    if (created == address(0)) {
-      revert DeploymentFailed();
-    }
-    // These may be emitted out of order upon re-entrancy
-    emit Deployment(keccak256(init_code), created);
-  }
-}
networks/ethereum/contracts/Router.sol (deleted):

@@ -1,222 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-import "./IERC20.sol";
-
-import "./Schnorr.sol";
-import "./Sandbox.sol";
-
-contract Router {
-  // Nonce is incremented for each batch of transactions executed/key update
-  uint256 public nonce;
-
-  // Current public key's x-coordinate
-  // This key must always have the parity defined within the Schnorr contract
-  bytes32 public seraiKey;
-
-  struct OutInstruction {
-    address to;
-    Call[] calls;
-
-    uint256 value;
-  }
-
-  struct Signature {
-    bytes32 c;
-    bytes32 s;
-  }
-
-  event SeraiKeyUpdated(
-    uint256 indexed nonce,
-    bytes32 indexed key,
-    Signature signature
-  );
-  event InInstruction(
-    address indexed from,
-    address indexed coin,
-    uint256 amount,
-    bytes instruction
-  );
-  // success is a uint256 representing a bitfield of transaction successes
-  event Executed(
-    uint256 indexed nonce,
-    bytes32 indexed batch,
-    uint256 success,
-    Signature signature
-  );
-
-  // error types
-  error InvalidKey();
-  error InvalidSignature();
-  error InvalidAmount();
-  error FailedTransfer();
-  error TooManyTransactions();
-
-  modifier _updateSeraiKeyAtEndOfFn(
-    uint256 _nonce,
-    bytes32 key,
-    Signature memory sig
-  ) {
-    if (
-      (key == bytes32(0)) ||
-      ((bytes32(uint256(key) % Schnorr.Q)) != key)
-    ) {
-      revert InvalidKey();
-    }
-
-    _;
-
-    seraiKey = key;
-    emit SeraiKeyUpdated(_nonce, key, sig);
-  }
-
-  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
-    0,
-    _seraiKey,
-    Signature({ c: bytes32(0), s: bytes32(0) })
-  ) {
-    nonce = 1;
-  }
-
-  // updateSeraiKey validates the given Schnorr signature against the current
-  // public key, and if successful, updates the contract's public key to the
-  // given one.
-  function updateSeraiKey(
-    bytes32 _seraiKey,
-    Signature calldata sig
-  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
-    bytes memory message =
-      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
-    nonce++;
-
-    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
-      revert InvalidSignature();
-    }
-  }
-
-  function inInstruction(
-    address coin,
-    uint256 amount,
-    bytes memory instruction
-  ) external payable {
-    if (coin == address(0)) {
-      if (amount != msg.value) {
-        revert InvalidAmount();
-      }
-    } else {
-      (bool success, bytes memory res) =
-        address(coin).call(
-          abi.encodeWithSelector(
-            IERC20.transferFrom.selector,
-            msg.sender,
-            address(this),
-            amount
-          )
-        );
-
-      // Require there was nothing returned, which is done by some non-standard
-      // tokens, or that the ERC20 contract did in fact return true
-      bool nonStandardResOrTrue =
-        (res.length == 0) || abi.decode(res, (bool));
-      if (!(success && nonStandardResOrTrue)) {
-        revert FailedTransfer();
-      }
-    }
-
-    /*
-    Due to fee-on-transfer tokens, emitting the amount directly is frowned upon.
-    The amount instructed to transfer may not actually be the amount
-    transferred.
-
-    If we add nonReentrant to every single function which can effect the
-    balance, we can check the amount exactly matches. This prevents transfers of
-    less value than expected occurring, at least, not without an additional
-    transfer to top up the difference (which isn't routed through this contract
-    and accordingly isn't trying to artificially create events).
-
-    If we don't add nonReentrant, a transfer can be started, and then a new
-    transfer for the difference can follow it up (again and again until a
-    rounding error is reached). This contract would believe all transfers were
-    done in full, despite each only being done in part (except for the last
-    one).
-
-    Given fee-on-transfer tokens aren't intended to be supported, the only
-    token planned to be supported is Dai and it doesn't have any fee-on-transfer
-    logic, fee-on-transfer tokens aren't even able to be supported at this time,
-    we simply classify this entire class of tokens as non-standard
-    implementations which induce undefined behavior. It is the Serai network's
-    role not to add support for any non-standard implementations.
-    */
-    emit InInstruction(msg.sender, coin, amount, instruction);
-  }
-
-  // execute accepts a list of transactions to execute as well as a signature.
-  // if signature verification passes, the given transactions are executed.
-  // if signature verification fails, this function will revert.
-  function execute(
-    OutInstruction[] calldata transactions,
-    Signature calldata sig
-  ) external {
-    if (transactions.length > 256) {
-      revert TooManyTransactions();
-    }
-
-    bytes memory message =
-      abi.encode("execute", block.chainid, nonce, transactions);
-    uint256 executed_with_nonce = nonce;
-    // This prevents re-entrancy from causing double spends yet does allow
-    // out-of-order execution via re-entrancy
-    nonce++;
-
-    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
-      revert InvalidSignature();
-    }
-
-    uint256 successes;
-    for (uint256 i = 0; i < transactions.length; i++) {
-      bool success;
-
-      // If there are no calls, send to `to` the value
-      if (transactions[i].calls.length == 0) {
-        (success, ) = transactions[i].to.call{
-          value: transactions[i].value,
-          gas: 5_000
-        }("");
-      } else {
-        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
-        // calls through that
-        //
-        // We could use a single sandbox in order to reduce gas costs, yet that
-        // risks one person creating an approval that's hooked before another
-        // user's intended action executes, in order to drain their coins
-        //
-        // While technically, that would be a flaw in the sandboxed flow, this
-        // is robust and prevents such flaws from being possible
-        //
-        // We also don't want people to set state via the Sandbox and expect it
-        // future available when anyone else could set a distinct value
-        Sandbox sandbox = new Sandbox();
-        (success, ) = address(sandbox).call{
-          value: transactions[i].value,
-          // TODO: Have the Call specify the gas up front
-          gas: 350_000
-        }(
-          abi.encodeWithSelector(
-            Sandbox.sandbox.selector,
-            transactions[i].calls
-          )
-        );
-      }
-
-      assembly {
-        successes := or(successes, shl(i, success))
-      }
-    }
-    emit Executed(
-      executed_with_nonce,
-      keccak256(message),
-      successes,
-      sig
-    );
-  }
-}
networks/ethereum/contracts/Sandbox.sol (deleted):

@@ -1,48 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.24;
-
-struct Call {
-  address to;
-  uint256 value;
-  bytes data;
-}
-
-// A minimal sandbox focused on gas efficiency.
-//
-// The first call is executed if any of the calls fail, making it a fallback.
-// All other calls are executed sequentially.
-contract Sandbox {
-  error AlreadyCalled();
-  error CallsFailed();
-
-  function sandbox(Call[] calldata calls) external payable {
-    // Prevent re-entrancy due to this executing arbitrary calls from anyone
-    // and anywhere
-    bool called;
-    assembly { called := tload(0) }
-    if (called) {
-      revert AlreadyCalled();
-    }
-    assembly { tstore(0, 1) }
-
-    // Execute the calls, starting from 1
-    for (uint256 i = 1; i < calls.length; i++) {
-      (bool success, ) =
-        calls[i].to.call{ value: calls[i].value }(calls[i].data);
-
-      // If this call failed, execute the fallback (call 0)
-      if (!success) {
-        (success, ) =
-          calls[0].to.call{ value: address(this).balance }(calls[0].data);
-        // If this call also failed, revert entirely
-        if (!success) {
-          revert CallsFailed();
-        }
-        return;
-      }
-    }
-
-    // We don't clear the re-entrancy guard as this contract should never be
-    // called again, so there's no reason to spend the effort
-  }
-}
networks/ethereum/contracts/Schnorr.sol (deleted):

@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-// see https://github.com/noot/schnorr-verify for implementation details
-library Schnorr {
-  // secp256k1 group order
-  uint256 constant public Q =
-    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
-
-  // Fixed parity for the public keys used in this contract
-  // This avoids spending a word passing the parity in a similar style to
-  // Bitcoin's Taproot
-  uint8 constant public KEY_PARITY = 27;
-
-  error InvalidSOrA();
-  error MalformedSignature();
-
-  // px := public key x-coord, where the public key has a parity of KEY_PARITY
-  // message := 32-byte hash of the message
-  // c := schnorr signature challenge
-  // s := schnorr signature
-  function verify(
-    bytes32 px,
-    bytes memory message,
-    bytes32 c,
-    bytes32 s
-  ) internal pure returns (bool) {
-    // ecrecover = (m, v, r, s) -> key
-    // We instead pass the following to obtain the nonce (not the key)
-    // Then we hash it and verify it matches the challenge
-    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
-
-    // For safety, we want each input to ecrecover to be 0 (sa, px, ca)
-    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
-    // That leaves us to check `sa` are non-zero
-    if (sa == 0) revert InvalidSOrA();
-    address R = ecrecover(sa, KEY_PARITY, px, ca);
-    if (R == address(0)) revert MalformedSignature();
-
-    // Check the signature is correct by rebuilding the challenge
-    return c == keccak256(abi.encodePacked(R, px, message));
-  }
-}
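This library (re-homed under networks/ethereum/schnorr below) verifies Schnorr signatures by abusing the ecrecover precompile as a scalar-multiplication oracle. As a sketch of why the sa/ca construction works — with G the secp256k1 generator, P the public key with x-coordinate px (parity fixed by KEY_PARITY), m the message, and (c, s) the signature:

  ecrecover(h, v, r, t) = addr(r⁻¹ · (t·R − h·G)), where R is the point with x-coordinate r and parity v.

  Passing r = px (so R = P), h = sa = −s·px, and t = ca = −c·px (all mod Q):

  ecrecover(−s·px, KEY_PARITY, px, −c·px) = addr(px⁻¹ · (−c·px·P + s·px·G)) = addr(s·G − c·P)

For a valid signature, s·G − c·P equals the signer's nonce commitment R₀, so the contract recomputes keccak256(addr(R₀) ‖ px ‖ m) and accepts iff it equals c.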
networks/ethereum/relayer/README.md:

@@ -1,4 +1,4 @@
 # Ethereum Transaction Relayer

-This server collects Ethereum router commands to be published, offering an RPC
-to fetch them.
+This server collects Ethereum transactions to be published, offering an RPC to
+fetch them.
@@ -40,8 +40,8 @@ async fn main() {
    db
  };

  // Start command recipience server
  // This should not be publicly exposed
  // Start transaction recipience server
  // This MUST NOT be publicly exposed
  // TODO: Add auth
  tokio::spawn({
    let db = db.clone();
@@ -58,25 +58,27 @@ async fn main() {
          let mut buf = vec![0; usize::try_from(msg_len).unwrap()];
          let Ok(_) = socket.read_exact(&mut buf).await else { break };

          if buf.len() < 5 {
          if buf.len() < (4 + 1) {
            break;
          }
          let nonce = u32::from_le_bytes(buf[.. 4].try_into().unwrap());
          let mut txn = db.txn();
          // Save the transaction
          txn.put(nonce.to_le_bytes(), &buf[4 ..]);
          txn.commit();

          let Ok(()) = socket.write_all(&[1]).await else { break };

          log::info!("received signed command #{nonce}");
          log::info!("received transaction to publish (nonce {nonce})");
        }
      });
    }
    }
  });

  // Start command fetch server
  // Start transaction fetch server
  // 5132 ^ ((b'E' << 8) | b'R') + 1
  // TODO: JSON-RPC server which returns this as JSON?
  let server = TcpListener::bind("0.0.0.0:20831").await.unwrap();
  loop {
    let (mut socket, _) = server.accept().await.unwrap();
@@ -84,16 +86,17 @@ async fn main() {
    tokio::spawn(async move {
      let db = db.clone();
      loop {
        // Nonce to get the router command for
        // Nonce to get the unsigned transaction for
        let mut buf = vec![0; 4];
        let Ok(_) = socket.read_exact(&mut buf).await else { break };

        let command = db.get(&buf[.. 4]).unwrap_or(vec![]);
        let Ok(()) = socket.write_all(&u32::try_from(command.len()).unwrap().to_le_bytes()).await
        let transaction = db.get(&buf[.. 4]).unwrap_or(vec![]);
        let Ok(()) =
          socket.write_all(&u32::try_from(transaction.len()).unwrap().to_le_bytes()).await
        else {
          break;
        };
        let Ok(()) = socket.write_all(&command).await else { break };
        let Ok(()) = socket.write_all(&transaction).await else { break };
      }
    });
  }
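For exercising the relayer by hand, a hypothetical client for the fetch protocol above could look as follows. This is a minimal sketch, assuming the framing shown in the loop above; the localhost address and the function name are illustrative, not part of this codebase.

// Hypothetical client for the fetch protocol above. Send a 4-byte little-endian
// nonce; receive a 4-byte little-endian length followed by that many bytes.
use tokio::{
  io::{AsyncReadExt, AsyncWriteExt},
  net::TcpStream,
};

async fn fetch_transaction(nonce: u32) -> std::io::Result<Vec<u8>> {
  let mut socket = TcpStream::connect("127.0.0.1:20831").await?;
  // Request the transaction saved under this nonce
  socket.write_all(&nonce.to_le_bytes()).await?;
  // Read the length prefix
  let mut len = [0; 4];
  socket.read_exact(&mut len).await?;
  // A zero-length response means no transaction is known for this nonce
  let mut transaction = vec![0; usize::try_from(u32::from_le_bytes(len)).unwrap()];
  socket.read_exact(&mut transaction).await?;
  Ok(transaction)
}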
42
networks/ethereum/schnorr/Cargo.toml
Normal file
@@ -0,0 +1,42 @@
[package]
name = "ethereum-schnorr-contract"
version = "0.1.0"
description = "A Solidity contract to verify Schnorr signatures"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/networks/ethereum/schnorr"
authors = ["Luke Parker <lukeparker5132@gmail.com>", "Elizabeth Binks <elizabethjbinks@gmail.com>"]
edition = "2021"
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
subtle = { version = "2", default-features = false, features = ["std"] }
sha3 = { version = "0.10", default-features = false, features = ["std"] }
group = { version = "0.13", default-features = false, features = ["alloc"] }
k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic"] }

[build-dependencies]
build-solidity-contracts = { path = "../build-contracts", version = "0.1" }

[dev-dependencies]
rand_core = { version = "0.6", default-features = false, features = ["std"] }

k256 = { version = "^0.13.1", default-features = false, features = ["ecdsa"] }

alloy-core = { version = "0.8", default-features = false }
alloy-sol-types = { version = "0.8", default-features = false }

alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-rpc-types-eth = { version = "0.3", default-features = false }
alloy-rpc-client = { version = "0.3", default-features = false }
alloy-provider = { version = "0.3", default-features = false }

alloy-node-bindings = { version = "0.3", default-features = false }

tokio = { version = "1", default-features = false, features = ["macros"] }
@@ -1,6 +1,6 @@
AGPL-3.0-only license

Copyright (c) 2022-2023 Luke Parker
Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
5
networks/ethereum/schnorr/README.md
Normal file
@@ -0,0 +1,5 @@
# Ethereum Schnorr Contract

An Ethereum contract to verify Schnorr signatures.

This crate will fail to build if `solc` is not installed and available.
4
networks/ethereum/schnorr/build.rs
Normal file
@@ -0,0 +1,4 @@
fn main() {
  let artifacts_path = std::env::var("OUT_DIR").unwrap().to_string() + "/ethereum-schnorr-contract";
  build_solidity_contracts::build(&[], "contracts", &artifacts_path).unwrap();
}
41
networks/ethereum/schnorr/contracts/Schnorr.sol
Normal file
@@ -0,0 +1,41 @@
// SPDX-License-Identifier: AGPL-3.0-only
pragma solidity ^0.8.26;

// See https://github.com/noot/schnorr-verify for implementation details
library Schnorr {
  // secp256k1 group order
  uint256 private constant Q = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

  // We fix the key to have:
  // 1) An even y-coordinate
  // 2) An x-coordinate < Q
  uint8 private constant KEY_PARITY = 27;

  // px := public key x-coordinate, where the public key has an even y-coordinate
  // message := the message signed
  // c := Schnorr signature challenge
  // s := Schnorr signature solution
  function verify(bytes32 px, bytes32 message, bytes32 c, bytes32 s) internal pure returns (bool) {
    // ecrecover = (m, v, r, s) -> key
    // We instead pass the following to obtain the nonce (not the key)
    // Then we hash it and verify it matches the challenge
    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));

    /*
      The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero,
      banning the two keys with zero for their x-coordinate and zero challenge.
      Each has negligible probability of occurring (assuming zero x-coordinates
      are even on-curve in the first place).

      `sa` is not checked to be non-zero yet it does not need to be. The inverse
      of it is never taken.
    */
    address R = ecrecover(sa, KEY_PARITY, px, ca);
    // The ecrecover failed
    if (R == address(0)) return false;

    // Check the signature is correct by rebuilding the challenge
    return c == keccak256(abi.encodePacked(R, px, message));
  }
}
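For reference, the algebra this contract relies on, restating the comments above and the premise test later in this diff ($p_x$ is the key's x-coordinate, $A$ the key, $R$ the nonce, $G$ the generator):

$$\mathtt{ecrecover}(m, v, r, s) = \mathrm{addr}\left(r^{-1}(sP - mG)\right)$$

where $P$ is the point recovered from $(r, v)$. Passing $m = -s p_x$, $r = p_x$ (so $P = A$, given the fixed even parity), and $-c p_x$ in the $s$ slot:

$$\mathrm{addr}\left(p_x^{-1}\left((-c p_x)A - (-s p_x)G\right)\right) = \mathrm{addr}(sG - cA) = \mathrm{addr}(R)$$

so the contract obtains the address of the nonce and rebuilds the challenge from it.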
14
networks/ethereum/schnorr/contracts/tests/Schnorr.sol
Normal file
@@ -0,0 +1,14 @@
// SPDX-License-Identifier: AGPL-3.0-only
pragma solidity ^0.8.26;

import "../Schnorr.sol";

contract TestSchnorr {
  function verify(bytes32 public_key, bytes calldata message, bytes32 c, bytes32 s)
    external
    pure
    returns (bool)
  {
    return Schnorr.verify(public_key, keccak256(message), c, s);
  }
}
16
networks/ethereum/schnorr/src/lib.rs
Normal file
@@ -0,0 +1,16 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![allow(non_snake_case)]

/// The initialization bytecode of the Schnorr library.
pub const INIT_BYTECODE: &str =
  include_str!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin"));

mod public_key;
pub use public_key::PublicKey;
mod signature;
pub use signature::Signature;

#[cfg(test)]
mod tests;
74
networks/ethereum/schnorr/src/public_key.rs
Normal file
@@ -0,0 +1,74 @@
use subtle::Choice;
use group::ff::PrimeField;
use k256::{
  elliptic_curve::{
    ops::Reduce,
    point::{AffineCoordinates, DecompressPoint},
  },
  AffinePoint, ProjectivePoint, Scalar, U256 as KU256,
};

/// A public key for the Schnorr Solidity library.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
  A: ProjectivePoint,
  x_coordinate: [u8; 32],
}

impl PublicKey {
  /// Construct a new `PublicKey`.
  ///
  /// This will return None if the provided point isn't eligible to be a public key (due to
  /// bounds such as parity).
  #[must_use]
  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
    let affine = A.to_affine();

    // Only allow even keys to save a word within Ethereum
    if bool::from(affine.y_is_odd()) {
      None?;
    }

    let x_coordinate = affine.x();
    // Return None if the x-coordinate isn't mutual to both fields
    // While reductions shouldn't be an issue, it's one less headache/concern to have
    // The trivial amount of public keys this makes non-representable aren't a concern
    if <Scalar as Reduce<KU256>>::reduce_bytes(&x_coordinate).to_repr() != x_coordinate {
      None?;
    }

    let x_coordinate: [u8; 32] = x_coordinate.into();
    // Returns None if the x-coordinate is 0
    // Such keys will never have their signatures able to be verified
    if x_coordinate == [0; 32] {
      None?;
    }
    Some(PublicKey { A, x_coordinate })
  }

  /// The point for this public key.
  #[must_use]
  pub fn point(&self) -> ProjectivePoint {
    self.A
  }

  /// The Ethereum representation of this public key.
  #[must_use]
  pub fn eth_repr(&self) -> [u8; 32] {
    // We only encode the x-coordinate due to fixing the sign of the y-coordinate
    self.x_coordinate
  }

  /// Construct a PublicKey from its Ethereum representation.
  // This wouldn't be possible if the x-coordinate had been reduced
  #[must_use]
  pub fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
    let x_coordinate = repr;

    let y_is_odd = Choice::from(0);
    let A_affine =
      Option::<AffinePoint>::from(AffinePoint::decompress(&x_coordinate.into(), y_is_odd))?;
    let A = ProjectivePoint::from(A_affine);
    Some(PublicKey { A, x_coordinate })
  }
}
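A brief sketch of the intended `PublicKey` flow, mirroring the key-generation loop the tests later in this diff use; the crate name in the imports is inferred from the Cargo.toml above, and the test name is illustrative:

use rand_core::OsRng;
use group::ff::Field;
use k256::{Scalar, ProjectivePoint};
use ethereum_schnorr_contract::PublicKey;

// Roughly half of all scalars yield a point with an odd y-coordinate, so key
// generation retries until `PublicKey::new` accepts the point
fn generate_key() -> (Scalar, PublicKey) {
  loop {
    let key = Scalar::random(&mut OsRng);
    if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) {
      return (key, public_key);
    }
  }
}

#[test]
fn eth_repr_round_trips() {
  let (_key, public_key) = generate_key();
  // The 32-byte Ethereum representation round-trips back to the same key
  assert_eq!(PublicKey::from_eth_repr(public_key.eth_repr()), Some(public_key));
}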
95
networks/ethereum/schnorr/src/signature.rs
Normal file
@@ -0,0 +1,95 @@
use std::io;

use sha3::{Digest, Keccak256};

use group::ff::PrimeField;
use k256::{
  elliptic_curve::{ops::Reduce, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256 as KU256,
};

use crate::PublicKey;

/// A signature for the Schnorr Solidity library.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
  c: Scalar,
  s: Scalar,
}

impl Signature {
  /// Construct a new `Signature`.
  #[must_use]
  pub fn new(c: Scalar, s: Scalar) -> Signature {
    Signature { c, s }
  }

  /// The challenge for a signature.
  #[must_use]
  pub fn challenge(R: ProjectivePoint, key: &PublicKey, message: &[u8]) -> Scalar {
    // H(R || A || m)
    let mut hash = Keccak256::new();
    // We transcript the nonce as an address since ecrecover yields an address
    hash.update({
      let uncompressed_encoded_point = R.to_encoded_point(false);
      // Skip the prefix byte marking this as uncompressed
      let x_and_y_coordinates = &uncompressed_encoded_point.as_ref()[1 ..];
      // Last 20 bytes of the hash of the x and y coordinates
      &Keccak256::digest(x_and_y_coordinates)[12 ..]
    });
    hash.update(key.eth_repr());
    hash.update(Keccak256::digest(message));
    <Scalar as Reduce<KU256>>::reduce_bytes(&hash.finalize())
  }

  /// Verify a signature.
  #[must_use]
  pub fn verify(&self, key: &PublicKey, message: &[u8]) -> bool {
    // Recover the nonce
    let R = (ProjectivePoint::GENERATOR * self.s) - (key.point() * self.c);
    // Check the challenge
    Self::challenge(R, key, message) == self.c
  }

  /// The challenge present within this signature.
  pub fn c(&self) -> Scalar {
    self.c
  }

  /// The signature solution present within this signature.
  pub fn s(&self) -> Scalar {
    self.s
  }

  /// Convert the signature to bytes.
  #[must_use]
  pub fn to_bytes(&self) -> [u8; 64] {
    let mut res = [0; 64];
    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
    res
  }

  /// Write the signature.
  pub fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&self.to_bytes())
  }

  /// Read a signature.
  pub fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    let mut read_F = || -> io::Result<Scalar> {
      let mut bytes = [0; 32];
      reader.read_exact(&mut bytes)?;
      Option::<Scalar>::from(Scalar::from_repr(bytes.into()))
        .ok_or_else(|| io::Error::other("invalid scalar"))
    };
    let c = read_F()?;
    let s = read_F()?;
    Ok(Signature { c, s })
  }

  /// Read a signature from bytes.
  pub fn from_bytes(bytes: [u8; 64]) -> io::Result<Self> {
    Self::read(&mut bytes.as_slice())
  }
}
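As a usage note, a single-signer sketch of producing a `Signature` this type will verify, following the `s = k + cx` equation the tests below use. The crate-level imports are an assumption; Serai itself produces `s` via threshold signing rather than a single key:

use rand_core::OsRng;
use group::ff::Field;
use k256::{Scalar, ProjectivePoint};
use ethereum_schnorr_contract::{PublicKey, Signature};

// Sign with a single private key (illustrative; not how Serai derives `s`)
fn sign(key: Scalar, public_key: &PublicKey, message: &[u8]) -> Signature {
  // k: the one-time nonce, R = kG
  let nonce = Scalar::random(&mut OsRng);
  let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, public_key, message);
  // s = k + cx
  Signature::new(c, nonce + (c * key))
}

Any `(c, s)` produced this way satisfies `sG - cA = R`, so `verify` recomputes the same challenge.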
112
networks/ethereum/schnorr/src/tests/mod.rs
Normal file
@@ -0,0 +1,112 @@
use std::sync::Arc;

use rand_core::{RngCore, OsRng};

use group::ff::{Field, PrimeField};
use k256::{Scalar, ProjectivePoint};

use alloy_core::primitives::Address;
use alloy_sol_types::SolCall;

use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{PublicKey, Signature};

mod premise;

#[expect(warnings)]
#[expect(needless_pass_by_value)]
#[expect(clippy::all)]
#[expect(clippy::ignored_unit_patterns)]
#[expect(clippy::redundant_closure_for_method_calls)]
mod abi {
  alloy_sol_types::sol!("contracts/tests/Schnorr.sol");
  pub(crate) use TestSchnorr::*;
}

async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = Arc::new(RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  ));

  let mut address = [0; 20];
  OsRng.fill_bytes(&mut address);
  let address = Address::from(address);
  let _: () = provider
    .raw_request(
      "anvil_setCode".into(),
      [
        address.to_string(),
        include_str!(concat!(
          env!("OUT_DIR"),
          "/ethereum-schnorr-contract/TestSchnorr.bin-runtime"
        ))
        .to_string(),
      ],
    )
    .await
    .unwrap();

  (anvil, provider, address)
}

async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  address: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> bool {
  let public_key: [u8; 32] = public_key.eth_repr();
  let c_bytes: [u8; 32] = signature.c().to_repr().into();
  let s_bytes: [u8; 32] = signature.s().to_repr().into();
  let call = TransactionRequest::default().to(address).input(TransactionInput::new(
    abi::verifyCall::new((
      public_key.into(),
      message.to_vec().into(),
      c_bytes.into(),
      s_bytes.into(),
    ))
    .abi_encode()
    .into(),
  ));
  let bytes = provider.call(&call).await.unwrap();
  let res = abi::verifyCall::abi_decode_returns(&bytes, true).unwrap();

  res._0
}

#[tokio::test]
async fn test_verify() {
  let (_anvil, provider, address) = setup_test().await;

  for _ in 0 .. 100 {
    let (key, public_key) = loop {
      let key = Scalar::random(&mut OsRng);
      if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) {
        break (key, public_key);
      }
    };

    let nonce = Scalar::random(&mut OsRng);
    let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()];
    OsRng.fill_bytes(&mut message);

    let c = Signature::challenge(ProjectivePoint::GENERATOR * nonce, &public_key, &message);
    let s = nonce + (c * key);

    let sig = Signature::new(c, s);
    assert!(sig.verify(&public_key, &message));
    assert!(call_verify(&provider, address, &public_key, &message, &sig).await);
    // Mutate the message and make sure the signature now fails to verify
    message[0] = message[0].wrapping_add(1);
    assert!(!call_verify(&provider, address, &public_key, &message, &sig).await);
  }
}
111
networks/ethereum/schnorr/src/tests/premise.rs
Normal file
@@ -0,0 +1,111 @@
use rand_core::{RngCore, OsRng};

use sha3::{Digest, Keccak256};
use group::ff::{Field, PrimeField};
use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  U256, Scalar, ProjectivePoint,
};

use alloy_core::primitives::Address;

use crate::{PublicKey, Signature};

// The ecrecover opcode, yet taking whether the y-coordinate is odd in place of v
fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

// Test ecrecover behaves as expected
#[test]
fn test_ecrecover() {
  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  // Sign the message
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed(Scalar::random(&mut OsRng), &Keccak256::digest(MESSAGE))
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_prehash(&Keccak256::digest(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      <Scalar as Reduce<U256>>::reduce_bytes(&Keccak256::digest(MESSAGE)),
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
    .unwrap(),
    Address::from_raw_public_key(&public.to_encoded_point(false).as_ref()[1 ..]),
  );
}

// Test that we can recover the nonce from a Schnorr signature via a call to ecrecover, the premise
// of efficiently verifying Schnorr signatures in an Ethereum contract
#[test]
fn nonce_recovery_via_ecrecover() {
  let (key, public_key) = loop {
    let key = Scalar::random(&mut OsRng);
    if let Some(public_key) = PublicKey::new(ProjectivePoint::GENERATOR * key) {
      break (key, public_key);
    }
  };

  let nonce = Scalar::random(&mut OsRng);
  let R = ProjectivePoint::GENERATOR * nonce;

  let mut message = vec![0; 1 + usize::try_from(OsRng.next_u32() % 256).unwrap()];
  OsRng.fill_bytes(&mut message);

  let c = Signature::challenge(R, &public_key, &message);
  let s = nonce + (c * key);

  /*
    An ECDSA signature is `(r, s)` with `s = (H(m) + rx) / k`, where:
    - `m` is the message
    - `r` is the x-coordinate of the nonce, reduced into a scalar
    - `x` is the private key
    - `k` is the nonce

    We fix the recovery ID to be for the even key with an x-coordinate < the order. Accordingly,
    `kG = Point::from(Even, r)`. This enables recovering the public key via
    `((s Point::from(Even, r)) - H(m)G) / r`.

    We want to calculate `R` from `(c, s)` where `s = r + cx`. That means we need to calculate
    `sG - cX`.

    We can calculate `sG - cX` with `((s Point::from(Even, r)) - H(m)G) / r` if:
    - Latter `r` = `X.x`
    - Latter `s` = `c`
    - `H(m)` = former `s`
    This gets us to `(cX - sG) / X.x`. If we additionally scale the latter's `s, H(m)` values (the
    former's `c, s` values) by `X.x`, we get `cX - sG`. This just requires negating each to achieve
    `sG - cX`.
  */
  let x_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&public_key.point().to_affine().x());
  let sa = -(s * x_scalar);
  let ca = -(c * x_scalar);

  let q = ecrecover(sa, false, x_scalar, ca).unwrap();
  assert_eq!(q, Address::from_raw_public_key(&R.to_encoded_point(false).as_ref()[1 ..]));
}
@@ -1,37 +0,0 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
  use super::*;
  sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
  use super::*;
  sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
  use super::*;
  sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;
@@ -1,188 +0,0 @@
use group::ff::PrimeField;
use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256 as KU256,
};
#[cfg(test)]
use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

use frost::{
  algorithm::{Hram, SchnorrSignature},
  curve::{Ciphersuite, Secp256k1},
};

use alloy_core::primitives::{Parity, Signature as AlloySignature};
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};

use crate::abi::router::{Signature as AbiSignature};

pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
  alloy_core::primitives::keccak256(data).into()
}

pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
}

pub fn address(point: &ProjectivePoint) -> [u8; 20] {
  let encoded_point = point.to_encoded_point(false);
  // Last 20 bytes of the hash of the concatenated x and y coordinates
  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}

/// Deterministically sign a transaction.
///
/// This function panics if passed a transaction with a non-None chain ID.
pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
  assert!(
    tx.chain_id.is_none(),
    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
  );

  let sig_hash = tx.signature_hash().0;
  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
  loop {
    let r_bytes: [u8; 32] = r.to_repr().into();
    let s_bytes: [u8; 32] = s.to_repr().into();
    let v = Parity::NonEip155(false);
    let signature =
      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
    let tx = tx.clone().into_signed(signature);
    if tx.recover_signer().is_ok() {
      return tx;
    }

    // Re-hash until valid
    r = hash_to_scalar(r_bytes.as_ref());
    s = hash_to_scalar(s_bytes.as_ref());
  }
}

/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
  pub(crate) A: ProjectivePoint,
  pub(crate) px: Scalar,
}

impl PublicKey {
  /// Construct a new `PublicKey`.
  ///
  /// This will return None if the provided point isn't eligible to be a public key (due to
  /// bounds such as parity).
  #[allow(non_snake_case)]
  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
    let affine = A.to_affine();
    // Only allow even keys to save a word within Ethereum
    let is_odd = bool::from(affine.y_is_odd());
    if is_odd {
      None?;
    }

    let x_coord = affine.x();
    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
    // Return None if a reduction would occur
    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
    // headache/concern to have
    // This does ban a trivial amount of public keys
    if x_coord_scalar.to_repr() != x_coord {
      None?;
    }

    Some(PublicKey { A, px: x_coord_scalar })
  }

  pub fn point(&self) -> ProjectivePoint {
    self.A
  }

  pub(crate) fn eth_repr(&self) -> [u8; 32] {
    self.px.to_repr().into()
  }

  #[cfg(test)]
  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
    #[allow(non_snake_case)]
    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
  }
}

/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
  #[allow(non_snake_case)]
  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
    let x_coord = A.to_affine().x();

    let mut data = address(R).to_vec();
    data.extend(x_coord.as_slice());
    data.extend(m);

    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
  }
}

/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
  pub(crate) c: Scalar,
  pub(crate) s: Scalar,
}
impl Signature {
  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
    #[allow(non_snake_case)]
    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
    EthereumHram::hram(&R, &public_key.A, message) == self.c
  }

  /// Construct a new `Signature`.
  ///
  /// This will return None if the signature is invalid.
  pub fn new(
    public_key: &PublicKey,
    message: &[u8],
    signature: SchnorrSignature<Secp256k1>,
  ) -> Option<Signature> {
    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
    if !signature.verify(public_key.A, c) {
      None?;
    }

    let res = Signature { c, s: signature.s };
    assert!(res.verify(public_key, message));
    Some(res)
  }

  pub fn c(&self) -> Scalar {
    self.c
  }
  pub fn s(&self) -> Scalar {
    self.s
  }

  pub fn to_bytes(&self) -> [u8; 64] {
    let mut res = [0; 64];
    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
    res
  }

  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
    let mut reader = bytes.as_slice();
    let c = Secp256k1::read_F(&mut reader)?;
    let s = Secp256k1::read_F(&mut reader)?;
    Ok(Signature { c, s })
  }
}
impl From<&Signature> for AbiSignature {
  fn from(sig: &Signature) -> AbiSignature {
    let c: [u8; 32] = sig.c.to_repr().into();
    let s: [u8; 32] = sig.s.to_repr().into();
    AbiSignature { c: c.into(), s: s.into() }
  }
}
@@ -1,113 +0,0 @@
use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types_eth::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. This account has no known private key to anyone,
  /// so ETH sent can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `ContractCall` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    // TODO: Make an abstraction for event filtering (de-duplicating common code)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
@@ -1,105 +0,0 @@
use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types_eth::Filter;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
  /// Construct a new view of the specified ERC20 contract.
  pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {
    Self(provider, Address::from(&address))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx =
        self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }
    Ok(top_level_transfers)
  }
}
@@ -1,35 +0,0 @@
use thiserror::Error;

pub mod alloy {
  pub use alloy_core::primitives;
  pub use alloy_core as core;
  pub use alloy_sol_types as sol_types;

  pub use alloy_consensus as consensus;
  pub use alloy_network as network;
  pub use alloy_rpc_types_eth as rpc_types;
  pub use alloy_simple_request_transport as simple_request_transport;
  pub use alloy_rpc_client as rpc_client;
  pub use alloy_provider as provider;
}

pub mod crypto;

pub(crate) mod abi;

pub mod erc20;
pub mod deployer;
pub mod router;

pub mod machine;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
  #[error("failed to verify Schnorr signature")]
  InvalidSignature,
  #[error("couldn't make call/send TX")]
  ConnectionError,
}
@@ -1,414 +0,0 @@
use std::{
  io::{self, Read},
  collections::HashMap,
};

use rand_core::{RngCore, CryptoRng};

use transcript::{Transcript, RecommendedTranscript};

use group::GroupEncoding;
use frost::{
  curve::{Ciphersuite, Secp256k1},
  Participant, ThresholdKeys, FrostError,
  algorithm::Schnorr,
  sign::*,
};

use alloy_core::primitives::U256;

use crate::{
  crypto::{PublicKey, EthereumHram, Signature},
  router::{
    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
    Router,
  },
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
  pub to: [u8; 20],
  pub value: U256,
  pub data: Vec<u8>,
}
impl Call {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut to = [0; 20];
    reader.read_exact(&mut to)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    let mut data_len = {
      let mut data_len = [0; 4];
      reader.read_exact(&mut data_len)?;
      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
    };

    // A valid DoS would be to claim 4 GB of data is present for only 4 bytes
    // We read this in 1 KB chunks to only read data actually present (with a max DoS of 1 KB)
    let mut data = vec![];
    while data_len > 0 {
      let chunk_len = data_len.min(1024);
      let mut chunk = vec![0; chunk_len];
      reader.read_exact(&mut chunk)?;
      data.extend(&chunk);
      data_len -= chunk_len;
    }

    Ok(Call { to, value, data })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.to)?;
    writer.write_all(&self.value.as_le_bytes())?;

    let data_len = u32::try_from(self.data.len())
      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
    writer.write_all(&data_len.to_le_bytes())?;
    writer.write_all(&self.data)
  }
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this, even when using the IETF variant
    // If this is better and more comprehensive, we should do it, even if not necessary
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
@@ -1,443 +0,0 @@
|
||||
use std::{sync::Arc, io, collections::HashSet};
|
||||
|
||||
use k256::{
|
||||
elliptic_curve::{group::GroupEncoding, sec1},
|
||||
ProjectivePoint,
|
||||
};
|
||||
|
||||
use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
|
||||
#[cfg(test)]
|
||||
use alloy_core::primitives::B256;
|
||||
use alloy_consensus::TxLegacy;
|
||||
|
||||
use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};
|
||||
|
||||
use alloy_rpc_types_eth::Filter;
|
||||
#[cfg(test)]
|
||||
use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput};
|
||||
use alloy_simple_request_transport::SimpleRequest;
|
||||
use alloy_provider::{Provider, RootProvider};
|
||||
|
||||
pub use crate::{
|
||||
Error,
|
||||
crypto::{PublicKey, Signature},
|
||||
abi::{erc20::Transfer, router as abi},
|
||||
};
|
||||
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub enum Coin {
|
||||
Ether,
|
||||
Erc20([u8; 20]),
|
||||
}
|
||||
|
||||
impl Coin {
|
||||
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let mut kind = [0xff];
|
||||
reader.read_exact(&mut kind)?;
|
||||
Ok(match kind[0] {
|
||||
0 => Coin::Ether,
|
||||
1 => {
|
||||
let mut address = [0; 20];
|
||||
reader.read_exact(&mut address)?;
|
||||
Coin::Erc20(address)
|
||||
}
|
||||
_ => Err(io::Error::other("unrecognized Coin type"))?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
match self {
|
||||
Coin::Ether => writer.write_all(&[0]),
|
||||
Coin::Erc20(token) => {
|
||||
writer.write_all(&[1])?;
|
||||
writer.write_all(token)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
||||
pub struct InInstruction {
|
||||
pub id: ([u8; 32], u64),
|
||||
pub from: [u8; 20],
|
||||
pub coin: Coin,
|
||||
pub amount: U256,
|
||||
pub data: Vec<u8>,
|
||||
pub key_at_end_of_block: ProjectivePoint,
|
||||
}
|
||||
|
||||
impl InInstruction {
|
||||
pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
|
||||
let id = {
|
||||
let mut id_hash = [0; 32];
|
||||
reader.read_exact(&mut id_hash)?;
|
||||
let mut id_pos = [0; 8];
|
||||
reader.read_exact(&mut id_pos)?;
|
||||
let id_pos = u64::from_le_bytes(id_pos);
|
||||
(id_hash, id_pos)
|
||||
};
|
||||
|
||||
let mut from = [0; 20];
|
||||
reader.read_exact(&mut from)?;
|
||||
|
||||
let coin = Coin::read(reader)?;
|
||||
let mut amount = [0; 32];
|
||||
reader.read_exact(&mut amount)?;
|
||||
let amount = U256::from_le_slice(&amount);
|
||||
|
||||
let mut data_len = [0; 4];
|
||||
reader.read_exact(&mut data_len)?;
|
||||
let data_len = usize::try_from(u32::from_le_bytes(data_len))
|
||||
.map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
|
||||
let mut data = vec![0; data_len];
|
||||
reader.read_exact(&mut data)?;
|
||||
|
||||
let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
|
||||
reader.read_exact(&mut key_at_end_of_block)?;
|
||||
let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
|
||||
.ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;
|
||||
|
||||
Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
|
||||
}
|
||||
|
||||
pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
|
||||
writer.write_all(&self.id.0)?;
|
||||
writer.write_all(&self.id.1.to_le_bytes())?;
|
||||
|
||||
writer.write_all(&self.from)?;
|
||||
|
||||
self.coin.write(writer)?;
|
||||
writer.write_all(&self.amount.as_le_bytes())?;
|
||||
|
||||
writer.write_all(
|
||||
&u32::try_from(self.data.len())
|
||||
.map_err(|_| {
|
||||
io::Error::other("InInstruction being written had data exceeding 2**32 in length")
|
||||
})?
|
||||
.to_le_bytes(),
|
||||
)?;
|
||||
writer.write_all(&self.data)?;
|
||||
|
||||
writer.write_all(&self.key_at_end_of_block.to_bytes())
|
||||
}
|
||||
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
  pub tx_id: [u8; 32],
  pub nonce: u64,
  pub signature: [u8; 64],
}

/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
  pub(crate) fn code() -> Vec<u8> {
    let bytecode = include_str!("../artifacts/Router.bin");
    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
  }

  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
    let mut bytecode = Self::code();
    // Append the constructor arguments
    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
    bytecode
  }

  // This isn't pub in order to force users to use `Deployer::find_router`.
  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
    Self(provider, address)
  }

  pub fn address(&self) -> [u8; 20] {
    **self.1
  }

  /// Get the key for Serai at the specified block.
  #[cfg(test)]
  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
  }

  /// Get the message to be signed in order to update the key for Serai.
  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
    let mut buffer = b"updateSeraiKey".to_vec();
    buffer.extend(&chain_id.to_be_bytes::<32>());
    buffer.extend(&nonce.to_be_bytes::<32>());
    buffer.extend(&key.eth_repr());
    buffer
  }

  /// Update the key representing Serai.
  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
    // TODO: Set a more accurate gas
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
        .abi_encode()
        .into(),
      gas_limit: 100_000,
      ..Default::default()
    }
  }

  /// Get the current nonce for the published batches.
  #[cfg(test)]
  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    Ok(res._0)
  }

  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
  pub(crate) fn execute_message(
    chain_id: U256,
    nonce: U256,
    outs: Vec<abi::OutInstruction>,
  ) -> Vec<u8> {
    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
  }

  /// Execute a batch of `OutInstruction`s.
  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
      // TODO
      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
      ..Default::default()
    }
  }
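
For a sense of scale on the placeholder gas formula above: each `OutInstruction` is budgeted 200_000 + 10_000 = 210_000 gas on top of the 100_000 base, so a batch of three instructions is published with a 730_000 gas limit. Per the TODO, these are stopgap constants rather than measured costs.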

  pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<ProjectivePoint>, Error> {
    let filter = Filter::new().from_block(0).to_block(block).address(self.1);
    let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
    let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
    if all_keys.is_empty() {
      return Ok(None);
    };

    let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
    let last_key_x_coordinate = last_key_x_coordinate_log
      .log_decode::<SeraiKeyUpdated>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .key;

    let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
    compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
    compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());

    let key =
      Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?;
    Ok(Some(key))
  }
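
The reconstruction above leans on the SEC1 compressed encoding: a one-byte parity tag followed by the 32-byte x-coordinate, with the Router only ever storing even-y keys. A minimal sketch of that 33-byte layout (illustrative, not code from the commit):

fn compressed_even_y(x: &[u8; 32]) -> [u8; 33] {
  let mut repr = [0; 33];
  // 0x02 is sec1::Tag::CompressedEvenY; 0x03 would mark an odd y
  repr[0] = 0x02;
  repr[1 ..].copy_from_slice(x);
  repr
}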

  pub async fn in_instructions(
    &self,
    block: u64,
    allowed_tokens: &HashSet<[u8; 20]>,
  ) -> Result<Vec<InInstruction>, Error> {
    let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else {
      return Ok(vec![]);
    };

    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut transfer_check = HashSet::new();
    let mut in_instructions = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let id = (
        log.block_hash.ok_or(Error::ConnectionError)?.into(),
        log.log_index.ok_or(Error::ConnectionError)?,
      );

      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self
        .0
        .get_transaction_by_hash(tx_hash)
        .await
        .ok()
        .flatten()
        .ok_or(Error::ConnectionError)?;

      let log =
        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

      let coin = if log.coin.0 == [0; 20] {
        Coin::Ether
      } else {
        let token = *log.coin.0;

        if !allowed_tokens.contains(&token) {
          continue;
        }

        // If this also counts as a top-level transfer via the token, drop it
        //
        // Necessary in order to handle a potential edge case with some theoretical token
        // implementations
        //
        // This will either let it be handled by the top-level transfer hook or will drop it
        // entirely on the side of caution
        if tx.to == Some(token.into()) {
          continue;
        }

        // Get all logs for this TX
        let receipt = self
          .0
          .get_transaction_receipt(tx_hash)
          .await
          .map_err(|_| Error::ConnectionError)?
          .ok_or(Error::ConnectionError)?;
        let tx_logs = receipt.inner.logs();

        // Find a matching transfer log
        let mut found_transfer = false;
        for tx_log in tx_logs {
          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
          // Ensure we didn't already use this transfer to check a distinct InInstruction event
          if transfer_check.contains(&log_index) {
            continue;
          }

          // Check if this log is from the token we expected to be transferred
          if tx_log.address().0 != token {
            continue;
          }
          // Check if this is a transfer log
          // https://github.com/alloy-rs/core/issues/589
          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
            continue;
          }
          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
          // Check if this is a transfer to us for the expected amount
          if (transfer.to == self.1) && (transfer.value == log.amount) {
            transfer_check.insert(log_index);
            found_transfer = true;
            break;
          }
        }
        if !found_transfer {
          // This shouldn't be a ConnectionError
          // This is an exploit, a non-conforming ERC20, or an invalid connection
          // This should halt the process which is sufficient, yet this is sub-optimal
          // TODO
          Err(Error::ConnectionError)?;
        }

        Coin::Erc20(token)
      };

      in_instructions.push(InInstruction {
        id,
        from: *log.from.0,
        coin,
        amount: log.amount,
        data: log.instruction.as_ref().to_vec(),
        key_at_end_of_block,
      });
    }

    Ok(in_instructions)
  }

  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
    let mut res = vec![];

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log =
          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    Ok(res)
  }

  #[cfg(feature = "tests")]
  pub fn key_updated_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
  }
  #[cfg(feature = "tests")]
  pub fn executed_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
  }
}

@@ -1,13 +0,0 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;

@@ -1,15 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "../../../contracts/Schnorr.sol";

contract TestSchnorr {
  function verify(
    bytes32 px,
    bytes calldata message,
    bytes32 c,
    bytes32 s
  ) external pure returns (bool) {
    return Schnorr.verify(px, message, c, s);
  }
}

@@ -1,105 +0,0 @@
use rand_core::OsRng;

use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

#[test]
fn test_ecrecover() {
  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  // Sign the signature
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed(
      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
      &keccak256(MESSAGE).into(),
    )
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      hash_to_scalar(MESSAGE),
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
    .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
  let (keys, _) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
  R: ProjectivePoint,
  public_key: &PublicKey,
  m: &[u8],
  s: Scalar,
) -> (Scalar, Scalar) {
  let c = EthereumHram::hram(&R, &public_key.A, m);
  let sa = -(s * public_key.px);
  let ca = -(c * public_key.px);
  (sa, ca)
}

#[test]
fn test_ecrecover_hack() {
  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);

  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
  assert_eq!(q, address(&sig.R));
}
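
For reference, the algebra behind `preprocess_signature_for_ecrecover` and the final assertion (a derivation added here, not text from the commit): `ecrecover` returns the address of $r^{-1}(s' R_{\mathrm{sig}} - h G)$, where $R_{\mathrm{sig}}$ is the curve point with x-coordinate $r$ and the parity selected by $v$. Passing $h = -s \cdot px$, $r = px$ (so $R_{\mathrm{sig}} = A$, the even-y public key), and $s' = -c \cdot px$ yields

$$ px^{-1} \big( (-c \cdot px) A - (-s \cdot px) G \big) = s G - c A, $$

which equals the nonce commitment $R$ exactly when the Schnorr verification equation $s G = R + c A$ holds, so the recovered address must match `address(&sig.R)`.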

@@ -1,93 +0,0 @@
use std::sync::Arc;

use rand_core::OsRng;

use group::ff::PrimeField;
use k256::Scalar;

use frost::{
  curve::Secp256k1,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::Address;

use alloy_sol_types::SolCall;

use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  Error,
  crypto::*,
  tests::{key_gen, deploy_contract, abi::schnorr as abi},
};

async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
  (anvil, client, address)
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
}

pub async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  contract: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<(), Error> {
  let px: [u8; 32] = public_key.px.to_repr().into();
  let c_bytes: [u8; 32] = signature.c.to_repr().into();
  let s_bytes: [u8; 32] = signature.s.to_repr().into();
  let call = TransactionRequest::default().to(contract).input(TransactionInput::new(
    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
      .abi_encode()
      .into(),
  ));
  let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?;
  let res =
    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;

  if res._0 {
    Ok(())
  } else {
    Err(Error::InvalidSignature)
  }
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (_anvil, client, contract) = setup_test().await;

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();

  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}

@@ -249,7 +249,7 @@ fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
 /// While no implementors are directly provided, [monero-simple-request-rpc](
 /// https://github.com/serai-dex/serai/tree/develop/networks/monero/rpc/simple-request
 /// ) is recommended.
-pub trait Rpc: Sync + Clone + Debug {
+pub trait Rpc: Sync + Clone {
   /// Perform a POST request to the specified route with the specified body.
   ///
   /// The implementor is left to handle anything such as authentication.
@@ -1003,10 +1003,10 @@ pub trait Rpc: Sync + Clone + Debug {
 /// An implementation is provided for any satisfier of `Rpc`. It is not recommended to use an `Rpc`
 /// object to satisfy this. This should be satisfied by a local store of the output distribution,
 /// both for performance and to prevent potential attacks a remote node can perform.
-pub trait DecoyRpc: Sync + Clone + Debug {
+pub trait DecoyRpc: Sync {
   /// Get the height the output distribution ends at.
   ///
-  /// This is equivalent to the hight of the blockchain it's for. This is intended to be cheaper
+  /// This is equivalent to the height of the blockchain it's for. This is intended to be cheaper
   /// than fetching the entire output distribution.
   fn get_output_distribution_end_height(
     &self,
@@ -79,10 +79,13 @@ pub struct Block {
 }
 
 impl Block {
-  /// The zero-index position of this block within the blockchain.
+  /// The zero-indexed position of this block within the blockchain.
   ///
   /// This information comes from the Block's miner transaction. If the miner transaction isn't
-  /// structured as expected, this will return None.
+  /// structured as expected, this will return None. This will return Some for any Block which
+  /// would pass the consensus rules.
+  // https://github.com/monero-project/monero/blob/a1dc85c5373a30f14aaf7dcfdd95f5a7375d3623
+  // /src/cryptonote_core/blockchain.cpp#L1365-L1382
   pub fn number(&self) -> Option<usize> {
     match &self.miner_transaction {
       Transaction::V1 { prefix, .. } | Transaction::V2 { prefix, .. } => {
@@ -100,10 +100,11 @@ impl Change {
   ///
   /// 1) The change in the TX is shunted to the fee (making it fingerprintable).
   ///
-  /// 2) If there are two outputs in the TX, Monero would create a payment ID for the non-change
-  ///    output so an observer can't tell apart TXs with a payment ID from TXs without a payment
-  ///    ID. monero-wallet will simply not create a payment ID in this case, revealing it's a
-  ///    monero-wallet TX without change.
+  /// 2) In two-output transactions, where the payment address doesn't have a payment ID, wallet2
+  ///    includes an encrypted dummy payment ID for the non-change output in order to not allow
+  ///    differentiating if transactions send to addresses with payment IDs or not. monero-wallet
+  ///    includes a dummy payment ID which at least one recipient will identify as not the expected
+  ///    dummy payment ID, revealing to the recipient(s) the sender is using non-wallet2 software.
   pub fn fingerprintable(address: Option<MoneroAddress>) -> Change {
     if let Some(address) = address {
       Change(Some(ChangeEnum::AddressOnly(address)))
@@ -76,10 +76,18 @@ impl SignableTransaction {
       PaymentId::Encrypted(id).write(&mut id_vec).unwrap();
       extra.push_nonce(id_vec);
     } else {
-      // If there's no payment ID, we push a dummy (as wallet2 does) if there's only one payment
-      if (self.payments.len() == 2) &&
-        self.payments.iter().any(|payment| matches!(payment, InternalPayment::Change(_)))
-      {
+      /*
+        If there's no payment ID, we push a dummy (as wallet2 does) to the first payment.
+
+        This does cause a random payment ID for the other recipient (a documented fingerprint).
+        Functionally, random payment IDs should be fine as wallet2 will trigger this same behavior
+        (a random payment ID being seen by the recipient) with a batch send if one of the recipient
+        addresses has a payment ID.
+
+        The alternative would be to not include any payment ID, fingerprinting to the entire
+        blockchain that this is non-standard wallet software (instead of just a single recipient).
+      */
+      if self.payments.len() == 2 {
         let (_, payment_id_xor) = self
           .payments
           .iter()
@@ -3,7 +3,7 @@
 RPC_USER="${RPC_USER:=serai}"
 RPC_PASS="${RPC_PASS:=seraidex}"
 
-bitcoind -txindex -regtest --port=8333 \
+bitcoind -regtest --port=8333 \
   -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \
   -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \
-  $1
+  $@
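
(Context for this hunk: `$1` forwards only the first extra argument to `bitcoind`, while `$@` forwards all of them, so callers may now pass multiple flags through the script.)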

@@ -1,11 +0,0 @@
#!/bin/sh

RPC_USER="${RPC_USER:=serai}"
RPC_PASS="${RPC_PASS:=seraidex}"

# Run Monero
monerod --non-interactive --regtest --offline --fixed-difficulty=1 \
  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \
  --rpc-access-control-origins "*" --disable-rpc-ban \
  --rpc-login=$RPC_USER:$RPC_PASS \
  $1

@@ -8,4 +8,4 @@ monerod --non-interactive --regtest --offline --fixed-difficulty=1 \
   --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \
   --rpc-access-control-origins "*" --disable-rpc-ban \
   --rpc-login=$RPC_USER:$RPC_PASS --log-level 2 \
-  $1
+  $@

@@ -21,8 +21,8 @@ pub fn processor(
   if coin == "ethereum" {
     r#"
 RUN cargo install svm-rs
-RUN svm install 0.8.25
-RUN svm use 0.8.25
+RUN svm install 0.8.26
+RUN svm use 0.8.26
 "#
   } else {
     ""

@@ -3,7 +3,7 @@
 RPC_USER="${RPC_USER:=serai}"
 RPC_PASS="${RPC_PASS:=seraidex}"
 
-bitcoind -txindex -testnet -port=8333 \
+bitcoind -testnet -port=8333 \
   -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \
   -rpcbind=0.0.0.0 -rpcallowip=0.0.0.0/0 -rpcport=8332 \
   --datadir=/volume

@@ -1,11 +0,0 @@
#!/bin/sh

RPC_USER="${RPC_USER:=serai}"
RPC_PASS="${RPC_PASS:=seraidex}"

# Run Monero
monerod --non-interactive --regtest --offline --fixed-difficulty=1 \
  --no-zmq --rpc-bind-ip=0.0.0.0 --rpc-bind-port=18081 --confirm-external-bind \
  --rpc-access-control-origins "*" --disable-rpc-ban \
  --rpc-login=$RPC_USER:$RPC_PASS \
  $1

@@ -1,97 +0,0 @@
[package]
name = "serai-processor"
version = "0.1.0"
description = "Multichain processor premised on canonicity to reach distributed consensus automatically"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
# Macros
async-trait = { version = "0.1", default-features = false }
zeroize = { version = "1", default-features = false, features = ["std"] }
thiserror = { version = "1", default-features = false }

# Libs
rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

# Encoders
const-hex = { version = "1", default-features = false }
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serde_json = { version = "1", default-features = false, features = ["std"] }

# Cryptography
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }
transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std"] }
ec-divisors = { package = "ec-divisors", path = "../crypto/evrf/divisors", default-features = false }
dkg = { package = "dkg", path = "../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] }
frost = { package = "modular-frost", path = "../crypto/frost", default-features = false, features = ["ristretto"] }
frost-schnorrkel = { path = "../crypto/schnorrkel", default-features = false }

# Bitcoin/Ethereum
k256 = { version = "^0.13.1", default-features = false, features = ["std"], optional = true }

# Bitcoin
secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"], optional = true }
bitcoin-serai = { path = "../networks/bitcoin", default-features = false, features = ["std"], optional = true }

# Ethereum
ethereum-serai = { path = "../networks/ethereum", default-features = false, optional = true }

# Monero
dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"], optional = true }
monero-simple-request-rpc = { path = "../networks/monero/rpc/simple-request", default-features = false, optional = true }
monero-wallet = { path = "../networks/monero/wallet", default-features = false, features = ["std", "multisig", "compile-time-generators"], optional = true }

# Application
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"], optional = true }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env", optional = true }
# TODO: Replace with direct usage of primitives
serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }

messages = { package = "serai-processor-messages", path = "./messages" }

message-queue = { package = "serai-message-queue", path = "../message-queue", optional = true }

[dev-dependencies]
frost = { package = "modular-frost", path = "../crypto/frost", features = ["tests"] }

sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }

ethereum-serai = { path = "../networks/ethereum", default-features = false, features = ["tests"] }

dockertest = "0.5"
serai-docker-tests = { path = "../tests/docker" }

[features]
secp256k1 = ["k256", "dkg/evrf-secp256k1", "frost/secp256k1"]
bitcoin = ["dep:secp256k1", "secp256k1", "bitcoin-serai", "serai-client/bitcoin"]

ethereum = ["secp256k1", "ethereum-serai/tests"]

ed25519 = ["dalek-ff-group", "dkg/evrf-ed25519", "frost/ed25519"]
monero = ["ed25519", "monero-simple-request-rpc", "monero-wallet", "serai-client/monero"]

binaries = ["env_logger", "serai-env", "message-queue"]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]

@@ -1,5 +1,5 @@
 # Processor
 
-The Serai processor scans a specified external network, communicating with the
-coordinator. For details on its exact messaging flow, and overall policies,
-please view `docs/processor`.
+The Serai processors, built from the libraries here, scan an external network
+and report the indexed data to the coordinator. For details on their exact
+messaging flow, and overall policies, please view `docs/processor`.

processor/TODO/main.rs (new file, 61 lines)
@@ -0,0 +1,61 @@
use messages::{
  coordinator::{
    SubstrateSignableId, PlanMeta, CoordinatorMessage as CoordinatorCoordinatorMessage,
  },
  CoordinatorMessage,
};

use serai_env as env;

use message_queue::{Service, client::MessageQueue};

mod db;
pub use db::*;

mod coordinator;
pub use coordinator::*;

mod multisigs;
use multisigs::{MultisigEvent, MultisigManager};

#[cfg(test)]
mod tests;

async fn handle_coordinator_msg<D: Db, N: Network, Co: Coordinator>(
  txn: &mut D::Transaction<'_>,
  network: &N,
  coordinator: &mut Co,
  tributary_mutable: &mut TributaryMutable<N, D>,
  substrate_mutable: &mut SubstrateMutable<N, D>,
  msg: &Message,
) {
  match msg.msg.clone() {
    CoordinatorMessage::Substrate(msg) => {
      match msg {
        messages::substrate::CoordinatorMessage::SubstrateBlock {
          context,
          block: substrate_block,
          burns,
          batches,
        } => {
          // Send SubstrateBlockAck, with relevant plan IDs, before we trigger the signing of
          // these plans
          if !tributary_mutable.signers.is_empty() {
            coordinator
              .send(messages::coordinator::ProcessorMessage::SubstrateBlockAck {
                block: substrate_block,
                plans: to_sign
                  .iter()
                  .filter_map(|signable| {
                    SessionDb::get(txn, signable.0.to_bytes().as_ref())
                      .map(|session| PlanMeta { session, id: signable.1 })
                  })
                  .collect(),
              })
              .await;
          }
        }
      }
    }
  }
}

@@ -1,3 +1,5 @@
+// TODO
+
 use core::{time::Duration, pin::Pin, future::Future};
 use std::collections::HashMap;
 
@@ -1,3 +1,5 @@
+// TODO
+
 use std::collections::HashMap;
 
 use rand_core::{RngCore, OsRng};
@@ -1,3 +1,5 @@
+// TODO
+
 use std::collections::HashMap;
 
 use rand_core::{RngCore, OsRng};
@@ -1,3 +1,5 @@
+// TODO
+
 use std::collections::HashMap;
 
 use zeroize::Zeroizing;
@@ -1,3 +1,5 @@
+// TODO
+
 use dockertest::{
   PullPolicy, StartPolicy, LogOptions, LogAction, LogPolicy, LogSource, Image,
   TestBodySpecification, DockerOperations, DockerTest,
@@ -1,3 +1,5 @@
+// TODO
+
 use std::sync::OnceLock;
 
 mod key_gen;
@@ -1,3 +1,5 @@
+// TODO
+
 use core::{pin::Pin, time::Duration, future::Future};
 use std::sync::Arc;
 
@@ -71,7 +73,7 @@ pub async fn test_scanner<N: Network>(
   let block_id = block.id();
 
   // Verify the Scanner picked them up
-  let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async {
+  let verify_event = |mut scanner: ScannerHandle<N, MemDb>| async move {
     let outputs =
       match timeout(Duration::from_secs(30), scanner.events.recv()).await.unwrap().unwrap() {
         ScannerEvent::Block { is_retirement_block, block, outputs } => {
@@ -1,3 +1,5 @@
+// TODO
+
 use core::{pin::Pin, future::Future};
 use std::collections::HashMap;
 
@@ -184,7 +186,6 @@ pub async fn test_signer<N: Network>(
   let mut scheduler = N::Scheduler::new::<MemDb>(&mut txn, key, N::NETWORK);
   let payments = vec![Payment {
     address: N::external_address(&network, key).await,
-    data: None,
     balance: Balance {
       coin: match N::NETWORK {
         NetworkId::Serai => panic!("test_signer called with Serai"),
@@ -1,3 +1,5 @@
+// TODO
+
 use core::{time::Duration, pin::Pin, future::Future};
 use std::collections::HashMap;
 
@@ -88,10 +90,6 @@ pub async fn test_wallet<N: Network>(
     outputs.clone(),
     vec![Payment {
       address: N::external_address(&network, key).await,
-      data: None,
       balance: Balance {
         coin: match N::NETWORK {
           NetworkId::Serai => panic!("test_wallet called with Serai"),
@@ -116,7 +117,6 @@ pub async fn test_wallet<N: Network>(
   plans[0].payments,
   vec![Payment {
     address: N::external_address(&network, key).await,
-    data: None,
     balance: Balance {
       coin: match N::NETWORK {
         NetworkId::Serai => panic!("test_wallet called with Serai"),

processor/bin/Cargo.toml (new file, 50 lines)
@@ -0,0 +1,50 @@
[package]
name = "serai-processor-bin"
version = "0.1.0"
description = "Framework for Serai processor binaries"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor/bin"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
zeroize = { version = "1", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-ristretto"] }

serai-client = { path = "../../substrate/client", default-features = false }

log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

serai-env = { path = "../../common/env" }
serai-db = { path = "../../common/db" }

messages = { package = "serai-processor-messages", path = "../messages" }
key-gen = { package = "serai-processor-key-gen", path = "../key-gen" }

primitives = { package = "serai-processor-primitives", path = "../primitives" }
scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" }
scanner = { package = "serai-processor-scanner", path = "../scanner" }
signers = { package = "serai-processor-signers", path = "../signers" }

message-queue = { package = "serai-message-queue", path = "../../message-queue" }

[features]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]

processor/bin/LICENSE (new file, 15 lines)
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

processor/bin/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Serai Processor Bin

The framework for Serai processor binaries, common to the Serai processors.

processor/bin/src/coordinator.rs (new file, 238 lines)
@@ -0,0 +1,238 @@
use core::future::Future;
use std::sync::{LazyLock, Arc, Mutex};

use tokio::sync::mpsc;

use scale::Encode;
use serai_client::{
  primitives::Signature,
  validator_sets::primitives::Session,
  in_instructions::primitives::{Batch, SignedBatch},
};

use serai_db::{Get, DbTxn, Db, create_db, db_channel};

use scanner::ScannerFeed;

use message_queue::{Service, Metadata, client::MessageQueue};

create_db! {
  ProcessorBinCoordinator {
    SavedMessages: () -> u64,
  }
}

db_channel! {
  ProcessorBinCoordinator {
    ReceivedCoordinatorMessages: () -> Vec<u8>,
  }
}

// A lock to access SentCoordinatorMessages::send
static SEND_LOCK: LazyLock<Mutex<()>> = LazyLock::new(|| Mutex::new(()));

db_channel! {
  ProcessorBinCoordinator {
    SentCoordinatorMessages: () -> Vec<u8>,
  }
}

#[derive(Clone)]
pub(crate) struct CoordinatorSend {
  db: crate::Db,
  sent_message: mpsc::UnboundedSender<()>,
}

impl CoordinatorSend {
  fn send(&mut self, msg: &messages::ProcessorMessage) {
    let _lock = SEND_LOCK.lock().unwrap();
    let mut txn = self.db.txn();
    SentCoordinatorMessages::send(&mut txn, &borsh::to_vec(msg).unwrap());
    txn.commit();
    self
      .sent_message
      .send(())
      .expect("failed to tell the Coordinator tasks there's a new message to send");
  }
}

pub(crate) struct Coordinator {
  received_message: mpsc::UnboundedReceiver<()>,
  send: CoordinatorSend,
}

impl Coordinator {
  pub(crate) fn new<S: ScannerFeed>(db: crate::Db) -> Self {
    let (received_message_send, received_message_recv) = mpsc::unbounded_channel();
    let (sent_message_send, mut sent_message_recv) = mpsc::unbounded_channel();

    let service = Service::Processor(S::NETWORK);
    let message_queue = Arc::new(MessageQueue::from_env(service));

    // Spawn a task to move messages from the message-queue to our database so we can achieve
    // atomicity. This is the only place we read/ack messages from
    tokio::spawn({
      let mut db = db.clone();
      let message_queue = message_queue.clone();
      async move {
        loop {
          let msg = message_queue.next(Service::Coordinator).await;

          let prior_msg = msg.id.checked_sub(1);
          let saved_messages = SavedMessages::get(&db);
          /*
            This should either be:
              A) The message after the message we just saved (as normal)
              B) The message we just saved (if we rebooted and failed to ack it)
          */
          assert!((saved_messages == prior_msg) || (saved_messages == Some(msg.id)));
          if saved_messages < Some(msg.id) {
            let mut txn = db.txn();
            ReceivedCoordinatorMessages::send(&mut txn, &msg.msg);
            SavedMessages::set(&mut txn, &msg.id);
            txn.commit();
          }
          // Acknowledge this message
          message_queue.ack(Service::Coordinator, msg.id).await;

          // Fire that there's a new message
          received_message_send
            .send(())
            .expect("failed to tell the Coordinator there's a new message");
        }
      }
    });

    // Spawn a task to send messages to the message-queue
    tokio::spawn({
      let mut db = db.clone();
      async move {
        loop {
          let mut txn = db.txn();
          match SentCoordinatorMessages::try_recv(&mut txn) {
            Some(msg) => {
              let metadata = Metadata {
                from: service,
                to: Service::Coordinator,
                intent: borsh::from_slice::<messages::ProcessorMessage>(&msg).unwrap().intent(),
              };
              message_queue.queue(metadata, msg).await;
              txn.commit();
            }
            None => {
              let _ =
                tokio::time::timeout(core::time::Duration::from_secs(60), sent_message_recv.recv())
                  .await;
            }
          }
        }
      }
    });

    let send = CoordinatorSend { db, sent_message: sent_message_send };
    Coordinator { received_message: received_message_recv, send }
  }

  pub(crate) fn coordinator_send(&self) -> CoordinatorSend {
    self.send.clone()
  }

  /// Fetch the next message from the Coordinator.
  ///
  /// This message is guaranteed to have never been handled before, where handling is defined as
  /// this `txn` being committed.
  pub(crate) async fn next_message(
    &mut self,
    txn: &mut impl DbTxn,
  ) -> messages::CoordinatorMessage {
    loop {
      match ReceivedCoordinatorMessages::try_recv(txn) {
        Some(msg) => {
          return borsh::from_slice(&msg)
            .expect("message wasn't a borsh-encoded CoordinatorMessage")
        }
        None => {
          let _ =
            tokio::time::timeout(core::time::Duration::from_secs(60), self.received_message.recv())
              .await;
        }
      }
    }
  }

  pub(crate) fn send_message(&mut self, msg: &messages::ProcessorMessage) {
    self.send.send(msg);
  }
}

impl signers::Coordinator for CoordinatorSend {
  type EphemeralError = ();

  fn send(
    &mut self,
    msg: messages::sign::ProcessorMessage,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move {
      self.send(&messages::ProcessorMessage::Sign(msg));
      Ok(())
    }
  }

  fn publish_cosign(
    &mut self,
    block_number: u64,
    block: [u8; 32],
    signature: Signature,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move {
      self.send(&messages::ProcessorMessage::Coordinator(
        messages::coordinator::ProcessorMessage::CosignedBlock {
          block_number,
          block,
          signature: signature.encode(),
        },
      ));
      Ok(())
    }
  }

  fn publish_batch(
    &mut self,
    batch: Batch,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move {
      self.send(&messages::ProcessorMessage::Substrate(
        messages::substrate::ProcessorMessage::Batch { batch },
      ));
      Ok(())
    }
  }

  fn publish_signed_batch(
    &mut self,
    batch: SignedBatch,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move {
      self.send(&messages::ProcessorMessage::Coordinator(
        messages::coordinator::ProcessorMessage::SignedBatch { batch },
      ));
      Ok(())
    }
  }

  fn publish_slash_report_signature(
    &mut self,
    session: Session,
    signature: Signature,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move {
      self.send(&messages::ProcessorMessage::Coordinator(
        messages::coordinator::ProcessorMessage::SignedSlashReport {
          session,
          signature: signature.encode(),
        },
      ));
      Ok(())
    }
  }
}
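
A hypothetical usage sketch (not part of the file) of how `next_message` pairs with a database transaction to provide exactly-once handling: the message only counts as handled once the `txn` which yielded it commits.

async fn handle_forever(mut db: crate::Db, mut coordinator: Coordinator) {
  loop {
    let mut txn = db.txn();
    let msg = coordinator.next_message(&mut txn).await;
    // ... process `msg`, writing any results through `txn` ...
    let _ = msg;
    // Committing atomically marks the message handled; a crash before this
    // line re-yields the same message on restart
    txn.commit();
  }
}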

processor/bin/src/lib.rs (new file, 316 lines)
@@ -0,0 +1,316 @@
use core::cmp::Ordering;

use zeroize::{Zeroize, Zeroizing};

use ciphersuite::{
  group::{ff::PrimeField, GroupEncoding},
  Ciphersuite, Ristretto,
};
use dkg::evrf::EvrfCurve;

use serai_client::validator_sets::primitives::Session;

use serai_env as env;
use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};

use primitives::EncodableG;
use ::key_gen::{KeyGenParams, KeyGen};
use scheduler::{SignableTransaction, TransactionFor};
use scanner::{ScannerFeed, Scanner, KeyFor, Scheduler};
use signers::{TransactionPublisher, Signers};

mod coordinator;
use coordinator::Coordinator;

create_db! {
  ProcessorBin {
    ExternalKeyForSessionForSigners: <K: GroupEncoding>(session: Session) -> EncodableG<K>,
  }
}

db_channel! {
  ProcessorBin {
    KeyToActivate: <K: GroupEncoding>() -> EncodableG<K>
  }
}

/// The type used for the database.
#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
pub type Db = serai_db::ParityDb;
/// The type used for the database.
#[cfg(feature = "rocksdb")]
pub type Db = serai_db::RocksDB;

/// Initialize the processor.
///
/// Yields the database.
#[allow(unused_variables, unreachable_code)]
pub fn init() -> Db {
  // Override the panic handler with one which will panic if any tokio task panics
  {
    let existing = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic| {
      existing(panic);
      const MSG: &str = "exiting the process due to a task panicking";
      println!("{MSG}");
      log::error!("{MSG}");
      std::process::exit(1);
    }));
  }

  if std::env::var("RUST_LOG").is_err() {
    std::env::set_var("RUST_LOG", serai_env::var("RUST_LOG").unwrap_or_else(|| "info".to_string()));
  }
  env_logger::init();

  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
  let db =
    serai_db::new_parity_db(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
  #[cfg(feature = "rocksdb")]
  let db = serai_db::new_rocksdb(&serai_env::var("DB_PATH").expect("path to DB wasn't specified"));
  db
}

/// The URL for the external network's node.
pub fn url() -> String {
  let login = env::var("NETWORK_RPC_LOGIN").expect("network RPC login wasn't specified");
  let hostname = env::var("NETWORK_RPC_HOSTNAME").expect("network RPC hostname wasn't specified");
  let port = env::var("NETWORK_RPC_PORT").expect("network RPC port wasn't specified");
  "http://".to_string() + &login + "@" + &hostname + ":" + &port
}
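
As an example of the resulting URL (the hostname here is illustrative): with `NETWORK_RPC_LOGIN=serai:seraidex` (the credentials used by the Dockerfiles above), `NETWORK_RPC_HOSTNAME=bitcoin`, and `NETWORK_RPC_PORT=8332`, this returns `http://serai:seraidex@bitcoin:8332`.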

fn key_gen<K: KeyGenParams>() -> KeyGen<K> {
  fn read_key_from_env<C: Ciphersuite>(label: &'static str) -> Zeroizing<C::F> {
    let key_hex =
      Zeroizing::new(env::var(label).unwrap_or_else(|| panic!("{label} wasn't provided")));
    let bytes = Zeroizing::new(
      hex::decode(key_hex).unwrap_or_else(|_| panic!("{label} wasn't a valid hex string")),
    );

    let mut repr = <C::F as PrimeField>::Repr::default();
    if repr.as_ref().len() != bytes.len() {
      panic!("{label} wasn't the correct length");
    }
    repr.as_mut().copy_from_slice(bytes.as_slice());
    let res = Zeroizing::new(
      Option::from(<C::F as PrimeField>::from_repr(repr))
        .unwrap_or_else(|| panic!("{label} wasn't a valid scalar")),
    );
    repr.as_mut().zeroize();
    res
  }
  KeyGen::new(
    read_key_from_env::<<Ristretto as EvrfCurve>::EmbeddedCurve>("SUBSTRATE_EVRF_KEY"),
    read_key_from_env::<<K::ExternalNetworkCiphersuite as EvrfCurve>::EmbeddedCurve>(
      "NETWORK_EVRF_KEY",
    ),
  )
}

async fn first_block_after_time<S: ScannerFeed>(feed: &S, serai_time: u64) -> u64 {
  async fn first_block_after_time_iteration<S: ScannerFeed>(
    feed: &S,
    serai_time: u64,
  ) -> Result<Option<u64>, S::EphemeralError> {
    let latest = feed.latest_finalized_block_number().await?;
    let latest_time = feed.time_of_block(latest).await?;
    if latest_time < serai_time {
      tokio::time::sleep(core::time::Duration::from_secs(serai_time - latest_time)).await;
      return Ok(None);
    }

    // A finalized block has a time greater than or equal to the time we want to start at
    // Find the first such block with a binary search
    // start_search and end_search are inclusive
    let mut start_search = 0;
    let mut end_search = latest;
    while start_search != end_search {
      // This purposely chooses the earlier block in the case two blocks are both in the middle
      let to_check = start_search + ((end_search - start_search) / 2);
      let block_time = feed.time_of_block(to_check).await?;
      match block_time.cmp(&serai_time) {
        Ordering::Less => {
          start_search = to_check + 1;
          assert!(start_search <= end_search);
        }
        Ordering::Equal | Ordering::Greater => {
          // This holds true since we pick the earlier block upon an even search distance
          // If it didn't, this would cause an infinite loop
          assert!(to_check < end_search);
          end_search = to_check;
        }
      }
    }
    Ok(Some(start_search))
  }
  loop {
    match first_block_after_time_iteration(feed, serai_time).await {
      Ok(Some(block)) => return block,
      Ok(None) => {
        log::info!("waiting for the block to activate (a block with timestamp >= {serai_time})");
      }
      Err(e) => {
        log::error!("couldn't find the first block Serai should scan due to an RPC error: {e:?}");
      }
    }
    tokio::time::sleep(core::time::Duration::from_secs(5)).await;
  }
}
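
A small property sketch (illustrative, not in the file) of the midpoint choice the comments above rely on: over an inclusive range, `start + ((end - start) / 2)` picks the earlier of two middle candidates, so it stays strictly below `end` whenever the loop runs:

#[test]
fn midpoint_stays_strictly_before_end() {
  let midpoint = |start: u64, end: u64| start + ((end - start) / 2);
  for start in 0 .. 10u64 {
    for end in (start + 1) ..= 10 {
      // This is the invariant which prevents an infinite loop in the search
      assert!(midpoint(start, end) < end);
    }
  }
}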

/// Hooks to run during the main loop.
pub trait Hooks {
  /// A hook to run upon receiving a message.
  fn on_message(txn: &mut impl DbTxn, msg: &messages::CoordinatorMessage);
}
impl Hooks for () {
  fn on_message(_: &mut impl DbTxn, _: &messages::CoordinatorMessage) {}
}

/// The main loop of a Processor, interacting with the Coordinator.
pub async fn main_loop<
  H: Hooks,
  S: ScannerFeed,
  K: KeyGenParams<ExternalNetworkCiphersuite: Ciphersuite<G = KeyFor<S>>>,
  Sch: Clone
    + Scheduler<
      S,
      SignableTransaction: SignableTransaction<Ciphersuite = K::ExternalNetworkCiphersuite>,
    >,
>(
  mut db: Db,
  feed: S,
  scheduler: Sch,
  publisher: impl TransactionPublisher<TransactionFor<Sch::SignableTransaction>>,
) {
  let mut coordinator = Coordinator::new::<S>(db.clone());

  let mut key_gen = key_gen::<K>();
  let mut scanner = Scanner::new(db.clone(), feed.clone(), scheduler.clone()).await;
  let mut signers =
    Signers::<Db, S, Sch, _>::new(db.clone(), coordinator.coordinator_send(), publisher);

  loop {
    let db_clone = db.clone();
    let mut txn = db.txn();
    let msg = coordinator.next_message(&mut txn).await;
    H::on_message(&mut txn, &msg);
    let mut txn = Some(txn);
    match msg {
      messages::CoordinatorMessage::KeyGen(msg) => {
        let txn = txn.as_mut().unwrap();
        let mut new_key = None;
        // This is a computationally expensive call yet it happens infrequently
        for msg in key_gen.handle(txn, msg) {
          if let messages::key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } = &msg {
            new_key = Some(*session)
          }
          coordinator.send_message(&messages::ProcessorMessage::KeyGen(msg));
        }

        // If we were yielded a key, register it in the signers
        if let Some(session) = new_key {
          let (substrate_keys, network_keys) = KeyGen::<K>::key_shares(txn, session)
            .expect("generated key pair yet couldn't get key shares");
          signers.register_keys(txn, session, substrate_keys, network_keys);
        }
      }

      // These are cheap calls which are fine to be here in this loop
      messages::CoordinatorMessage::Sign(msg) => {
        let txn = txn.as_mut().unwrap();
        signers.queue_message(txn, &msg)
      }
      messages::CoordinatorMessage::Coordinator(
        messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
          session,
          block_number,
          block,
        },
      ) => {
        let txn = txn.take().unwrap();
        signers.cosign_block(txn, session, block_number, block)
      }
      messages::CoordinatorMessage::Coordinator(
        messages::coordinator::CoordinatorMessage::SignSlashReport { session, report },
      ) => {
        let txn = txn.take().unwrap();
        signers.sign_slash_report(txn, session, &report)
      }

      messages::CoordinatorMessage::Substrate(msg) => match msg {
        messages::substrate::CoordinatorMessage::SetKeys { serai_time, session, key_pair } => {
          let txn = txn.as_mut().unwrap();
          let key =
            EncodableG(K::decode_key(key_pair.1.as_ref()).expect("invalid key set on serai"));

          // Queue the key to be activated upon the next Batch
          KeyToActivate::<KeyFor<S>>::send(txn, &key);

          // Set the external key, as needed by the signers
          ExternalKeyForSessionForSigners::<KeyFor<S>>::set(txn, session, &key);

          // This is presumed extremely expensive, potentially blocking for several minutes, yet
          // only happens for the very first set of keys
          if session == Session(0) {
            assert!(scanner.is_none());
            let start_block = first_block_after_time(&feed, serai_time).await;
            scanner = Some(
              Scanner::initialize(db_clone, feed.clone(), scheduler.clone(), start_block, key.0)
                .await,
            );
          }
        }
        messages::substrate::CoordinatorMessage::SlashesReported { session } => {
          let txn = txn.as_mut().unwrap();

          // Since this session had its slashes reported, it has finished all its signature
          // protocols and has been fully retired. We retire it from the signers accordingly
          let key = ExternalKeyForSessionForSigners::<KeyFor<S>>::take(txn, session).unwrap().0;

          // This is a cheap call
          signers.retire_session(txn, session, &key)
        }
        messages::substrate::CoordinatorMessage::Block {
          serai_block_number: _,
          batches,
          mut burns,
        } => {
          let scanner = scanner.as_mut().unwrap();

          // Substrate sets this limit to prevent DoSs from malicious validator sets
          // That bound lets us consume this txn in the following loop body, as an optimization
          assert!(batches.len() <= 1);
          for messages::substrate::ExecutedBatch { id, in_instructions } in batches {
            let key_to_activate =
              KeyToActivate::<KeyFor<S>>::try_recv(txn.as_mut().unwrap()).map(|key| key.0);

            // This is a cheap call as it internally just queues this to be done later
            let _: () = scanner.acknowledge_batch(
              txn.take().unwrap(),
              id,
              in_instructions,
              /*
                `acknowledge_batch` takes burns to optimize handling returns with standard
                payments. That's why handling these with a Batch (and not waiting until the
                following potential `queue_burns` call) makes sense. As for which Batch, the
                first is equally valid unless we want to start introspecting (and should be our
                only Batch anyways).
              */
              burns.drain(..).collect(),
              key_to_activate,
            );
          }

          // This is a cheap call as it internally just queues this to be done later
          if !burns.is_empty() {
            let _: () = scanner.queue_burns(txn.take().unwrap(), burns);
          }
        }
      },
    };
    // If the txn wasn't already consumed and committed, commit it
    if let Some(txn) = txn {
      txn.commit();
    }
  }
}
|
||||
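A note on the `txn` handling above: wrapping the transaction in an `Option` lets handlers which merely write borrow it via `as_mut`, while handlers which hand the transaction downstream consume it via `take`, with the trailing `if let Some(txn)` committing whatever remains. A minimal sketch of the pattern, with a hypothetical `Txn` type standing in for the database transaction:

// Minimal sketch of the Option-wrapped transaction pattern (hypothetical Txn type)
struct Txn;
impl Txn {
  fn commit(self) {}
}

fn handle(consume: bool) {
  let mut txn = Some(Txn);
  if consume {
    // A handler which passes the txn downstream takes ownership of it, making the
    // downstream code responsible for committing it
    let txn = txn.take().unwrap();
    txn.commit();
  } else {
    // A handler which only writes borrows it mutably, leaving it in place
    let _txn: &mut Txn = txn.as_mut().unwrap();
  }
  // If the txn wasn't already consumed, commit it here
  if let Some(txn) = txn {
    txn.commit();
  }
}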
54
processor/bitcoin/Cargo.toml
Normal file
@@ -0,0 +1,54 @@
[package]
name = "serai-bitcoin-processor"
version = "0.1.0"
description = "Serai Bitcoin Processor"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor/bitcoin"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
rand_core = { version = "0.6", default-features = false }

hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false }

secp256k1 = { version = "0.29", default-features = false, features = ["std", "global-context", "rand-std"] }
bitcoin-serai = { path = "../../networks/bitcoin", default-features = false, features = ["std"] }

serai-client = { path = "../../substrate/client", default-features = false, features = ["bitcoin"] }

zalloc = { path = "../../common/zalloc" }
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

serai-db = { path = "../../common/db" }

key-gen = { package = "serai-processor-key-gen", path = "../key-gen" }

primitives = { package = "serai-processor-primitives", path = "../primitives" }
scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" }
scanner = { package = "serai-processor-scanner", path = "../scanner" }
utxo-scheduler = { package = "serai-processor-utxo-scheduler-primitives", path = "../scheduler/utxo/primitives" }
transaction-chaining-scheduler = { package = "serai-processor-transaction-chaining-scheduler", path = "../scheduler/utxo/transaction-chaining" }
signers = { package = "serai-processor-signers", path = "../signers" }

bin = { package = "serai-processor-bin", path = "../bin" }

[features]
parity-db = ["bin/parity-db"]
rocksdb = ["bin/rocksdb"]
15
processor/bitcoin/LICENSE
Normal file
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
1
processor/bitcoin/README.md
Normal file
@@ -0,0 +1 @@
# Serai Bitcoin Processor
8
processor/bitcoin/src/db.rs
Normal file
@@ -0,0 +1,8 @@
use serai_db::{Get, DbTxn, create_db};

create_db! {
  BitcoinProcessor {
    LatestBlockToYieldAsFinalized: () -> u64,
    ScriptPubKey: (tx: [u8; 32], vout: u32) -> Vec<u8>,
  }
}
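For context on how the accessors generated above are consumed elsewhere in this diff: each `create_db!` entry expands to a keyed accessor with `set`/`get` methods over serai-db's transaction and getter traits. A rough usage sketch, assuming `create_db!` emits set/get of this shape (the real call sites are in txindex.rs and scan.rs later in this diff):

// Rough usage sketch of the generated accessors (API shape is an assumption)
fn index_output(txn: &mut impl serai_db::DbTxn, txid: [u8; 32], vout: u32, script: Vec<u8>) {
  // Write the script public key for this (txid, vout) pair
  crate::db::ScriptPubKey::set(txn, txid, vout, &script);
}

fn read_output(getter: &impl serai_db::Get, txid: [u8; 32], vout: u32) -> Option<Vec<u8>> {
  // None if this output was never indexed
  crate::db::ScriptPubKey::get(getter, txid, vout)
}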
28
processor/bitcoin/src/key_gen.rs
Normal file
@@ -0,0 +1,28 @@
use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1};
use frost::ThresholdKeys;

use crate::{primitives::x_coord_to_even_point, scan::scanner};

pub(crate) struct KeyGenParams;
impl key_gen::KeyGenParams for KeyGenParams {
  const ID: &'static str = "Bitcoin";

  type ExternalNetworkCiphersuite = Secp256k1;

  fn tweak_keys(keys: &mut ThresholdKeys<Self::ExternalNetworkCiphersuite>) {
    *keys = bitcoin_serai::wallet::tweak_keys(keys);
    // Also create a scanner to assert these keys, and all expected paths, are usable
    scanner(keys.group_key());
  }

  fn encode_key(key: <Self::ExternalNetworkCiphersuite as Ciphersuite>::G) -> Vec<u8> {
    let key = key.to_bytes();
    let key: &[u8] = key.as_ref();
    // Skip the parity encoding as we know this key is even
    key[1 ..].to_vec()
  }

  fn decode_key(key: &[u8]) -> Option<<Self::ExternalNetworkCiphersuite as Ciphersuite>::G> {
    x_coord_to_even_point(key)
  }
}
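`encode_key` drops the leading parity byte of the 33-byte SEC1 encoding since `tweak_keys` guarantees an even key, and `decode_key` reconstructs the even point from the remaining x-coordinate. A test-style sketch of the round-trip property this relies on, not part of the diff and using the generator (which is even) as the stand-in key:

// Sketch of the encode/decode round-trip assumed above: for an even key,
// dropping the parity byte is lossless as decode_key reconstructs the even point
#[cfg(test)]
#[test]
fn encoded_key_round_trips() {
  use ciphersuite::{group::GroupEncoding, Ciphersuite, Secp256k1};
  // The secp256k1 generator has an even y-coordinate, so it survives this encoding
  let key = <Secp256k1 as Ciphersuite>::G::GENERATOR;
  let encoded = key.to_bytes();
  // Strip the parity byte, leaving the 32-byte x-coordinate
  let x_only = &encoded.as_ref()[1 ..];
  assert_eq!(crate::primitives::x_coord_to_even_point(x_only), Some(key));
}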
286
processor/bitcoin/src/main.rs
Normal file
@@ -0,0 +1,286 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

#[global_allocator]
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
  zalloc::ZeroizingAlloc(std::alloc::System);

use bitcoin_serai::rpc::Rpc as BRpc;

use ::primitives::task::{Task, ContinuallyRan};

mod primitives;
pub(crate) use crate::primitives::*;

// Internal utilities for scanning transactions
mod scan;

// App-logic trait satisfactions
mod key_gen;
use crate::key_gen::KeyGenParams;
mod rpc;
use rpc::Rpc;
mod scheduler;
use scheduler::{Planner, Scheduler};

// Our custom code for Bitcoin
mod db;
mod txindex;
use txindex::TxIndexTask;

pub(crate) fn hash_bytes(hash: bitcoin_serai::bitcoin::hashes::sha256d::Hash) -> [u8; 32] {
  use bitcoin_serai::bitcoin::hashes::Hash;

  let mut res = hash.to_byte_array();
  res.reverse();
  res
}

#[tokio::main]
async fn main() {
  let db = bin::init();
  let feed = Rpc {
    db: db.clone(),
    rpc: loop {
      match BRpc::new(bin::url()).await {
        Ok(rpc) => break rpc,
        Err(e) => {
          log::error!("couldn't connect to the Bitcoin node: {e:?}");
          tokio::time::sleep(core::time::Duration::from_secs(5)).await;
        }
      }
    },
  };

  let (index_task, index_handle) = Task::new();
  tokio::spawn(TxIndexTask(feed.clone()).continually_run(index_task, vec![]));
  core::mem::forget(index_handle);

  bin::main_loop::<(), _, KeyGenParams, _>(db, feed.clone(), Scheduler::new(Planner), feed).await;
}

/*
use bitcoin_serai::{
  bitcoin::{
    hashes::Hash as HashTrait,
    key::{Parity, XOnlyPublicKey},
    consensus::{Encodable, Decodable},
    script::Instruction,
    Transaction, Block, ScriptBuf,
    opcodes::all::{OP_SHA256, OP_EQUALVERIFY},
  },
  wallet::{
    tweak_keys, p2tr_script_buf, ReceivedOutput, Scanner, TransactionError,
    SignableTransaction as BSignableTransaction, TransactionMachine,
  },
  rpc::{RpcError, Rpc},
};

#[cfg(test)]
use bitcoin_serai::bitcoin::{
  secp256k1::{SECP256K1, SecretKey, Message},
  PrivateKey, PublicKey,
  sighash::{EcdsaSighashType, SighashCache},
  script::PushBytesBuf,
  absolute::LockTime,
  Amount as BAmount, Sequence, Script, Witness, OutPoint,
  transaction::Version,
  blockdata::transaction::{TxIn, TxOut},
};

use serai_client::{
  primitives::{MAX_DATA_LEN, Coin, NetworkId, Amount, Balance},
  networks::bitcoin::Address,
};
*/

/*
impl TransactionTrait<Bitcoin> for Transaction {
  #[cfg(test)]
  async fn fee(&self, network: &Bitcoin) -> u64 {
    let mut value = 0;
    for input in &self.input {
      let output = input.previous_output;
      let mut hash = *output.txid.as_raw_hash().as_byte_array();
      hash.reverse();
      value += network.rpc.get_transaction(&hash).await.unwrap().output
        [usize::try_from(output.vout).unwrap()]
      .value
      .to_sat();
    }
    for output in &self.output {
      value -= output.value.to_sat();
    }
    value
  }
}

impl Bitcoin {
  pub(crate) async fn new(url: String) -> Bitcoin {
    let mut res = Rpc::new(url.clone()).await;
    while let Err(e) = res {
      log::error!("couldn't connect to Bitcoin node: {e:?}");
      sleep(Duration::from_secs(5)).await;
      res = Rpc::new(url.clone()).await;
    }
    Bitcoin { rpc: res.unwrap() }
  }

  #[cfg(test)]
  pub(crate) async fn fresh_chain(&self) {
    if self.rpc.get_latest_block_number().await.unwrap() > 0 {
      self
        .rpc
        .rpc_call(
          "invalidateblock",
          serde_json::json!([hex::encode(self.rpc.get_block_hash(1).await.unwrap())]),
        )
        .await
        .unwrap()
    }
  }

  // This function panics on a node which doesn't follow the Bitcoin protocol, which is deemed fine
  async fn median_fee(&self, block: &Block) -> Result<Fee, NetworkError> {
    let mut fees = vec![];
    if block.txdata.len() > 1 {
      for tx in &block.txdata[1 ..] {
        let mut in_value = 0;
        for input in &tx.input {
          let mut input_tx = input.previous_output.txid.to_raw_hash().to_byte_array();
          input_tx.reverse();
          in_value += self
            .rpc
            .get_transaction(&input_tx)
            .await
            .map_err(|_| NetworkError::ConnectionError)?
            .output[usize::try_from(input.previous_output.vout).unwrap()]
            .value
            .to_sat();
        }
        let out = tx.output.iter().map(|output| output.value.to_sat()).sum::<u64>();
        fees.push((in_value - out) / u64::try_from(tx.vsize()).unwrap());
      }
    }
    fees.sort();
    let fee = fees.get(fees.len() / 2).copied().unwrap_or(0);

    // The DUST constant documentation notes a relay rule practically enforcing a
    // 1000 sat/kilo-vbyte minimum fee.
    Ok(Fee(fee.max(1)))
  }

  #[cfg(test)]
  pub(crate) fn sign_btc_input_for_p2pkh(
    tx: &Transaction,
    input_index: usize,
    private_key: &PrivateKey,
  ) -> ScriptBuf {
    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};

    let public_key = PublicKey::from_private_key(SECP256K1, private_key);
    let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest);

    let mut der = SECP256K1
      .sign_ecdsa_low_r(
        &Message::from_digest_slice(
          SighashCache::new(tx)
            .legacy_signature_hash(
              input_index,
              &main_addr.script_pubkey(),
              EcdsaSighashType::All.to_u32(),
            )
            .unwrap()
            .to_raw_hash()
            .as_ref(),
        )
        .unwrap(),
        &private_key.inner,
      )
      .serialize_der()
      .to_vec();
    der.push(1);

    ScriptBuf::builder()
      .push_slice(PushBytesBuf::try_from(der).unwrap())
      .push_key(&public_key)
      .into_script()
  }
}

impl Network for Bitcoin {
  // 2 inputs should be 2 * 230 = 460 weight units
  // The output should be ~36 bytes, or 144 weight units
  // The overhead should be ~20 bytes at most, or 80 weight units
  // 684 weight units, 171 vbytes, round up to 200
  // 200 vbytes at 1 sat/weight (our current minimum fee, 4 sat/vbyte) = 800 sat fee for the
  // aggregation TX
  const COST_TO_AGGREGATE: u64 = 800;

  #[cfg(test)]
  async fn get_block_number(&self, id: &[u8; 32]) -> usize {
    self.rpc.get_block_number(id).await.unwrap()
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(&self, _: usize, id: &Eventuality) -> Transaction {
    self.rpc.get_transaction(&id.0).await.unwrap()
  }

  #[cfg(test)]
  async fn mine_block(&self) {
    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};

    self
      .rpc
      .rpc_call::<Vec<String>>(
        "generatetoaddress",
        serde_json::json!([1, BAddress::p2sh(Script::new(), BNetwork::Regtest).unwrap()]),
      )
      .await
      .unwrap();
  }

  #[cfg(test)]
  async fn test_send(&self, address: Address) -> Block {
    use bitcoin_serai::bitcoin::{Network as BNetwork, Address as BAddress};

    let secret_key = SecretKey::new(&mut rand_core::OsRng);
    let private_key = PrivateKey::new(secret_key, BNetwork::Regtest);
    let public_key = PublicKey::from_private_key(SECP256K1, &private_key);
    let main_addr = BAddress::p2pkh(public_key, BNetwork::Regtest);

    let new_block = self.get_latest_block_number().await.unwrap() + 1;
    self
      .rpc
      .rpc_call::<Vec<String>>("generatetoaddress", serde_json::json!([100, main_addr]))
      .await
      .unwrap();

    let tx = self.get_block(new_block).await.unwrap().txdata.swap_remove(0);
    let mut tx = Transaction {
      version: Version(2),
      lock_time: LockTime::ZERO,
      input: vec![TxIn {
        previous_output: OutPoint { txid: tx.compute_txid(), vout: 0 },
        script_sig: Script::new().into(),
        sequence: Sequence(u32::MAX),
        witness: Witness::default(),
      }],
      output: vec![TxOut {
        value: tx.output[0].value - BAmount::from_sat(10000),
        script_pubkey: address.clone().into(),
      }],
    };
    tx.input[0].script_sig = Self::sign_btc_input_for_p2pkh(&tx, 0, &private_key);

    let block = self.get_latest_block_number().await.unwrap() + 1;
    self.rpc.send_raw_transaction(&tx).await.unwrap();
    for _ in 0 .. Self::CONFIRMATIONS {
      self.mine_block().await;
    }
    self.get_block(block).await.unwrap()
  }
}
*/
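On `hash_bytes` above: Bitcoin's sha256d hashes are stored little-endian internally yet conventionally displayed (and keyed by Serai here) in reversed, big-endian order. A minimal illustration of that reversal, with placeholder bytes:

// Minimal illustration of the byte-order reversal hash_bytes performs
fn reverse_hash_bytes(mut internal: [u8; 32]) -> [u8; 32] {
  internal.reverse();
  internal
}

fn demo() {
  let mut internal = [0u8; 32];
  internal[0] = 0xaa; // placeholder leading byte of the internal encoding
  let displayed = reverse_hash_bytes(internal);
  // The internal leading byte becomes the displayed trailing byte
  assert_eq!(displayed[31], 0xaa);
}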
80
processor/bitcoin/src/primitives/block.rs
Normal file
@@ -0,0 +1,80 @@
use core::fmt;
use std::collections::HashMap;

use ciphersuite::{Ciphersuite, Secp256k1};

use bitcoin_serai::bitcoin::block::{Header, Block as BBlock};

use serai_client::networks::bitcoin::Address;

use serai_db::Db;
use primitives::{ReceivedOutput, EventualityTracker};

use crate::{hash_bytes, scan::scanner, output::Output, transaction::Eventuality};

#[derive(Clone, Debug)]
pub(crate) struct BlockHeader(pub(crate) Header);
impl primitives::BlockHeader for BlockHeader {
  fn id(&self) -> [u8; 32] {
    hash_bytes(self.0.block_hash().to_raw_hash())
  }
  fn parent(&self) -> [u8; 32] {
    hash_bytes(self.0.prev_blockhash.to_raw_hash())
  }
}

#[derive(Clone)]
pub(crate) struct Block<D: Db>(pub(crate) D, pub(crate) BBlock);
impl<D: Db> fmt::Debug for Block<D> {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    fmt.debug_struct("Block").field("1", &self.1).finish_non_exhaustive()
  }
}

impl<D: Db> primitives::Block for Block<D> {
  type Header = BlockHeader;

  type Key = <Secp256k1 as Ciphersuite>::G;
  type Address = Address;
  type Output = Output;
  type Eventuality = Eventuality;

  fn id(&self) -> [u8; 32] {
    primitives::BlockHeader::id(&BlockHeader(self.1.header))
  }

  fn scan_for_outputs_unordered(
    &self,
    _latest_active_key: Self::Key,
    key: Self::Key,
  ) -> Vec<Self::Output> {
    let scanner = scanner(key);

    let mut res = vec![];
    // We skip the coinbase transaction as it's burdened by maturity
    for tx in &self.1.txdata[1 ..] {
      for output in scanner.scan_transaction(tx) {
        res.push(Output::new(&self.0, key, tx, output));
      }
    }
    res
  }

  #[allow(clippy::type_complexity)]
  fn check_for_eventuality_resolutions(
    &self,
    eventualities: &mut EventualityTracker<Self::Eventuality>,
  ) -> HashMap<
    <Self::Output as ReceivedOutput<Self::Key, Self::Address>>::TransactionId,
    Self::Eventuality,
  > {
    let mut res = HashMap::new();
    for tx in &self.1.txdata[1 ..] {
      let id = hash_bytes(tx.compute_txid().to_raw_hash());
      if let Some(eventuality) = eventualities.active_eventualities.remove(id.as_slice()) {
        res.insert(id, eventuality);
      }
    }
    res
  }
}
20
processor/bitcoin/src/primitives/mod.rs
Normal file
@@ -0,0 +1,20 @@
use ciphersuite::{Ciphersuite, Secp256k1};

use bitcoin_serai::bitcoin::key::{Parity, XOnlyPublicKey};

pub(crate) mod output;
pub(crate) mod transaction;
pub(crate) mod block;

pub(crate) fn x_coord_to_even_point(key: &[u8]) -> Option<<Secp256k1 as Ciphersuite>::G> {
  if key.len() != 32 {
    None?
  };

  // Read the x-only public key
  let key = XOnlyPublicKey::from_slice(key).ok()?;
  // Convert to a full public key
  let key = key.public_key(Parity::Even);
  // Convert to k256 (from libsecp256k1)
  Secp256k1::read_G(&mut key.serialize().as_slice()).ok()
}
170
processor/bitcoin/src/primitives/output.rs
Normal file
@@ -0,0 +1,170 @@
use std::io;

use ciphersuite::{Ciphersuite, Secp256k1};

use bitcoin_serai::{
  bitcoin::{
    hashes::Hash as HashTrait, consensus::Encodable, script::Instruction, transaction::Transaction,
  },
  wallet::ReceivedOutput as WalletOutput,
};

use scale::{Encode, Decode, IoReader};
use borsh::{BorshSerialize, BorshDeserialize};
use serai_db::Get;

use serai_client::{
  primitives::{Coin, Amount, Balance, ExternalAddress},
  networks::bitcoin::Address,
};

use primitives::{OutputType, ReceivedOutput};

use crate::{
  primitives::x_coord_to_even_point,
  scan::{offsets_for_key, presumed_origin, extract_serai_data},
};

#[derive(Clone, PartialEq, Eq, Hash, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub(crate) struct OutputId([u8; 36]);
impl Default for OutputId {
  fn default() -> Self {
    Self([0; 36])
  }
}
impl AsRef<[u8]> for OutputId {
  fn as_ref(&self) -> &[u8] {
    self.0.as_ref()
  }
}
impl AsMut<[u8]> for OutputId {
  fn as_mut(&mut self) -> &mut [u8] {
    self.0.as_mut()
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub(crate) struct Output {
  kind: OutputType,
  presumed_origin: Option<Address>,
  pub(crate) output: WalletOutput,
  data: Vec<u8>,
}

impl Output {
  pub(crate) fn new(
    getter: &impl Get,
    key: <Secp256k1 as Ciphersuite>::G,
    tx: &Transaction,
    output: WalletOutput,
  ) -> Self {
    Self {
      kind: offsets_for_key(key)
        .into_iter()
        .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind))
        .expect("scanned output for unknown offset"),
      presumed_origin: presumed_origin(getter, tx),
      output,
      data: extract_serai_data(tx),
    }
  }

  pub(crate) fn new_with_presumed_origin(
    key: <Secp256k1 as Ciphersuite>::G,
    tx: &Transaction,
    presumed_origin: Option<Address>,
    output: WalletOutput,
  ) -> Self {
    Self {
      kind: offsets_for_key(key)
        .into_iter()
        .find_map(|(kind, offset)| (offset == output.offset()).then_some(kind))
        .expect("scanned output for unknown offset"),
      presumed_origin,
      output,
      data: extract_serai_data(tx),
    }
  }
}

impl ReceivedOutput<<Secp256k1 as Ciphersuite>::G, Address> for Output {
  type Id = OutputId;
  type TransactionId = [u8; 32];

  fn kind(&self) -> OutputType {
    self.kind
  }

  fn id(&self) -> Self::Id {
    let mut id = OutputId::default();
    self.output.outpoint().consensus_encode(&mut id.as_mut()).unwrap();
    id
  }

  fn transaction_id(&self) -> Self::TransactionId {
    let mut res = self.output.outpoint().txid.to_raw_hash().to_byte_array();
    res.reverse();
    res
  }

  fn key(&self) -> <Secp256k1 as Ciphersuite>::G {
    // We read the key from the script pubkey so we don't have to independently store it
    let script = &self.output.output().script_pubkey;

    // These assumptions are safe since it's an output we successfully scanned
    assert!(script.is_p2tr());
    let Instruction::PushBytes(key) = script.instructions_minimal().last().unwrap().unwrap() else {
      panic!("last item in v1 Taproot script wasn't bytes")
    };
    let key = x_coord_to_even_point(key.as_ref())
      .expect("last item in scanned v1 Taproot script wasn't a valid x-only public key");

    // The output's key minus the output's offset is the root key
    key - (<Secp256k1 as Ciphersuite>::G::GENERATOR * self.output.offset())
  }

  fn presumed_origin(&self) -> Option<Address> {
    self.presumed_origin.clone()
  }

  fn balance(&self) -> Balance {
    Balance { coin: Coin::Bitcoin, amount: Amount(self.output.value()) }
  }

  fn data(&self) -> &[u8] {
    &self.data
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.kind.write(writer)?;
    let presumed_origin: Option<ExternalAddress> = self.presumed_origin.clone().map(Into::into);
    writer.write_all(&presumed_origin.encode())?;
    self.output.write(writer)?;
    writer.write_all(&u16::try_from(self.data.len()).unwrap().to_le_bytes())?;
    writer.write_all(&self.data)
  }

  fn read<R: io::Read>(mut reader: &mut R) -> io::Result<Self> {
    Ok(Output {
      kind: OutputType::read(reader)?,
      presumed_origin: {
        Option::<ExternalAddress>::decode(&mut IoReader(&mut reader))
          .map_err(|e| io::Error::other(format!("couldn't decode ExternalAddress: {e:?}")))?
          .map(|address| {
            Address::try_from(address)
              .map_err(|()| io::Error::other("couldn't decode Address from ExternalAddress"))
          })
          .transpose()?
      },
      output: WalletOutput::read(reader)?,
      data: {
        let mut data_len = [0; 2];
        reader.read_exact(&mut data_len)?;

        let mut data = vec![0; usize::from(u16::from_le_bytes(data_len))];
        reader.read_exact(&mut data)?;
        data
      },
    })
  }
}
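The `write`/`read` pair above defines an ad-hoc wire format: the output kind, a SCALE-encoded optional origin, the wallet output, then the instruction data behind a little-endian `u16` length prefix. A minimal sketch of just that length-prefix framing, extracted for clarity (illustrative, not part of the diff):

use std::io;

// Minimal sketch of the u16 length-prefixed framing used for the instruction data
fn write_data<W: io::Write>(writer: &mut W, data: &[u8]) -> io::Result<()> {
  writer.write_all(&u16::try_from(data.len()).unwrap().to_le_bytes())?;
  writer.write_all(data)
}

fn read_data<R: io::Read>(reader: &mut R) -> io::Result<Vec<u8>> {
  let mut len = [0; 2];
  reader.read_exact(&mut len)?;
  // The prefix bounds the data to 65,535 bytes, making the allocation safe
  let mut data = vec![0; usize::from(u16::from_le_bytes(len))];
  reader.read_exact(&mut data)?;
  Ok(data)
}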
171
processor/bitcoin/src/primitives/transaction.rs
Normal file
@@ -0,0 +1,171 @@
use std::io;

use rand_core::{RngCore, CryptoRng};

use ciphersuite::Secp256k1;
use frost::{dkg::ThresholdKeys, sign::PreprocessMachine};

use bitcoin_serai::{
  bitcoin::{
    consensus::{Encodable, Decodable},
    ScriptBuf, Transaction as BTransaction,
  },
  wallet::{
    ReceivedOutput, TransactionError, SignableTransaction as BSignableTransaction,
    TransactionMachine,
  },
};

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::networks::bitcoin::Address;

use crate::output::OutputId;

#[derive(Clone, Debug)]
pub(crate) struct Transaction(pub(crate) BTransaction);

impl From<BTransaction> for Transaction {
  fn from(tx: BTransaction) -> Self {
    Self(tx)
  }
}

impl scheduler::Transaction for Transaction {
  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    let tx =
      BTransaction::consensus_decode(&mut io::BufReader::new(reader)).map_err(io::Error::other)?;
    Ok(Self(tx))
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    let mut writer = io::BufWriter::new(writer);
    self.0.consensus_encode(&mut writer)?;
    writer.into_inner()?;
    Ok(())
  }
}

#[derive(Clone, Debug)]
pub(crate) struct SignableTransaction {
  pub(crate) inputs: Vec<ReceivedOutput>,
  pub(crate) payments: Vec<(ScriptBuf, u64)>,
  pub(crate) change: Option<Address>,
  pub(crate) fee_per_vbyte: u64,
}

impl SignableTransaction {
  fn signable(self) -> Result<BSignableTransaction, TransactionError> {
    BSignableTransaction::new(
      self.inputs,
      &self.payments,
      self.change.map(ScriptBuf::from),
      None,
      self.fee_per_vbyte,
    )
  }
}

#[derive(Clone)]
pub(crate) struct ClonableTransactionMachine(SignableTransaction, ThresholdKeys<Secp256k1>);
impl PreprocessMachine for ClonableTransactionMachine {
  type Preprocess = <TransactionMachine as PreprocessMachine>::Preprocess;
  type Signature = <TransactionMachine as PreprocessMachine>::Signature;
  type SignMachine = <TransactionMachine as PreprocessMachine>::SignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    self
      .0
      .signable()
      .expect("signing an invalid SignableTransaction")
      .multisig(&self.1)
      .expect("incorrect keys used for SignableTransaction")
      .preprocess(rng)
  }
}

impl scheduler::SignableTransaction for SignableTransaction {
  type Transaction = Transaction;
  type Ciphersuite = Secp256k1;
  type PreprocessMachine = ClonableTransactionMachine;

  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    let inputs = {
      let mut input_len = [0; 4];
      reader.read_exact(&mut input_len)?;
      let mut inputs = vec![];
      for _ in 0 .. u32::from_le_bytes(input_len) {
        inputs.push(ReceivedOutput::read(reader)?);
      }
      inputs
    };

    let payments = Vec::<(Vec<u8>, u64)>::deserialize_reader(reader)?;
    let change = <_>::deserialize_reader(reader)?;
    let fee_per_vbyte = <_>::deserialize_reader(reader)?;

    Ok(Self {
      inputs,
      payments: payments
        .into_iter()
        .map(|(address, amount)| (ScriptBuf::from_bytes(address), amount))
        .collect(),
      change,
      fee_per_vbyte,
    })
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    writer.write_all(&u32::try_from(self.inputs.len()).unwrap().to_le_bytes())?;
    for input in &self.inputs {
      input.write(writer)?;
    }

    for payment in &self.payments {
      (payment.0.as_script().as_bytes(), payment.1).serialize(writer)?;
    }
    self.change.serialize(writer)?;
    self.fee_per_vbyte.serialize(writer)?;

    Ok(())
  }

  fn id(&self) -> [u8; 32] {
    self.clone().signable().unwrap().txid()
  }

  fn sign(self, keys: ThresholdKeys<Self::Ciphersuite>) -> Self::PreprocessMachine {
    ClonableTransactionMachine(self, keys)
  }
}

#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct Eventuality {
  pub(crate) txid: [u8; 32],
  pub(crate) singular_spent_output: Option<OutputId>,
}

impl primitives::Eventuality for Eventuality {
  type OutputId = OutputId;

  fn id(&self) -> [u8; 32] {
    self.txid
  }

  // We define the lookup as our ID since the resolving transaction only has a singular possible ID
  fn lookup(&self) -> Vec<u8> {
    self.txid.to_vec()
  }

  fn singular_spent_output(&self) -> Option<Self::OutputId> {
    self.singular_spent_output.clone()
  }

  fn read(reader: &mut impl io::Read) -> io::Result<Self> {
    Self::deserialize_reader(reader)
  }
  fn write(&self, writer: &mut impl io::Write) -> io::Result<()> {
    self.serialize(writer)
  }
}
181
processor/bitcoin/src/rpc.rs
Normal file
@@ -0,0 +1,181 @@
use core::future::Future;

use bitcoin_serai::rpc::{RpcError, Rpc as BRpc};

use serai_client::primitives::{NetworkId, Coin, Amount};

use serai_db::Db;
use scanner::ScannerFeed;
use signers::TransactionPublisher;

use crate::{
  db,
  transaction::Transaction,
  block::{BlockHeader, Block},
};

#[derive(Clone)]
pub(crate) struct Rpc<D: Db> {
  pub(crate) db: D,
  pub(crate) rpc: BRpc,
}

impl<D: Db> ScannerFeed for Rpc<D> {
  const NETWORK: NetworkId = NetworkId::Bitcoin;
  // 6 confirmations is widely accepted as secure; a reorganization of that depth shouldn't occur
  const CONFIRMATIONS: u64 = 6;
  // The window length should be roughly an hour
  const WINDOW_LENGTH: u64 = 6;

  const TEN_MINUTES: u64 = 1;

  type Block = Block<D>;

  type EphemeralError = RpcError;

  fn latest_finalized_block_number(
    &self,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
    async move { db::LatestBlockToYieldAsFinalized::get(&self.db).ok_or(RpcError::ConnectionError) }
  }

  fn time_of_block(
    &self,
    number: u64,
  ) -> impl Send + Future<Output = Result<u64, Self::EphemeralError>> {
    async move {
      let number = usize::try_from(number).unwrap();

      /*
        The block time isn't guaranteed to be monotonic. It is guaranteed to be greater than the
        median time of prior blocks, as detailed in BIP-0113 (a BIP which used that fact to
        improve CLTV). This creates a monotonic median time which we use as the block time.
      */
      // This implements `GetMedianTimePast`
      let median = {
        const MEDIAN_TIMESPAN: usize = 11;
        let mut timestamps = Vec::with_capacity(MEDIAN_TIMESPAN);
        for i in number.saturating_sub(MEDIAN_TIMESPAN) .. number {
          timestamps
            .push(self.rpc.get_block(&self.rpc.get_block_hash(i).await?).await?.header.time);
        }
        timestamps.sort();
        timestamps[timestamps.len() / 2]
      };

      /*
        This block's timestamp is guaranteed to be greater than this median:
        https://github.com/bitcoin/bitcoin/blob/0725a374941355349bb4bc8a79dad1affb27d3b9/src/validation.cpp#L4182-L4184

        This does not guarantee the median always increases however. Take the following trivial
        example, as the window is initially built:

        0 block has time 0 // Prior blocks: []
        1 block has time 1 // Prior blocks: [0]
        2 block has time 2 // Prior blocks: [0, 1]
        3 block has time 2 // Prior blocks: [0, 1, 2]

        The last two blocks have the same time (both greater than the median of their prior
        blocks) and the same median.

        The median will never decrease however. The values pushed onto the window will always be
        greater than the median. If a value greater than the median is popped, the median will
        remain the same (due to the counterbalance of the pushed value). If a value less than the
        median is popped, the median will increase (either to another instance of the same value,
        yet one closer to the end of the repeating sequence, or to a higher value).
      */
      Ok(median.into())
    }
  }
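The comment's worked example can be checked directly: with the 11-entry window, pushing values which each exceed the current median never decreases the median. A test-style sketch of that property over the example sequence (purely illustrative, not part of the diff):

// Illustrative check that the median-time-past is monotonic when each new
// timestamp exceeds the median of the prior window
fn median_of_window(window: &[u64]) -> u64 {
  let mut sorted = window.to_vec();
  sorted.sort();
  sorted[sorted.len() / 2]
}

fn demo() {
  const MEDIAN_TIMESPAN: usize = 11;
  // Each timestamp is greater than the median of its prior blocks, per the consensus rule
  let times = [0, 1, 2, 2, 3, 3, 4];
  let mut last_median = 0;
  for i in 1 .. times.len() {
    let window = &times[i.saturating_sub(MEDIAN_TIMESPAN) .. i];
    let median = median_of_window(window);
    // The median may repeat (as in the comment's example) yet never decreases
    assert!(median >= last_median);
    last_median = median;
  }
}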
  fn unchecked_block_header_by_number(
    &self,
    number: u64,
  ) -> impl Send
       + Future<Output = Result<<Self::Block as primitives::Block>::Header, Self::EphemeralError>>
  {
    async move {
      Ok(BlockHeader(
        self
          .rpc
          .get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?)
          .await?
          .header,
      ))
    }
  }

  fn unchecked_block_by_number(
    &self,
    number: u64,
  ) -> impl Send + Future<Output = Result<Self::Block, Self::EphemeralError>> {
    async move {
      Ok(Block(
        self.db.clone(),
        self.rpc.get_block(&self.rpc.get_block_hash(number.try_into().unwrap()).await?).await?,
      ))
    }
  }

  fn dust(coin: Coin) -> Amount {
    assert_eq!(coin, Coin::Bitcoin);

    /*
      A Taproot input is:
      - 36 bytes for the OutPoint
      - 0 bytes for the script (+1 byte for the length)
      - 4 bytes for the sequence
      Per https://developer.bitcoin.org/reference/transactions.html#raw-transaction-format

      There's also:
      - 1 byte for the witness length
      - 1 byte for the signature length
      - 64 bytes for the signature
      which have the SegWit discount.

      (4 * (36 + 1 + 4)) + (1 + 1 + 64) = 164 + 66 = 230 weight units
      230 ceil div 4 = 58 vbytes

      Bitcoin defines multiple minimum feerate constants *per kilo-vbyte*. Currently, these are:
      - 1000 sat/kilo-vbyte for a transaction to be relayed
      - Each output's value must exceed the fee of the TX spending it at 3000 sat/kilo-vbyte
      The DUST constant needs to be determined by the latter.
      Since these are solely relay rules, and may be raised, we require all outputs be spendable
      under a 5000 sat/kilo-vbyte fee rate.

      5000 sat/kilo-vbyte = 5 sat/vbyte
      5 * 58 = 290 sats/spent-output

      Even if an output took 100 bytes (it should be just ~29-43), taking 400 weight units and
      adding 100 vbytes, tripling the transaction size, the sats/tx would still be < 1000.

      Increase by an order of magnitude, in order to ensure this is actually worth our time, and
      we get 10,000 satoshis. This is $5 if 1 BTC = 50,000 USD.
    */
    Amount(10_000)
  }

  fn cost_to_aggregate(
    &self,
    coin: Coin,
    _reference_block: &Self::Block,
  ) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
    async move {
      assert_eq!(coin, Coin::Bitcoin);
      // TODO
      Ok(Amount(0))
    }
  }
}

impl<D: Db> TransactionPublisher<Transaction> for Rpc<D> {
  type EphemeralError = RpcError;

  fn publish(
    &self,
    tx: Transaction,
  ) -> impl Send + Future<Output = Result<(), Self::EphemeralError>> {
    async move { self.rpc.send_raw_transaction(&tx.0).await.map(|_| ()) }
  }
}
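The dust derivation above is plain arithmetic and can be sanity-checked in a few lines. A sketch reproducing the numbers in the comment (230 weight units per Taproot input, ceiling-divided to 58 vbytes, 290 sats at the required 5 sat/vbyte, then an order of magnitude of margin); illustrative only:

// Sanity-check of the dust arithmetic from the comment above
fn dust_derivation() -> u64 {
  // (4 * (36 + 1 + 4)) non-witness weight + (1 + 1 + 64) discounted witness weight
  let input_weight = (4 * (36 + 1 + 4)) + (1 + 1 + 64);
  assert_eq!(input_weight, 230);
  // Ceiling division by 4 converts weight units to vbytes
  let input_vbytes = (input_weight + 3) / 4;
  assert_eq!(input_vbytes, 58);
  // 5000 sat/kilo-vbyte = 5 sat/vbyte, the fee rate we require outputs be spendable under
  let required = 5 * input_vbytes;
  assert_eq!(required, 290);
  // Increased by roughly an order of magnitude for margin, per the comment
  10_000
}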
125
processor/bitcoin/src/scan.rs
Normal file
@@ -0,0 +1,125 @@
use std::{sync::LazyLock, collections::HashMap};

use ciphersuite::{Ciphersuite, Secp256k1};

use bitcoin_serai::{
  bitcoin::{
    blockdata::opcodes,
    script::{Instruction, ScriptBuf},
    Transaction,
  },
  wallet::Scanner,
};

use serai_client::networks::bitcoin::Address;

use serai_db::Get;
use primitives::OutputType;

use crate::hash_bytes;

const KEY_DST: &[u8] = b"Serai Bitcoin Processor Key Offset";
static BRANCH_BASE_OFFSET: LazyLock<<Secp256k1 as Ciphersuite>::F> =
  LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"branch"));
static CHANGE_BASE_OFFSET: LazyLock<<Secp256k1 as Ciphersuite>::F> =
  LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"change"));
static FORWARD_BASE_OFFSET: LazyLock<<Secp256k1 as Ciphersuite>::F> =
  LazyLock::new(|| Secp256k1::hash_to_F(KEY_DST, b"forward"));

// Unfortunately, we have per-key offsets as the root key plus the base offset may not be even.
// While we could tweak the key until all derivations are even, that'd require significantly more
// tweaking. This algorithmic complexity is preferred.
pub(crate) fn offsets_for_key(
  key: <Secp256k1 as Ciphersuite>::G,
) -> HashMap<OutputType, <Secp256k1 as Ciphersuite>::F> {
  let mut offsets = HashMap::from([(OutputType::External, <Secp256k1 as Ciphersuite>::F::ZERO)]);

  // We create an actual Bitcoin scanner as, upon adding an offset, it yields the tweaked offset
  // actually used
  let mut scanner = Scanner::new(key).unwrap();
  let mut register = |kind, offset| {
    let tweaked_offset = scanner.register_offset(offset).expect("offset collision");
    offsets.insert(kind, tweaked_offset);
  };

  register(OutputType::Branch, *BRANCH_BASE_OFFSET);
  register(OutputType::Change, *CHANGE_BASE_OFFSET);
  register(OutputType::Forwarded, *FORWARD_BASE_OFFSET);

  offsets
}

pub(crate) fn scanner(key: <Secp256k1 as Ciphersuite>::G) -> Scanner {
  let mut scanner = Scanner::new(key).unwrap();
  for (_, offset) in offsets_for_key(key) {
    let tweaked_offset = scanner.register_offset(offset).unwrap();
    assert_eq!(tweaked_offset, offset);
  }
  scanner
}

pub(crate) fn presumed_origin(getter: &impl Get, tx: &Transaction) -> Option<Address> {
  for input in &tx.input {
    let txid = hash_bytes(input.previous_output.txid.to_raw_hash());
    let vout = input.previous_output.vout;
    if let Some(address) =
      Address::new(crate::txindex::script_pubkey_for_on_chain_output(getter, txid, vout))
    {
      return Some(address);
    }
  }
  None?
}

// Checks if this script matches SHA256 PUSH MSG_HASH OP_EQUALVERIFY ..
fn matches_segwit_data(script: &ScriptBuf) -> Option<bool> {
  let mut ins = script.instructions();

  // The first item should be the SHA256 opcode
  if ins.next()?.ok()?.opcode()? != opcodes::all::OP_SHA256 {
    return Some(false);
  }

  // The next should be a data push
  ins.next()?.ok()?.push_bytes()?;

  // The next should be an equality check
  if ins.next()?.ok()?.opcode()? != opcodes::all::OP_EQUALVERIFY {
    return Some(false);
  }

  Some(true)
}

// Extract the data for Serai from a transaction
pub(crate) fn extract_serai_data(tx: &Transaction) -> Vec<u8> {
  // Check for an OP_RETURN output
  let mut data = (|| {
    for output in &tx.output {
      if output.script_pubkey.is_op_return() {
        match output.script_pubkey.instructions_minimal().last() {
          Some(Ok(Instruction::PushBytes(data))) => return Some(data.as_bytes().to_vec()),
          _ => continue,
        }
      }
    }
    None
  })();

  // Check the inputs
  if data.is_none() {
    for input in &tx.input {
      let witness = input.witness.to_vec();
      // The witness has to have at least 2 items: the message and the redeem script
      if witness.len() >= 2 {
        let redeem_script = ScriptBuf::from_bytes(witness.last().unwrap().clone());
        if matches_segwit_data(&redeem_script) == Some(true) {
          data = Some(witness[witness.len() - 2].clone()); // len() - 1 is the redeem_script
          break;
        }
      }
    }
  }

  data.unwrap_or(vec![])
}
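For a concrete picture of what `extract_serai_data` accepts via the OP_RETURN path: the data is the final push of an OP_RETURN output's script. A hedged sketch constructing such a script with bitcoin's builder API (the exact builder calls are an assumption about the re-exported `bitcoin` crate's surface):

// Sketch: building an OP_RETURN output whose final push is the Serai instruction data
#[cfg(test)]
fn op_return_script(data: &[u8]) -> bitcoin_serai::bitcoin::ScriptBuf {
  use bitcoin_serai::bitcoin::{
    blockdata::opcodes,
    script::{Builder, PushBytesBuf},
  };
  Builder::new()
    .push_opcode(opcodes::all::OP_RETURN)
    // extract_serai_data reads this, the last instruction, as the data
    .push_slice(PushBytesBuf::try_from(data.to_vec()).unwrap())
    .into_script()
}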
213
processor/bitcoin/src/scheduler.rs
Normal file
@@ -0,0 +1,213 @@
use core::future::Future;

use ciphersuite::{Ciphersuite, Secp256k1};

use bitcoin_serai::{
  bitcoin::ScriptBuf,
  wallet::{TransactionError, SignableTransaction as BSignableTransaction, p2tr_script_buf},
};

use serai_client::{
  primitives::{Coin, Amount},
  networks::bitcoin::Address,
};

use serai_db::Db;
use primitives::{OutputType, ReceivedOutput, Payment};
use scanner::{KeyFor, AddressFor, OutputFor, BlockFor};
use utxo_scheduler::{PlannedTransaction, TransactionPlanner};
use transaction_chaining_scheduler::{EffectedReceivedOutputs, Scheduler as GenericScheduler};

use crate::{
  scan::{offsets_for_key, scanner},
  output::Output,
  transaction::{SignableTransaction, Eventuality},
  rpc::Rpc,
};

fn address_from_serai_key(key: <Secp256k1 as Ciphersuite>::G, kind: OutputType) -> Address {
  let offset = <Secp256k1 as Ciphersuite>::G::GENERATOR * offsets_for_key(key)[&kind];
  Address::new(
    p2tr_script_buf(key + offset)
      .expect("creating address from Serai key which wasn't properly tweaked"),
  )
  .expect("couldn't create Serai-representable address for P2TR script")
}

fn signable_transaction<D: Db>(
  _reference_block: &BlockFor<Rpc<D>>,
  inputs: Vec<OutputFor<Rpc<D>>>,
  payments: Vec<Payment<AddressFor<Rpc<D>>>>,
  change: Option<KeyFor<Rpc<D>>>,
) -> Result<(SignableTransaction, BSignableTransaction), TransactionError> {
  assert!(
    inputs.len() <
      <Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_INPUTS
  );
  assert!(
    (payments.len() + usize::from(u8::from(change.is_some()))) <
      <Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::MAX_OUTPUTS
  );

  // TODO
  let fee_per_vbyte = 1;

  let inputs = inputs.into_iter().map(|input| input.output).collect::<Vec<_>>();

  let mut payments = payments
    .into_iter()
    .map(|payment| {
      (ScriptBuf::from(payment.address().clone()), {
        let balance = payment.balance();
        assert_eq!(balance.coin, Coin::Bitcoin);
        balance.amount.0
      })
    })
    .collect::<Vec<_>>();
  /*
    Push a payment to a key with a known private key which anyone can spend. If this transaction
    gets stuck, this lets anyone create a child transaction spending this output, raising the fee,
    getting the transaction unstuck (via CPFP).
  */
  payments.push((
    // The generator is even so this is valid
    p2tr_script_buf(<Secp256k1 as Ciphersuite>::G::GENERATOR).unwrap(),
    // This uses the minimum output value allowed, as defined as a constant in bitcoin-serai
    // TODO: Add a test for this comparing to bitcoin's `minimal_non_dust`
    bitcoin_serai::wallet::DUST,
  ));

  let change = change
    .map(<Planner as TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>>>::change_address);

  BSignableTransaction::new(
    inputs.clone(),
    &payments,
    change.clone().map(ScriptBuf::from),
    None,
    fee_per_vbyte,
  )
  .map(|bst| (SignableTransaction { inputs, payments, change, fee_per_vbyte }, bst))
}

#[derive(Clone)]
pub(crate) struct Planner;
impl<D: Db> TransactionPlanner<Rpc<D>, EffectedReceivedOutputs<Rpc<D>>> for Planner {
  type EphemeralError = ();

  type SignableTransaction = SignableTransaction;

  /*
    Bitcoin has a max weight of 400,000 (MAX_STANDARD_TX_WEIGHT).

    A non-SegWit TX will have 4 weight units per byte, leaving a max size of 100,000 bytes. While
    our inputs are entirely SegWit, such fine tuning is not necessary and could create issues in
    the future (if the size decreases or we misevaluate it). It also offers a minimal amount of
    benefit when we are able to logarithmically accumulate inputs/fulfill payments.

    For 128-byte inputs (36-byte output specification, 64-byte signature, whatever overhead) and
    64-byte outputs (40-byte script, 8-byte amount, whatever overhead), they together take up 192
    bytes.

    100,000 / 192 = 520
    520 * 192 leaves 160 bytes of overhead for the transaction structure itself.
  */
  const MAX_INPUTS: usize = 520;
  // We always reserve one output to create an anyone-can-spend output enabling anyone to use CPFP
  // to unstick any transactions which had too low of a fee.
  const MAX_OUTPUTS: usize = 519;

  fn branch_address(key: KeyFor<Rpc<D>>) -> AddressFor<Rpc<D>> {
    address_from_serai_key(key, OutputType::Branch)
  }
  fn change_address(key: KeyFor<Rpc<D>>) -> AddressFor<Rpc<D>> {
    address_from_serai_key(key, OutputType::Change)
  }
  fn forwarding_address(key: KeyFor<Rpc<D>>) -> AddressFor<Rpc<D>> {
    address_from_serai_key(key, OutputType::Forwarded)
  }

  fn calculate_fee(
    &self,
    reference_block: &BlockFor<Rpc<D>>,
    inputs: Vec<OutputFor<Rpc<D>>>,
    payments: Vec<Payment<AddressFor<Rpc<D>>>>,
    change: Option<KeyFor<Rpc<D>>>,
  ) -> impl Send + Future<Output = Result<Amount, Self::EphemeralError>> {
    async move {
      Ok(match signable_transaction::<D>(reference_block, inputs, payments, change) {
        Ok(tx) => Amount(tx.1.needed_fee()),
        Err(
          TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment,
        ) => panic!("malformed arguments to calculate_fee"),
        // No data, we have a minimum fee rate, we checked the amount of inputs/outputs
        Err(
          TransactionError::TooMuchData |
            TransactionError::TooLowFee |
            TransactionError::TooLargeTransaction,
        ) => unreachable!(),
        Err(TransactionError::NotEnoughFunds { fee, .. }) => Amount(fee),
      })
    }
  }

  fn plan(
    &self,
    reference_block: &BlockFor<Rpc<D>>,
    inputs: Vec<OutputFor<Rpc<D>>>,
    payments: Vec<Payment<AddressFor<Rpc<D>>>>,
    change: Option<KeyFor<Rpc<D>>>,
  ) -> impl Send
       + Future<
         Output = Result<
           PlannedTransaction<Rpc<D>, Self::SignableTransaction, EffectedReceivedOutputs<Rpc<D>>>,
           Self::EphemeralError,
         >,
       > {
    async move {
      let key = inputs.first().unwrap().key();
      for input in &inputs {
        assert_eq!(key, input.key());
      }

      let singular_spent_output = (inputs.len() == 1).then(|| inputs[0].id());
      match signable_transaction::<D>(reference_block, inputs.clone(), payments, change) {
        Ok(tx) => Ok(PlannedTransaction {
          signable: tx.0,
          eventuality: Eventuality { txid: tx.1.txid(), singular_spent_output },
          auxilliary: EffectedReceivedOutputs({
            let tx = tx.1.transaction();
            let scanner = scanner(key);

            let mut res = vec![];
            for output in scanner.scan_transaction(tx) {
              res.push(Output::new_with_presumed_origin(
                key,
                tx,
                // It shouldn't matter if this is wrong as we should never try to return these
                // We still provide an accurate value to ensure a lack of discrepancies
                Some(Address::new(inputs[0].output.output().script_pubkey.clone()).unwrap()),
                output,
              ));
            }
            res
          }),
        }),
        Err(
          TransactionError::NoInputs | TransactionError::NoOutputs | TransactionError::DustPayment,
        ) => panic!("malformed arguments to plan"),
        // No data, we have a minimum fee rate, we checked the amount of inputs/outputs
        Err(
          TransactionError::TooMuchData |
            TransactionError::TooLowFee |
            TransactionError::TooLargeTransaction,
        ) => unreachable!(),
        Err(TransactionError::NotEnoughFunds { .. }) => {
          panic!("plan called for a transaction without enough funds")
        }
      }
    }
  }
}

pub(crate) type Scheduler<D> = GenericScheduler<Rpc<D>, Planner>;
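The `MAX_INPUTS`/`MAX_OUTPUTS` bounds above follow from the 100,000-byte budget with 192 bytes per input/output pair. That arithmetic, checked explicitly (illustrative only):

// Sanity-check of the MAX_INPUTS derivation from the comment above
fn max_inputs_derivation() {
  // MAX_STANDARD_TX_WEIGHT is 400,000 weight units; a worst-case 4 WU/byte
  // leaves a 100,000-byte budget
  let budget = 400_000 / 4;
  // 128-byte inputs and 64-byte outputs pair up to 192 bytes
  let per_pair = 128 + 64;
  assert_eq!(budget / per_pair, 520);
  // 520 pairs leave 160 bytes of overhead for the transaction structure itself
  assert_eq!(budget - (520 * per_pair), 160);
}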
108
processor/bitcoin/src/txindex.rs
Normal file
@@ -0,0 +1,108 @@
use core::future::Future;

use bitcoin_serai::bitcoin::ScriptBuf;

use serai_db::{Get, DbTxn, Db};

use primitives::task::ContinuallyRan;
use scanner::ScannerFeed;

use crate::{db, rpc::Rpc, hash_bytes};

pub(crate) fn script_pubkey_for_on_chain_output(
  getter: &impl Get,
  txid: [u8; 32],
  vout: u32,
) -> ScriptBuf {
  // We index every single output on the blockchain, so this shouldn't be possible
  ScriptBuf::from_bytes(
    db::ScriptPubKey::get(getter, txid, vout)
      .expect("requested script public key for unknown output"),
  )
}

/*
  We want to be able to return received outputs. We do that by iterating over the inputs to find
  an address format we recognize, then setting that address as the address to return to.

  Since inputs only contain the script signatures, yet addresses are for script public keys, we
  need to pull up the output spent by an input and read the script public key from that. While we
  could use `txindex=1`, and an asynchronous call to the Bitcoin node, we:

  1) Can maintain a much smaller index ourselves
  2) Don't want the asynchronous call (which would require the flow be async, allowed to
     potentially error, and more latent)
  3) Don't want to risk Bitcoin's `txindex` corruptions (frequently observed on testnet)

  This task builds that index.
*/
pub(crate) struct TxIndexTask<D: Db>(pub(crate) Rpc<D>);

impl<D: Db> ContinuallyRan for TxIndexTask<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, String>> {
    async move {
      let latest_block_number = self
        .0
        .rpc
        .get_latest_block_number()
        .await
        .map_err(|e| format!("couldn't fetch latest block number: {e:?}"))?;
      let latest_block_number = u64::try_from(latest_block_number).unwrap();
      // `CONFIRMATIONS - 1` as any on-chain block inherently has one confirmation (itself)
      let finalized_block_number =
        latest_block_number.checked_sub(Rpc::<D>::CONFIRMATIONS - 1).ok_or(format!(
          "blockchain only just started and doesn't have {} blocks yet",
          Rpc::<D>::CONFIRMATIONS
        ))?;

      /*
        `finalized_block_number` is the latest block number minus confirmations. The blockchain
        may undetectably re-organize though, as while the scanner will maintain an index of
        finalized blocks and panics on reorganization, this runs prior to the scanner and that
        index.

        That no reorganization of `CONFIRMATIONS` blocks occurs is still an invariant. Even if
        that occurs, this saves the script public keys *by the transaction hash and output
        index*. Accordingly, it isn't invalidated on reorganization. The only risk would be if
        the new chain reorganized to include a transaction to Serai which we didn't index the
        parents of. If that happens, we'll panic when we scan the transaction, causing the
        invariant to be detected.
      */

      let finalized_block_number_in_db = db::LatestBlockToYieldAsFinalized::get(&self.0.db);
      let next_block = finalized_block_number_in_db.map_or(0, |block| block + 1);

      let mut iterated = false;
      for b in next_block ..= finalized_block_number {
        iterated = true;

        // Fetch the block
        let block_hash = self
          .0
          .rpc
          .get_block_hash(b.try_into().unwrap())
          .await
          .map_err(|e| format!("couldn't fetch block hash for block {b}: {e:?}"))?;
        let block = self
          .0
          .rpc
          .get_block(&block_hash)
          .await
          .map_err(|e| format!("couldn't fetch block {b}: {e:?}"))?;

        let mut txn = self.0.db.txn();

        for tx in &block.txdata {
          let txid = hash_bytes(tx.compute_txid().to_raw_hash());
          for (o, output) in tx.output.iter().enumerate() {
            let o = u32::try_from(o).unwrap();
            // Set the script public key for this output
            db::ScriptPubKey::set(&mut txn, txid, o, &output.script_pubkey.clone().into_bytes());
          }
        }

        db::LatestBlockToYieldAsFinalized::set(&mut txn, &b);
        txn.commit();
      }
      Ok(iterated)
    }
  }
}
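The finalized-block computation above offsets by `CONFIRMATIONS - 1` since the chain tip already carries one confirmation. A tiny worked example of that off-by-one (illustrative):

// Worked example of the `CONFIRMATIONS - 1` offset: with 6 confirmations required,
// block 100 is finalized once the chain tip is block 105 (blocks 100..=105 confirm it)
fn finalized(latest_block_number: u64, confirmations: u64) -> Option<u64> {
  latest_block_number.checked_sub(confirmations - 1)
}

fn demo() {
  assert_eq!(finalized(105, 6), Some(100));
  // A chain shorter than the confirmation depth has no finalized blocks yet
  assert_eq!(finalized(3, 6), None);
}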
69
processor/ethereum/Cargo.toml
Normal file
@@ -0,0 +1,69 @@
[package]
name = "serai-ethereum-processor"
version = "0.1.0"
description = "Serai Ethereum Processor"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
rand_core = { version = "0.6", default-features = false }

const-hex = { version = "1", default-features = false, features = ["std"] }
hex = { version = "0.4", default-features = false, features = ["std"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std", "secp256k1"] }
dkg = { path = "../../crypto/dkg", default-features = false, features = ["std", "evrf-secp256k1"] }
frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }

k256 = { version = "^0.13.1", default-features = false, features = ["std"] }

alloy-core = { version = "0.8", default-features = false }
alloy-rlp = { version = "0.3", default-features = false }

alloy-rpc-types-eth = { version = "0.3", default-features = false }
alloy-transport = { version = "0.3", default-features = false }
alloy-simple-request-transport = { path = "../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-rpc-client = { version = "0.3", default-features = false }
alloy-provider = { version = "0.3", default-features = false }

serai-client = { path = "../../substrate/client", default-features = false, features = ["ethereum"] }

zalloc = { path = "../../common/zalloc" }
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }

serai-env = { path = "../../common/env" }
serai-db = { path = "../../common/db" }

messages = { package = "serai-processor-messages", path = "../messages" }
key-gen = { package = "serai-processor-key-gen", path = "../key-gen" }

primitives = { package = "serai-processor-primitives", path = "../primitives" }
scheduler = { package = "serai-processor-scheduler-primitives", path = "../scheduler/primitives" }
scanner = { package = "serai-processor-scanner", path = "../scanner" }
smart-contract-scheduler = { package = "serai-processor-smart-contract-scheduler", path = "../scheduler/smart-contract" }
signers = { package = "serai-processor-signers", path = "../signers" }

ethereum-schnorr = { package = "ethereum-schnorr-contract", path = "../../networks/ethereum/schnorr" }
ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "./primitives" }
ethereum-router = { package = "serai-processor-ethereum-router", path = "./router" }
ethereum-erc20 = { package = "serai-processor-ethereum-erc20", path = "./erc20" }

bin = { package = "serai-processor-bin", path = "../bin" }

[features]
parity-db = ["bin/parity-db"]
rocksdb = ["bin/rocksdb"]
15 processor/ethereum/LICENSE Normal file
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
1 processor/ethereum/README.md Normal file
@@ -0,0 +1 @@
# Serai Ethereum Processor
@@ -1,5 +1,5 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
+// SPDX-License-Identifier: AGPL-3.0-only
+pragma solidity ^0.8.26;
 
 contract TestERC20 {
   event Transfer(address indexed from, address indexed to, uint256 value);
@@ -8,9 +8,11 @@ contract TestERC20 {
   function name() public pure returns (string memory) {
     return "Test ERC20";
   }
+
   function symbol() public pure returns (string memory) {
     return "TEST";
   }
+
   function decimals() public pure returns (uint8) {
     return 18;
   }
@@ -29,11 +31,13 @@ contract TestERC20 {
   function balanceOf(address owner) public view returns (uint256) {
     return balances[owner];
   }
+
   function transfer(address to, uint256 value) public returns (bool) {
     balances[msg.sender] -= value;
     balances[to] += value;
     return true;
   }
+
   function transferFrom(address from, address to, uint256 value) public returns (bool) {
     allowances[from][msg.sender] -= value;
     balances[from] -= value;
@@ -45,6 +49,7 @@ contract TestERC20 {
     allowances[msg.sender][spender] = value;
     return true;
   }
+
   function allowance(address owner, address spender) public view returns (uint256) {
     return allowances[owner][spender];
   }
164 processor/ethereum/TODO/old_processor.rs Normal file
@@ -0,0 +1,164 @@
TODO

  async fn publish_completion(
    &self,
    completion: &<Self::Eventuality as EventualityTrait>::Completion,
  ) -> Result<(), NetworkError> {
    // Publish this to the dedicated TX server for a solver to actually publish
    #[cfg(not(test))]
    {
    }

    // Publish this using a dummy account we fund with magic RPC commands
    #[cfg(test)]
    {
      let router = self.router().await;
      let router = router.as_ref().unwrap();

      let mut tx = match completion.command() {
        RouterCommand::UpdateSeraiKey { key, .. } => {
          router.update_serai_key(key, completion.signature())
        }
        RouterCommand::Execute { outs, .. } => router.execute(
          &outs.iter().cloned().map(Into::into).collect::<Vec<_>>(),
          completion.signature(),
        ),
      };
      tx.gas_limit = 1_000_000u64.into();
      tx.gas_price = 1_000_000_000u64.into();
      let tx = ethereum_serai::crypto::deterministically_sign(&tx);

      if self.provider.get_transaction_by_hash(*tx.hash()).await.unwrap().is_none() {
        self
          .provider
          .raw_request::<_, ()>(
            "anvil_setBalance".into(),
            [
              tx.recover_signer().unwrap().to_string(),
              (U256::from(tx.tx().gas_limit) * U256::from(tx.tx().gas_price)).to_string(),
            ],
          )
          .await
          .unwrap();

        let (tx, sig, _) = tx.into_parts();
        let mut bytes = vec![];
        tx.encode_with_signature_fields(&sig, &mut bytes);
        let pending_tx = self.provider.send_raw_transaction(&bytes).await.unwrap();
        self.mine_block().await;
        assert!(pending_tx.get_receipt().await.unwrap().status());
      }

      Ok(())
    }
  }

  #[cfg(test)]
  async fn get_transaction_by_eventuality(
    &self,
    block: usize,
    eventuality: &Self::Eventuality,
  ) -> Self::Transaction {
    // We mine 96 blocks to ensure the 32 blocks relevant are finalized
    // Back-check the prior two epochs in response to this
    // TODO: Review why this is sub(3) and not sub(2)
    for block in block.saturating_sub(3) ..= block {
      match eventuality.1 {
        RouterCommand::UpdateSeraiKey { nonce, .. } | RouterCommand::Execute { nonce, .. } => {
          let router = self.router().await;
          let router = router.as_ref().unwrap();

          let block = u64::try_from(block).unwrap();
          let filter = router
            .key_updated_filter()
            .from_block(block * 32)
            .to_block(((block + 1) * 32) - 1)
            .topic1(nonce);
          let logs = self.provider.get_logs(&filter).await.unwrap();
          if let Some(log) = logs.first() {
            return self
              .provider
              .get_transaction_by_hash(log.clone().transaction_hash.unwrap())
              .await
              .unwrap()
              .unwrap();
          };

          let filter = router
            .executed_filter()
            .from_block(block * 32)
            .to_block(((block + 1) * 32) - 1)
            .topic1(nonce);
          let logs = self.provider.get_logs(&filter).await.unwrap();
          if logs.is_empty() {
            continue;
          }
          return self
            .provider
            .get_transaction_by_hash(logs[0].transaction_hash.unwrap())
            .await
            .unwrap()
            .unwrap();
        }
      }
    }
panic!("couldn't find completion in any three of checked blocks");
|
||||
  }

  #[cfg(test)]
  async fn mine_block(&self) {
    self.provider.raw_request::<_, ()>("anvil_mine".into(), [96]).await.unwrap();
  }

  #[cfg(test)]
  async fn test_send(&self, send_to: Self::Address) -> Self::Block {
    use rand_core::OsRng;
    use ciphersuite::group::ff::Field;
    use ethereum_serai::alloy::sol_types::SolCall;

    let key = <Secp256k1 as Ciphersuite>::F::random(&mut OsRng);
    let address = ethereum_serai::crypto::address(&(Secp256k1::generator() * key));

    // Set a 1.1 ETH balance
    self
      .provider
      .raw_request::<_, ()>(
        "anvil_setBalance".into(),
        [Address(address).to_string(), "1100000000000000000".into()],
      )
      .await
      .unwrap();

    let value = U256::from_str_radix("1000000000000000000", 10).unwrap();
    let tx = ethereum_serai::alloy::consensus::TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 1_000_000_000u128,
      gas_limit: 200_000u128,
      to: ethereum_serai::alloy::primitives::TxKind::Call(send_to.0.into()),
      // 1 ETH
      value,
      input: ethereum_serai::router::abi::inInstructionCall::new((
        [0; 20].into(),
        value,
        vec![].into(),
      ))
      .abi_encode()
      .into(),
    };

    use ethereum_serai::alloy::{primitives::Signature, consensus::SignableTransaction};
    let sig = k256::ecdsa::SigningKey::from(k256::elliptic_curve::NonZeroScalar::new(key).unwrap())
      .sign_prehash_recoverable(tx.signature_hash().as_ref())
      .unwrap();

    let mut bytes = vec![];
    tx.encode_with_signature_fields(&Signature::from(sig), &mut bytes);
    let pending_tx = self.provider.send_raw_transaction(&bytes).await.ok().unwrap();

    // Mine an epoch containing this TX
    self.mine_block().await;
    assert!(pending_tx.get_receipt().await.unwrap().status());
    // Yield the freshly mined block
    self.get_block(self.get_latest_block_number().await.unwrap()).await.unwrap()
  }
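
As a side note on the epoch arithmetic in `get_transaction_by_eventuality` above: the filters
treat Ethereum blocks in 32-block epochs, with epoch `n` spanning blocks `n * 32` through
`((n + 1) * 32) - 1`, and the 96 blocks mined by `mine_block` amount to three full epochs. A
hypothetical helper spelling out those bounds:

// The inclusive range of Ethereum block numbers contained in 32-block epoch `n`,
// mirroring the `from_block`/`to_block` filter bounds above
fn epoch_bounds(n: u64) -> (u64, u64) {
  (n * 32, ((n + 1) * 32) - 1)
}

// e.g. epoch_bounds(2) == (64, 95)
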
31 processor/ethereum/TODO/tests/crypto.rs Normal file
@@ -0,0 +1,31 @@
// TODO

use rand_core::OsRng;

use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
  let (keys, _) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}
@@ -1,3 +1,5 @@
+// TODO
+
 use std::{sync::Arc, collections::HashMap};
 
 use rand_core::OsRng;
@@ -21,9 +23,7 @@ use crate::crypto::{address, deterministically_sign, PublicKey};
 mod crypto;
 
 #[cfg(test)]
-mod abi;
-#[cfg(test)]
 mod schnorr;
+use contracts::tests as abi;
 #[cfg(test)]
 mod router;
@@ -1,3 +1,5 @@
+// TODO
+
 use std::{convert::TryFrom, sync::Arc, collections::HashMap};
 
 use rand_core::OsRng;
34 processor/ethereum/deployer/Cargo.toml Normal file
@@ -0,0 +1,34 @@
[package]
name = "serai-processor-ethereum-deployer"
version = "0.1.0"
description = "The deployer for Serai's Ethereum contracts"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/processor/ethereum/deployer"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
publish = false
rust-version = "1.79"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
alloy-core = { version = "0.8", default-features = false }
alloy-consensus = { version = "0.3", default-features = false }

alloy-sol-types = { version = "0.8", default-features = false }
alloy-sol-macro = { version = "0.8", default-features = false }

alloy-rpc-types-eth = { version = "0.3", default-features = false }
alloy-transport = { version = "0.3", default-features = false }
alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
alloy-provider = { version = "0.3", default-features = false }

ethereum-primitives = { package = "serai-processor-ethereum-primitives", path = "../primitives", default-features = false }

[build-dependencies]
build-solidity-contracts = { path = "../../../networks/ethereum/build-contracts", default-features = false }
15 processor/ethereum/deployer/LICENSE Normal file
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
23 processor/ethereum/deployer/README.md Normal file
@@ -0,0 +1,23 @@
# Ethereum Smart Contracts Deployer

The deployer for Serai's Ethereum contracts.

## Goals

It should be possible to efficiently locate the Serai Router on any blockchain with the EVM,
without relying on any centralized (or even federated) entities. While deploying and locating an
instance of the Router would be trivial by using a fixed signature for the deployment transaction,
the Router must be constructed with the correct key for the Serai network (or set to have the
correct key post-construction). Since this cannot be guaranteed to occur, the process must be
retryable and the first successful invocation must be efficiently findable.
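
As a hypothetical illustration (not code from this repository) of why a fixed-signature
deployment transaction pins down an address: the transaction recovers to a fixed sender, and
`CREATE` derives the new contract's address solely from that sender and its nonce, so the first
deployment from that account lands at the same address on every chain. A sketch using
`alloy-core`, assuming its `Address::create` helper:

    use alloy_core::primitives::{address, Address};

    fn deployer_address() -> Address {
      // Placeholder for the sender recovered from the fixed deployment signature
      let sender = address!("00000000000000000000000000000000000000aa");
      // CREATE: keccak256(rlp([sender, nonce]))[12 ..]; a fresh account's first
      // deployment uses nonce 0
      sender.create(0)
    }
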
## Methodology

We define a contract, the Deployer, to deploy the Router. This contract could use `CREATE2` with
the key representing Serai as the salt, yet this would be open to collision attacks with just
2**80 complexity (the birthday bound for a 160-bit address, as every input to `CREATE2` can be
ground offline). Instead, we use `CREATE`, which would require 2**80 on-chain transactions
(infeasible) to use as the basis of a collision.
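
To make the comparison concrete, here is a hypothetical sketch (assuming `alloy-core`'s
`Address::create` and `Address::create2` helpers) of the two derivations. `CREATE2` is a pure
function of attacker-grindable inputs, so collisions in the 160-bit address space can be searched
offline at the ~2**80 birthday bound, while `CREATE` also commits to the deployer's nonce, which
only increments by publishing on-chain transactions:

    use alloy_core::primitives::{Address, B256};

    fn derived_addresses(
      deployer: Address,
      salt: B256,
      init_code_hash: B256,
      nonce: u64,
    ) -> (Address, Address) {
      // CREATE2: keccak256(0xff ++ deployer ++ salt ++ init_code_hash)[12 ..]
      let by_create2 = deployer.create2(salt, init_code_hash);
      // CREATE: keccak256(rlp([deployer, nonce]))[12 ..]
      let by_create = deployer.create(nonce);
      (by_create2, by_create)
    }
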
In order to efficiently find the contract for a key, the Deployer contract saves the addresses of
deployed contracts (indexed by the initialization code hash). This allows using a single call to a
contract with a known address to find the proper Router.
Some files were not shown because too many files have changed in this diff.