Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-13 22:49:25 +00:00)

Compare commits: e94a04d47c...undroppabl (362 Commits)
| Author | SHA1 | Date |
|---|---|---|
| | ce3b90541e | |
| | cb410cc4e0 | |
| | 6c145a5ec3 | |
| | a7fef2ba7a | |
| | 291ebf5e24 | |
| | 5e0e91c85d | |
| | b5a6b0693e | |
| | 3cc2abfedc | |
| | 0ce9aad9b2 | |
| | e35aa04afb | |
| | e7de5125a2 | |
| | 158140c3a7 | |
| | df9a9adaa8 | |
| | d854807edd | |
| | f501d46d44 | |
| | 74106b025f | |
| | e731b546ab | |
| | 77d60660d2 | |
| | 3c664ff05f | |
| | c05b0c9eba | |
| | 6d5049cab2 | |
| | 1419ba570a | |
| | 542bf2170a | |
| | 378d6b90cf | |
| | cbe83956aa | |
| | 091d485fd8 | |
| | 2a3eaf4d7e | |
| | 23122712cb | |
| | 47eb793ce9 | |
| | 9b0b5fd1e2 | |
| | 893a24a1cc | |
| | b101e2211a | |
| | 201a444e89 | |
| | 9833911e06 | |
| | 465e8498c4 | |
| | adf20773ac | |
| | 295c1bd044 | |
| | dda6e3e899 | |
| | 75a00f2a1a | |
| | 6cde2bb6ef | |
| | 20326bba73 | |
| | ce83b41712 | |
| | b2bd5d3a44 | |
| | de2d6568a4 | |
| | fd9b464b35 | |
| | 376a66b000 | |
| | 2121a9b131 | |
| | 419223c54e | |
| | a731c0005d | |
| | f27e4e3202 | |
| | f55165e016 | |
| | d9e9887d34 | |
| | 82e753db30 | |
| | 052388285b | |
| | 47a4e534ef | |
| | 257f691277 | |
| | c6d0fb477c | |
| | 96518500b1 | |
| | 2b8f481364 | |
| | 479ca0410a | |
| | 9a5a661d04 | |
| | 3daeea09e6 | |
| | a64e2004ab | |
| | f9f6d40695 | |
| | 4836c1676b | |
| | 985261574c | |
| | 3f3b0255f8 | |
| | 5fc8500f8d | |
| | 49c221cca2 | |
| | 906e2fb669 | |
| | ce676efb1f | |
| | 0a611cb155 | |
| | bcd3f14f4f | |
| | 6272c40561 | |
| | 2240a50a0c | |
| | 7e2b31e5da | |
| | 8c9441a1a5 | |
| | 5a42f66dc2 | |
| | b584a2beab | |
| | 26ccff25a1 | |
| | f0094b3c7c | |
| | 458f4fe170 | |
| | 1de8136739 | |
| | 445c49f030 | |
| | 5b74fc8ac1 | |
| | e67e301fc2 | |
| | 1d50792eed | |
| | 9c92709e62 | |
| | 3d15710a43 | |
| | df06da5552 | |
| | cef5bc95b0 | |
| | f336ab1ece | |
| | 2aebfb21af | |
| | 56af6c44eb | |
| | 4b34be05bf | |
| | 5b337c3ce8 | |
| | e119fb4c16 | |
| | ef972b2658 | |
| | 4de1a5804d | |
| | 147a6e43d0 | |
| | 066aa9eda4 | |
| | 9593a428e3 | |
| | 5b3c5ec02b | |
| | 9ccfa8a9f5 | |
| | 18897978d0 | |
| | 3192370484 | |
| | 8013c56195 | |
| | 834c16930b | |
| | 2920987173 | |
| | 26230377b0 | |
| | 2f5c0c68d0 | |
| | 8de42cc2d4 | |
| | cf4123b0f8 | |
| | 6a520a7412 | |
| | b2ec58a445 | |
| | 8e800885fb | |
| | 2a427382f1 | |
| | ce1689b325 | |
| | 0b61a75afc | |
| | 2aee21e507 | |
| | b3e003bd5d | |
| | 251a6e96e8 | |
| | 2c8af04781 | |
| | a0ed043372 | |
| | 2984d2f8cf | |
| | 554c5778e4 | |
| | 7e4c59a0a3 | |
| | 294462641e | |
| | ae76749513 | |
| | 1e1b821d34 | |
| | 702b4c860c | |
| | bc1bbf9951 | |
| | ec9211fd84 | |
| | 4292660eda | |
| | 8ea5acbacb | |
| | 1b1aa74770 | |
| | 861a8352e5 | |
| | e64827b6d7 | |
| | c27aaf8658 | |
| | 53567e91c8 | |
| | 1a08d50e16 | |
| | 855e53164e | |
| | 1367e41510 | |
| | a691be21c8 | |
| | 673cf8fd47 | |
| | 118d81bc90 | |
| | e75c4ec6ed | |
| | 9e628d217f | |
| | a717ae9ea7 | |
| | 98c3f75fa2 | |
| | 18178f3764 | |
| | bdc3bda04a | |
| | 433beac93a | |
| | 8f2a9301cf | |
| | d21034c349 | |
| | 381495618c | |
| | ee0efe7cde | |
| | 7feb7aed22 | |
| | cc75a92641 | |
| | a7d5640642 | |
| | ae61f3d359 | |
| | 4bcea31c2a | |
| | eb9bce6862 | |
| | 39be23d807 | |
| | 3f0f4d520d | |
| | 80ca2b780a | |
| | 0813351f1f | |
| | a38d135059 | |
| | 67f9f76fdf | |
| | 1c5bc2259e | |
| | bdf89f5350 | |
| | 239127aae5 | |
| | d9543bee40 | |
| | 8746b54a43 | |
| | 7761798a78 | |
| | 72a18bf8bb | |
| | 0616085109 | |
| | e23176deeb | |
| | 5551521e58 | |
| | a2d9aeaed7 | |
| | e1ad897f7e | |
| | 2edc2f3612 | |
| | e56af7fc51 | |
| | 947e1067d9 | |
| | b4e94f3d51 | |
| | 1b39138472 | |
| | e78236276a | |
| | 2c4c33e632 | |
| | 02409c5735 | |
| | f2cf03cedf | |
| | 0d4c8cf032 | |
| | b6811f9015 | |
| | fcd5fb85df | |
| | 3ac0265f07 | |
| | 9b8c8f8231 | |
| | 59fa49f750 | |
| | 723f529659 | |
| | 73af09effb | |
| | 4054e44471 | |
| | a8159e9070 | |
| | b61ba9d1bb | |
| | 776cbbb9a4 | |
| | 76a3f3ec4b | |
| | 93c7d06684 | |
| | 4cb838e248 | |
| | c988b7cdb0 | |
| | 017aab2258 | |
| | ba3a6f9e91 | |
| | e36b671f37 | |
| | 2d4b775b6e | |
| | 247cc8f0cc | |
| | 0ccf71df1e | |
| | 8aba71b9c4 | |
| | 46c12c0e66 | |
| | 3cc7b49492 | |
| | 0078858c1c | |
| | a3cb514400 | |
| | ed0221d804 | |
| | 4152bcacb2 | |
| | f07ec7bee0 | |
| | 7484eadbbb | |
| | 59ff944152 | |
| | 8f848b1abc | |
| | 100c80be9f | |
| | a353f9e2da | |
| | b62fc3a1fa | |
| | 8380653855 | |
| | b50b889918 | |
| | d570c1d277 | |
| | 2da24506a2 | |
| | 6e9cb74022 | |
| | 0c1aec29bb | |
| | 653ead1e8c | |
| | 8ff019265f | |
| | 0601d47789 | |
| | ebef38d93b | |
| | 75b4707002 | |
| | 3c787e005f | |
| | f11a6b4ff1 | |
| | fadc88d2ad | |
| | c88ebe985e | |
| | 6deb60513c | |
| | bd277e7032 | |
| | fc765bb9e0 | |
| | 13b74195f7 | |
| | f21838e0d5 | |
| | 76cbe6cf1e | |
| | 5999f5d65a | |
| | d429a0bae6 | |
| | 775824f373 | |
| | 41a74cb513 | |
| | e26da1ec34 | |
| | 7266e7f7ea | |
| | a8b9b7bad3 | |
| | 2ca7fccb08 | |
| | 4f6d91037e | |
| | 8db76ed67c | |
| | 920303e1b4 | |
| | 9f4b28e5ae | |
| | f9d02d43c2 | |
| | 8ac501028d | |
| | 612c67c537 | |
| | 04a971a024 | |
| | 738636c238 | |
| | 65f3f48517 | |
| | 7cc07d64d1 | |
| | fdfe520f9d | |
| | 77ef25416b | |
| | 7c1025dbcb | |
| | a771fbe1c6 | |
| | 9cebdf7c68 | |
| | 75251f04b4 | |
| | 6196642beb | |
| | 2bddf00222 | |
| | 9ab8ba0215 | |
| | 33e0c85f34 | |
| | 1e8f4e6156 | |
| | 66f3428051 | |
| | 7e71840822 | |
| | b65dbacd6a | |
| | 2fcd9530dd | |
| | 379780a3c9 | |
| | 945f31dfc7 | |
| | d5d1fc3eea | |
| | fd12cc0213 | |
| | ce805c8cc8 | |
| | bc0cc5a754 | |
| | f2ee4daf43 | |
| | 4e29678799 | |
| | 74d3075dae | |
| | 155ad48f4c | |
| | 951872b026 | |
| | 2b47feafed | |
| | a2717d73f0 | |
| | 8763ef23ed | |
| | 57a0ba966b | |
| | e843b4a2a0 | |
| | 2f3bd7a02a | |
| | 1e8a9ec5bd | |
| | 2f29c91d30 | |
| | f3b91bd44f | |
| | e4e4245ee3 | |
| | 669b2fef72 | |
| | 3af430d8de | |
| | dfb5a053ae | |
| | bdcc061bb4 | |
| | 2c7148d636 | |
| | 6b270bc6aa | |
| | 875c669a7a | |
| | 0d399ecb28 | |
| | 88440807e1 | |
| | c1a9256cc5 | |
| | 0d5756ffcf | |
| | ac7b98daac | |
| | efc7d70ab1 | |
| | 4e834873d3 | |
| | a506d74d69 | |
| | 394db44b30 | |
| | a2df54dd6a | |
| | efc45c391b | |
| | cccc1fc7e6 | |
| | bf1c493d9a | |
| | 3de1e4dee2 | |
| | 2591b5ade9 | |
| | e6620963c7 | |
| | d5205ce231 | |
| | 0f6878567f | |
| | 880565cb81 | |
| | 6f34c2ff77 | |
| | 1493f49416 | |
| | 2ccb0cd90d | |
| | b33a6487aa | |
| | 491500057b | |
| | d9f85fab26 | |
| | 7d2d739042 | |
| | 40cc180853 | |
| | 2aac6f6998 | |
| | 149c2a4437 | |
| | e772b8a5f7 | |
| | c0200df75a | |
| | 9955ef54a5 | |
| | 8e7e61adbd | |
| | 0cb24dde02 | |
| | 97bfb183e8 | |
| | 85fc31fd82 | |
| | 7b8bcae396 | |
| | 70fe52437c | |
| | ba657e23d1 | |
| | 32c24917c4 | |
| | 4ba961b2cb | |
| | c59be46e2f | |
| | 2c165e19ae | |
| | ee10692b23 | |
| | 7a68b065e0 | |
| | 3ddf1eec0c | |
| | 84f0e6c26e | |
| | 5bb3256d1f | |
| | 774424b70b | |
| | ed662568e2 | |
| | b744ac9a76 | |
| | d7f7f69738 | |
| | a2c3aba82b | |
.github/actions/bitcoin/action.yml (2 changes, vendored)

@@ -37,4 +37,4 @@ runs:

   - name: Bitcoin Regtest Daemon
     shell: bash
-    run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/bitcoin/run.sh -daemon
+    run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
@@ -42,8 +42,8 @@ runs:
     shell: bash
     run: |
       cargo install svm-rs
-      svm install 0.8.25
-      svm use 0.8.25
+      svm install 0.8.26
+      svm use 0.8.26

   # - name: Cache Rust
   #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (2 changes, vendored)

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
    required: false
-    default: v0.18.3.1
+    default: v0.18.3.4

 runs:
   using: "composite"
.github/actions/monero/action.yml (4 changes, vendored)

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.1
+    default: v0.18.3.4

 runs:
   using: "composite"
@@ -43,4 +43,4 @@ runs:

   - name: Monero Regtest Daemon
     shell: bash
-    run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/monero/run.sh --detach
+    run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach
.github/actions/test-dependencies/action.yml (4 changes, vendored)

@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.3.1
+    default: v0.18.3.4

   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "27.0"
+    default: "27.1"

 runs:
   using: "composite"
.github/workflows/coins-tests.yml (36 changes, vendored, deleted)

@@ -1,36 +0,0 @@
-name: coins/ Tests
-
-on:
-  push:
-    branches:
-      - develop
-    paths:
-      - "common/**"
-      - "crypto/**"
-      - "coins/**"
-
-  pull_request:
-    paths:
-      - "common/**"
-      - "crypto/**"
-      - "coins/**"
-
-  workflow_dispatch:
-
-jobs:
-  test-coins:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Test Dependencies
-        uses: ./.github/actions/test-dependencies
-
-      - name: Run Tests
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-            -p bitcoin-serai \
-            -p alloy-simple-request-transport \
-            -p ethereum-serai \
-            -p monero-generators \
-            -p monero-serai
.github/workflows/common-tests.yml (2 changes, vendored)

@@ -27,6 +27,8 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p std-shims \
             -p zalloc \
+            -p patchable-async-sleep \
             -p serai-db \
             -p serai-env \
+            -p serai-task \
             -p simple-request
.github/workflows/coordinator-tests.yml (6 changes, vendored)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "coordinator/**"
       - "orchestration/**"
@@ -18,7 +18,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "coordinator/**"
       - "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests
.github/workflows/crypto-tests.yml (4 changes, vendored)

@@ -35,6 +35,10 @@ jobs:
             -p multiexp \
             -p schnorr-signatures \
             -p dleq \
+            -p generalized-bulletproofs \
+            -p generalized-bulletproofs-circuit-abstraction \
+            -p ec-divisors \
+            -p generalized-bulletproofs-ec-gadgets \
             -p dkg \
             -p modular-frost \
             -p frost-schnorrkel
.github/workflows/full-stack-tests.yml (2 changes, vendored)

@@ -19,4 +19,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests
.github/workflows/lint.yml (31 changes, vendored)

@@ -73,6 +73,15 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+        with:
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          cache: false
+
+      - name: Run forge fmt
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+
   machete:
     runs-on: ubuntu-latest
     steps:
@@ -81,3 +90,25 @@ jobs:
       run: |
         cargo install cargo-machete
         cargo machete
+
+  slither:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Slither
+        run: |
+          python3 -m pip install solc-select
+          solc-select install 0.8.26
+          solc-select use 0.8.26
+
+          python3 -m pip install slither-analyzer
+
+          slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
+          slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
+          slither processor/ethereum/deployer/contracts/Deployer.sol
+          slither processor/ethereum/erc20/contracts/IERC20.sol
+
+          cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
+          cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
+          cd processor/ethereum/router/contracts
+          slither Router.sol
.github/workflows/message-queue-tests.yml (2 changes, vendored)

@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests
.github/workflows/monero-tests.yaml (39 changes, vendored)

@@ -5,12 +5,12 @@ on:
     branches:
       - develop
     paths:
-      - "coins/monero/**"
+      - "networks/monero/**"
       - "processor/**"

   pull_request:
     paths:
-      - "coins/monero/**"
+      - "networks/monero/**"
       - "processor/**"

   workflow_dispatch:
@@ -26,7 +26,22 @@ jobs:
         uses: ./.github/actions/test-dependencies

       - name: Run Unit Tests Without Features
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib

       # Doesn't run unit tests with features as the tests workflow will
@@ -35,7 +50,7 @@ jobs:
     # Test against all supported protocol versions
     strategy:
       matrix:
-        version: [v0.17.3.2, v0.18.2.0]
+        version: [v0.17.3.2, v0.18.3.4]

     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
@@ -46,11 +61,17 @@ jobs:
           monero-version: ${{ matrix.version }}

       - name: Run Integration Tests Without Features
-        # Runs with the binaries feature so the binaries build
-        # https://github.com/rust-lang/cargo/issues/8396
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --features binaries --test '*'
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'

       - name: Run Integration Tests
         # Don't run if the the tests workflow also will
-        if: ${{ matrix.version != 'v0.18.2.0' }}
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
+        if: ${{ matrix.version != 'v0.18.3.4' }}
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/msrv.yml (259 changes, vendored, new file)

@@ -0,0 +1,259 @@
+name: Weekly MSRV Check
+
+on:
+  schedule:
+    - cron: "0 0 * * 0"
+  workflow_dispatch:
+
+jobs:
+  msrv-common:
+    name: Run cargo msrv on common
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on common
+        run: |
+          cargo msrv verify --manifest-path common/zalloc/Cargo.toml
+          cargo msrv verify --manifest-path common/std-shims/Cargo.toml
+          cargo msrv verify --manifest-path common/env/Cargo.toml
+          cargo msrv verify --manifest-path common/db/Cargo.toml
+          cargo msrv verify --manifest-path common/task/Cargo.toml
+          cargo msrv verify --manifest-path common/request/Cargo.toml
+          cargo msrv verify --manifest-path common/patchable-async-sleep/Cargo.toml
+
+  msrv-crypto:
+    name: Run cargo msrv on crypto
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on crypto
+        run: |
+          cargo msrv verify --manifest-path crypto/transcript/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/ff-group-tests/Cargo.toml
+          cargo msrv verify --manifest-path crypto/dalek-ff-group/Cargo.toml
+          cargo msrv verify --manifest-path crypto/ed448/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/multiexp/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/dleq/Cargo.toml
+          cargo msrv verify --manifest-path crypto/ciphersuite/Cargo.toml
+          cargo msrv verify --manifest-path crypto/schnorr/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/evrf/generalized-bulletproofs/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/circuit-abstraction/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/divisors/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/ec-gadgets/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/embedwards25519/Cargo.toml
+          cargo msrv verify --manifest-path crypto/evrf/secq256k1/Cargo.toml
+
+          cargo msrv verify --manifest-path crypto/dkg/Cargo.toml
+          cargo msrv verify --manifest-path crypto/frost/Cargo.toml
+          cargo msrv verify --manifest-path crypto/schnorrkel/Cargo.toml
+
+  msrv-networks:
+    name: Run cargo msrv on networks
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on networks
+        run: |
+          cargo msrv verify --manifest-path networks/bitcoin/Cargo.toml
+
+          cargo msrv verify --manifest-path networks/ethereum/build-contracts/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/schnorr/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/alloy-simple-request-transport/Cargo.toml
+          cargo msrv verify --manifest-path networks/ethereum/relayer/Cargo.toml --features parity-db
+
+          cargo msrv verify --manifest-path networks/monero/io/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/generators/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/primitives/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/mlsag/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/clsag/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/borromean/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/ringct/bulletproofs/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/rpc/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/rpc/simple-request/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/wallet/address/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/wallet/Cargo.toml
+          cargo msrv verify --manifest-path networks/monero/verify-chain/Cargo.toml
+
+  msrv-message-queue:
+    name: Run cargo msrv on message-queue
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on message-queue
+        run: |
+          cargo msrv verify --manifest-path message-queue/Cargo.toml --features parity-db
+
+  msrv-processor:
+    name: Run cargo msrv on processor
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on processor
+        run: |
+          cargo msrv verify --manifest-path processor/view-keys/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/messages/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/scanner/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/scheduler/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/smart-contract/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/standard/Cargo.toml
+          cargo msrv verify --manifest-path processor/scheduler/utxo/transaction-chaining/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/key-gen/Cargo.toml
+          cargo msrv verify --manifest-path processor/frost-attempt-manager/Cargo.toml
+          cargo msrv verify --manifest-path processor/signers/Cargo.toml
+          cargo msrv verify --manifest-path processor/bin/Cargo.toml --features parity-db
+
+          cargo msrv verify --manifest-path processor/bitcoin/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/ethereum/primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/test-primitives/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/erc20/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/deployer/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/router/Cargo.toml
+          cargo msrv verify --manifest-path processor/ethereum/Cargo.toml
+
+          cargo msrv verify --manifest-path processor/monero/Cargo.toml
+
+  msrv-coordinator:
+    name: Run cargo msrv on coordinator
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on coordinator
+        run: |
+          cargo msrv verify --manifest-path coordinator/tributary-sdk/tendermint/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary-sdk/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/cosign/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/substrate/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/tributary/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/p2p/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/p2p/libp2p/Cargo.toml
+          cargo msrv verify --manifest-path coordinator/Cargo.toml
+
+  msrv-substrate:
+    name: Run cargo msrv on substrate
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on substrate
+        run: |
+          cargo msrv verify --manifest-path substrate/primitives/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/coins/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/coins/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/dex/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/economic-security/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/genesis-liquidity/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/genesis-liquidity/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/in-instructions/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/in-instructions/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/validator-sets/pallet/Cargo.toml
+          cargo msrv verify --manifest-path substrate/validator-sets/primitives/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/emissions/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/emissions/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/signals/primitives/Cargo.toml
+          cargo msrv verify --manifest-path substrate/signals/pallet/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/abi/Cargo.toml
+          cargo msrv verify --manifest-path substrate/client/Cargo.toml
+
+          cargo msrv verify --manifest-path substrate/runtime/Cargo.toml
+          cargo msrv verify --manifest-path substrate/node/Cargo.toml
+
+  msrv-orchestration:
+    name: Run cargo msrv on orchestration
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on message-queue
+        run: |
+          cargo msrv verify --manifest-path orchestration/Cargo.toml
+
+  msrv-mini:
+    name: Run cargo msrv on mini
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Install Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Install cargo msrv
+        run: cargo install --locked cargo-msrv
+
+      - name: Run cargo msrv on mini
+        run: |
+          cargo msrv verify --manifest-path mini/Cargo.toml
.github/workflows/networks-tests.yml (52 changes, vendored, new file)

@@ -0,0 +1,52 @@
+name: networks/ Tests
+
+on:
+  push:
+    branches:
+      - develop
+    paths:
+      - "common/**"
+      - "crypto/**"
+      - "networks/**"
+
+  pull_request:
+    paths:
+      - "common/**"
+      - "crypto/**"
+      - "networks/**"
+
+  workflow_dispatch:
+
+jobs:
+  test-networks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Test Dependencies
+        uses: ./.github/actions/test-dependencies
+
+      - name: Run Tests
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
+            -p bitcoin-serai \
+            -p build-solidity-contracts \
+            -p ethereum-schnorr-contract \
+            -p alloy-simple-request-transport \
+            -p serai-ethereum-relayer \
+            -p monero-io \
+            -p monero-generators \
+            -p monero-primitives \
+            -p monero-mlsag \
+            -p monero-clsag \
+            -p monero-borromean \
+            -p monero-bulletproofs \
+            -p monero-serai \
+            -p monero-rpc \
+            -p monero-simple-request-rpc \
+            -p monero-address \
+            -p monero-wallet \
+            -p monero-seed \
+            -p polyseed \
+            -p monero-wallet-util \
+            -p monero-serai-verify-chain
.github/workflows/no-std.yml (6 changes, vendored)

@@ -7,14 +7,14 @@ on:
     paths:
       - "common/**"
      - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "tests/no-std/**"

   pull_request:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "tests/no-std/**"

   workflow_dispatch:
@@ -32,4 +32,4 @@ jobs:
         run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf

       - name: Verify no-std builds
-        run: cd tests/no-std && CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf
+        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
.github/workflows/processor-tests.yml (6 changes, vendored)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "orchestration/**"
@@ -18,7 +18,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests
.github/workflows/reproducible-runtime.yml (2 changes, vendored)

@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies

       - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
.github/workflows/tests.yml (37 changes, vendored)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "coordinator/**"
@@ -17,7 +17,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "coordinator/**"
@@ -39,9 +39,33 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-message-queue \
             -p serai-processor-messages \
-            -p serai-processor \
+            -p serai-processor-key-gen \
+            -p serai-processor-view-keys \
+            -p serai-processor-frost-attempt-manager \
+            -p serai-processor-primitives \
+            -p serai-processor-scanner \
+            -p serai-processor-scheduler-primitives \
+            -p serai-processor-utxo-scheduler-primitives \
+            -p serai-processor-utxo-scheduler \
+            -p serai-processor-transaction-chaining-scheduler \
+            -p serai-processor-smart-contract-scheduler \
+            -p serai-processor-signers \
+            -p serai-processor-bin \
+            -p serai-bitcoin-processor \
+            -p serai-processor-ethereum-primitives \
+            -p serai-processor-ethereum-test-primitives \
+            -p serai-processor-ethereum-deployer \
+            -p serai-processor-ethereum-router \
+            -p serai-processor-ethereum-erc20 \
+            -p serai-ethereum-processor \
+            -p serai-monero-processor \
             -p tendermint-machine \
-            -p tributary-chain \
+            -p tributary-sdk \
+            -p serai-cosign \
+            -p serai-coordinator-substrate \
+            -p serai-coordinator-tributary \
+            -p serai-coordinator-p2p \
+            -p serai-coordinator-libp2p-p2p \
             -p serai-coordinator \
             -p serai-orchestrator \
             -p serai-docker-tests
@@ -63,6 +87,11 @@ jobs:
             -p serai-dex-pallet \
             -p serai-validator-sets-primitives \
             -p serai-validator-sets-pallet \
+            -p serai-genesis-liquidity-primitives \
+            -p serai-genesis-liquidity-pallet \
+            -p serai-emissions-primitives \
+            -p serai-emissions-pallet \
+            -p serai-economic-security-pallet \
             -p serai-in-instructions-primitives \
             -p serai-in-instructions-pallet \
             -p serai-signals-primitives \
Cargo.lock (2915 changes, generated): file diff suppressed because it is too large.
Cargo.toml (127 changes)

@@ -6,7 +6,6 @@ members = [
   "patches/parking_lot",
   "patches/zstd",
   "patches/rocksdb",
-  "patches/proc-macro-crate",

   # std patches
   "patches/matches",
@@ -18,8 +17,10 @@ members = [

   "common/std-shims",
   "common/zalloc",
+  "common/patchable-async-sleep",
   "common/db",
   "common/env",
+  "common/task",
   "common/request",

   "crypto/transcript",
@@ -30,43 +31,78 @@ members = [
   "crypto/ciphersuite",

   "crypto/multiexp",

   "crypto/schnorr",
   "crypto/dleq",

+  "crypto/evrf/secq256k1",
+  "crypto/evrf/embedwards25519",
+  "crypto/evrf/generalized-bulletproofs",
+  "crypto/evrf/circuit-abstraction",
+  "crypto/evrf/divisors",
+  "crypto/evrf/ec-gadgets",
+
   "crypto/dkg",
   "crypto/frost",
   "crypto/schnorrkel",

-  "coins/bitcoin",
+  "networks/bitcoin",

-  "coins/ethereum/alloy-simple-request-transport",
-  "coins/ethereum",
-  "coins/ethereum/relayer",
+  "networks/ethereum/build-contracts",
+  "networks/ethereum/schnorr",
+  "networks/ethereum/alloy-simple-request-transport",
+  "networks/ethereum/relayer",

-  "coins/monero/io",
-  "coins/monero/generators",
-  "coins/monero/primitives",
-  "coins/monero/ringct/mlsag",
-  "coins/monero/ringct/clsag",
-  "coins/monero/ringct/borromean",
-  "coins/monero/ringct/bulletproofs",
-  "coins/monero",
-  "coins/monero/rpc",
-  "coins/monero/rpc/simple-request",
-  "coins/monero/wallet/address",
-  "coins/monero/wallet",
-  "coins/monero/wallet/seed",
-  "coins/monero/wallet/polyseed",
-  "coins/monero/wallet/util",
-  "coins/monero/verify-chain",
+  "networks/monero/io",
+  "networks/monero/generators",
+  "networks/monero/primitives",
+  "networks/monero/ringct/mlsag",
+  "networks/monero/ringct/clsag",
+  "networks/monero/ringct/borromean",
+  "networks/monero/ringct/bulletproofs",
+  "networks/monero",
+  "networks/monero/rpc",
+  "networks/monero/rpc/simple-request",
+  "networks/monero/wallet/address",
+  "networks/monero/wallet",
+  "networks/monero/wallet/seed",
+  "networks/monero/wallet/polyseed",
+  "networks/monero/wallet/util",
+  "networks/monero/verify-chain",

   "message-queue",

   "processor/messages",
-  "processor",
-
-  "coordinator/tributary/tendermint",
+  "processor/key-gen",
+  "processor/view-keys",
+  "processor/frost-attempt-manager",
+
+  "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
+  "processor/scheduler/utxo/primitives",
+  "processor/scheduler/utxo/standard",
+  "processor/scheduler/utxo/transaction-chaining",
+  "processor/scheduler/smart-contract",
+  "processor/signers",
+
+  "processor/bin",
+  "processor/bitcoin",
+  "processor/ethereum/primitives",
+  "processor/ethereum/test-primitives",
+  "processor/ethereum/deployer",
+  "processor/ethereum/router",
+  "processor/ethereum/erc20",
+  "processor/ethereum",
+  "processor/monero",
+
+  "coordinator/tributary-sdk/tendermint",
+  "coordinator/tributary-sdk",
+  "coordinator/cosign",
+  "coordinator/substrate",
   "coordinator/tributary",
+  "coordinator/p2p",
+  "coordinator/p2p/libp2p",
   "coordinator",

   "substrate/primitives",
@@ -74,12 +110,22 @@ members = [
   "substrate/coins/primitives",
   "substrate/coins/pallet",

-  "substrate/in-instructions/primitives",
-  "substrate/in-instructions/pallet",
   "substrate/dex/pallet",

   "substrate/validator-sets/primitives",
   "substrate/validator-sets/pallet",

+  "substrate/genesis-liquidity/primitives",
+  "substrate/genesis-liquidity/pallet",
+
+  "substrate/emissions/primitives",
+  "substrate/emissions/pallet",
+
+  "substrate/economic-security/pallet",
+
+  "substrate/in-instructions/primitives",
+  "substrate/in-instructions/pallet",
+
   "substrate/signals/primitives",
   "substrate/signals/pallet",
@@ -108,18 +154,32 @@ members = [
 # to the extensive operations required for Bulletproofs
 [profile.dev.package]
 subtle = { opt-level = 3 }
-curve25519-dalek = { opt-level = 3 }

 ff = { opt-level = 3 }
 group = { opt-level = 3 }

 crypto-bigint = { opt-level = 3 }
 secp256k1 = { opt-level = 3 }
+curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
 minimal-ed448 = { opt-level = 3 }

 multiexp = { opt-level = 3 }

-monero-serai = { opt-level = 3 }
+secq256k1 = { opt-level = 3 }
+embedwards25519 = { opt-level = 3 }
+generalized-bulletproofs = { opt-level = 3 }
+generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+ec-divisors = { opt-level = 3 }
+generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+
+dkg = { opt-level = 3 }
+
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }

 [profile.release]
 panic = "unwind"
@@ -128,17 +188,12 @@ panic = "unwind"
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

-# Needed due to dockertest's usage of `Rc`s when we need `Arc`s
-dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }
-
 parking_lot_core = { path = "patches/parking_lot_core" }
 parking_lot = { path = "patches/parking_lot" }
 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
 # Needed for WAL compression
 rocksdb = { path = "patches/rocksdb" }
-# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
-proc-macro-crate = { path = "patches/proc-macro-crate" }

 # is-terminal now has an std-based solution with an equivalent API
 is-terminal = { path = "patches/is-terminal" }
@@ -153,8 +208,12 @@ matches = { path = "patches/matches" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

+# The official pasta_curves repo doesn't support Zeroize
+pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+
 [workspace.lints.clippy]
 unwrap_or_default = "allow"
+map_unwrap_or = "allow"
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"
@@ -182,7 +241,6 @@ manual_instant_elapsed = "deny"
 manual_let_else = "deny"
 manual_ok_or = "deny"
 manual_string_new = "deny"
-map_unwrap_or = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
@@ -194,6 +252,7 @@ range_plus_one = "deny"
 redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
+string_slice = "deny"
 unchecked_duration_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
@@ -24,7 +24,7 @@ wallet.
 infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
 needed for Bitcoin-Monero atomic swaps.

-- `coins`: Various coin libraries intended for usage in Serai yet also by the
+- `networks`: Various libraries intended for usage in Serai yet also by the
   wider community. This means they will always support the functionality Serai
   needs, yet won't disadvantage other use cases when possible.
@@ -1,6 +0,0 @@
|
||||
# Cypher Stack /coins/bitcoin Audit, August 2023
|
||||
|
||||
This audit was over the /coins/bitcoin folder. It is encompassing up to commit
|
||||
5121ca75199dff7bd34230880a1fdd793012068c.
|
||||
|
||||
Please see https://github.com/cypherstack/serai-btc-audit for provenance.
|
||||
@@ -0,0 +1,7 @@
+# Cypher Stack /networks/bitcoin Audit, August 2023
+
+This audit was over the `/networks/bitcoin` folder (at the time located at
+`/coins/bitcoin`). It is encompassing up to commit
+5121ca75199dff7bd34230880a1fdd793012068c.
+
+Please see https://github.com/cypherstack/serai-btc-audit for provenance.
coins/ethereum/.gitignore (3 changes, vendored, deleted)

@@ -1,3 +0,0 @@
-# Solidity build outputs
-cache
-artifacts
@@ -1,49 +0,0 @@
-[package]
-name = "ethereum-serai"
-version = "0.1.0"
-description = "An Ethereum library supporting Schnorr signing and on-chain verification"
-license = "AGPL-3.0-only"
-repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum"
-authors = ["Luke Parker <lukeparker5132@gmail.com>", "Elizabeth Binks <elizabethjbinks@gmail.com>"]
-edition = "2021"
-publish = false
-rust-version = "1.79"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-thiserror = { version = "1", default-features = false }
-
-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-
-transcript = { package = "flexible-transcript", path = "../../crypto/transcript", default-features = false, features = ["recommended"] }
-
-group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa", "arithmetic"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["secp256k1"] }
-
-alloy-core = { version = "0.7", default-features = false }
-alloy-sol-types = { version = "0.7", default-features = false, features = ["json"] }
-alloy-consensus = { version = "0.1", default-features = false, features = ["k256"] }
-alloy-network = { version = "0.1", default-features = false }
-alloy-rpc-types-eth = { version = "0.1", default-features = false }
-alloy-rpc-client = { version = "0.1", default-features = false }
-alloy-simple-request-transport = { path = "./alloy-simple-request-transport", default-features = false }
-alloy-provider = { version = "0.1", default-features = false }
-
-alloy-node-bindings = { version = "0.1", default-features = false, optional = true }
-
-[dev-dependencies]
-frost = { package = "modular-frost", path = "../../crypto/frost", default-features = false, features = ["tests"] }
-
-tokio = { version = "1", features = ["macros"] }
-
-alloy-node-bindings = { version = "0.1", default-features = false }
-
-[features]
-tests = ["alloy-node-bindings", "frost/tests"]
@@ -1,15 +0,0 @@
|
||||
# Ethereum
|
||||
|
||||
This package contains Ethereum-related functionality, specifically deploying and
|
||||
interacting with Serai contracts.
|
||||
|
||||
While `monero-serai` and `bitcoin-serai` are general purpose libraries,
|
||||
`ethereum-serai` is Serai specific. If any of the utilities are generally
|
||||
desired, please fork and maintain your own copy to ensure the desired
|
||||
functionality is preserved, or open an issue to request we make this library
|
||||
general purpose.
|
||||
|
||||
### Dependencies
|
||||
|
||||
- solc
|
||||
- [Foundry](https://github.com/foundry-rs/foundry)
|
||||
@@ -1,41 +0,0 @@
|
||||
use std::process::Command;
|
||||
|
||||
fn main() {
|
||||
println!("cargo:rerun-if-changed=contracts/*");
|
||||
println!("cargo:rerun-if-changed=artifacts/*");
|
||||
|
||||
for line in String::from_utf8(Command::new("solc").args(["--version"]).output().unwrap().stdout)
|
||||
.unwrap()
|
||||
.lines()
|
||||
{
|
||||
if let Some(version) = line.strip_prefix("Version: ") {
|
||||
let version = version.split('+').next().unwrap();
|
||||
assert_eq!(version, "0.8.25");
|
||||
}
|
||||
}
|
||||
|
||||
#[rustfmt::skip]
|
||||
let args = [
|
||||
"--base-path", ".",
|
||||
"-o", "./artifacts", "--overwrite",
|
||||
"--bin", "--abi",
|
||||
"--via-ir", "--optimize",
|
||||
|
||||
"./contracts/IERC20.sol",
|
||||
|
||||
"./contracts/Schnorr.sol",
|
||||
"./contracts/Deployer.sol",
|
||||
"./contracts/Sandbox.sol",
|
||||
"./contracts/Router.sol",
|
||||
|
||||
"./src/tests/contracts/Schnorr.sol",
|
||||
"./src/tests/contracts/ERC20.sol",
|
||||
|
||||
"--no-color",
|
||||
];
|
||||
let solc = Command::new("solc").args(args).output().unwrap();
|
||||
assert!(solc.status.success());
|
||||
for line in String::from_utf8(solc.stderr).unwrap().lines() {
|
||||
assert!(!line.starts_with("Error:"));
|
||||
}
|
||||
}
|
||||
@@ -1,52 +0,0 @@
|
||||
// SPDX-License-Identifier: AGPLv3
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
/*
|
||||
The expected deployment process of the Router is as follows:
|
||||
|
||||
1) A transaction deploying Deployer is made. Then, a deterministic signature is
|
||||
created such that an account with an unknown private key is the creator of
|
||||
the contract. Anyone can fund this address, and once anyone does, the
|
||||
transaction deploying Deployer can be published by anyone. No other
|
||||
transaction may be made from that account.
|
||||
|
||||
2) Anyone deploys the Router through the Deployer. This uses a sequential nonce
|
||||
such that meet-in-the-middle attacks, with complexity 2**80, aren't feasible.
|
||||
While such attacks would still be feasible if the Deployer's address was
|
||||
controllable, the usage of a deterministic signature with a NUMS method
|
||||
prevents that.
|
||||
|
||||
This doesn't have any denial-of-service risks and will resolve once anyone steps
|
||||
forward as deployer. This does fail to guarantee an identical address across
|
||||
every chain, though it enables letting anyone efficiently ask the Deployer for
|
||||
the address (with the Deployer having an identical address on every chain).
|
||||
|
||||
Unfortunately, guaranteeing identical addresses aren't feasible. We'd need the
|
||||
Deployer contract to use a consistent salt for the Router, yet the Router must
|
||||
be deployed with a specific public key for Serai. Since Ethereum isn't able to
|
||||
determine a valid public key (one the result of a Serai DKG) from a dishonest
|
||||
public key, we have to allow multiple deployments with Serai being the one to
|
||||
determine which to use.
|
||||
|
||||
The alternative would be to have a council publish the Serai key on-Ethereum,
|
||||
with Serai verifying the published result. This would introduce a DoS risk in
|
||||
the council not publishing the correct key/not publishing any key.
|
||||
*/
|
||||
|
||||
contract Deployer {
|
||||
event Deployment(bytes32 indexed init_code_hash, address created);
|
||||
|
||||
error DeploymentFailed();
|
||||
|
||||
function deploy(bytes memory init_code) external {
|
||||
address created;
|
||||
assembly {
|
||||
created := create(0, add(init_code, 0x20), mload(init_code))
|
||||
}
|
||||
if (created == address(0)) {
|
||||
revert DeploymentFailed();
|
||||
}
|
||||
// These may be emitted out of order upon re-entrancy
|
||||
emit Deployment(keccak256(init_code), created);
|
||||
}
|
||||
}
|
||||
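Because the deleted Deployer deploys via CREATE from a fixed address with a sequential nonce, the address of anything it deploys is computable off-chain from the Deployer's address and that nonce (CREATE addresses are `keccak256(rlp([sender, nonce]))[12..]`). Below is a minimal Rust sketch of that computation, assuming only the `sha3` crate; `create_address`, its single-byte-nonce restriction, and the zeroed deployer address are illustrative, not part of the Serai codebase.

```rust
// Sketch: computing a CREATE address off-chain, as a client of the (deleted)
// Deployer might. Assumes the `sha3` crate for Keccak-256.
use sha3::{Digest, Keccak256};

/// CREATE addresses are keccak256(rlp([sender, nonce]))[12..].
/// This helper only handles nonces below 128, which RLP encodes as a single
/// byte; a sequentially-nonced deployer's early deployments fall in that range.
fn create_address(sender: [u8; 20], nonce: u8) -> [u8; 20] {
  assert!(nonce < 128, "single-byte RLP nonce encoding only");
  let mut rlp = Vec::with_capacity(23);
  // List header: payload is 21 bytes (address item) + 1 byte (nonce item)
  rlp.push(0xc0 + 22);
  // 20-byte string header, then the sender address
  rlp.push(0x80 + 20);
  rlp.extend_from_slice(&sender);
  // Nonce 0 is RLP's empty string (0x80); 1 ..= 127 encode as themselves
  rlp.push(if nonce == 0 { 0x80 } else { nonce });

  let hash = Keccak256::digest(&rlp);
  let mut address = [0; 20];
  address.copy_from_slice(&hash[12 ..]);
  address
}

fn main() {
  // Hypothetical deployer address (zeroed for illustration); a contract
  // account's first CREATE occurs at nonce 1
  let deployer = [0u8; 20];
  println!("{:02x?}", create_address(deployer, 1));
}
```

This is why the comment above notes anyone can ask the Deployer (at an identical address on every chain) for the deployed contract, while the deployed contract itself need not share an address across chains.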
@@ -1,222 +0,0 @@
|
||||
// SPDX-License-Identifier: AGPLv3
|
||||
pragma solidity ^0.8.0;
|
||||
|
||||
import "./IERC20.sol";
|
||||
|
||||
import "./Schnorr.sol";
|
||||
import "./Sandbox.sol";
|
||||
|
||||
contract Router {
|
||||
// Nonce is incremented for each batch of transactions executed/key update
|
||||
uint256 public nonce;
|
||||
|
||||
// Current public key's x-coordinate
|
||||
// This key must always have the parity defined within the Schnorr contract
|
||||
bytes32 public seraiKey;
|
||||
|
||||
struct OutInstruction {
|
||||
address to;
|
||||
Call[] calls;
|
||||
|
||||
uint256 value;
|
||||
}
|
||||
|
||||
struct Signature {
|
||||
bytes32 c;
|
||||
bytes32 s;
|
||||
}
|
||||
|
||||
event SeraiKeyUpdated(
|
||||
uint256 indexed nonce,
|
||||
bytes32 indexed key,
|
||||
Signature signature
|
||||
);
|
||||
event InInstruction(
|
||||
address indexed from,
|
||||
address indexed coin,
|
||||
uint256 amount,
|
||||
bytes instruction
|
||||
);
|
||||
// success is a uint256 representing a bitfield of transaction successes
|
||||
event Executed(
|
||||
uint256 indexed nonce,
|
||||
bytes32 indexed batch,
|
||||
uint256 success,
|
||||
Signature signature
|
||||
);
|
||||
|
||||
// error types
|
||||
error InvalidKey();
|
||||
error InvalidSignature();
|
||||
error InvalidAmount();
|
||||
error FailedTransfer();
|
||||
error TooManyTransactions();
|
||||
|
||||
modifier _updateSeraiKeyAtEndOfFn(
|
||||
uint256 _nonce,
|
||||
bytes32 key,
|
||||
Signature memory sig
|
||||
) {
    if (
      (key == bytes32(0)) ||
      ((bytes32(uint256(key) % Schnorr.Q)) != key)
    ) {
      revert InvalidKey();
    }

    _;

    seraiKey = key;
    emit SeraiKeyUpdated(_nonce, key, sig);
  }

  constructor(bytes32 _seraiKey) _updateSeraiKeyAtEndOfFn(
    0,
    _seraiKey,
    Signature({ c: bytes32(0), s: bytes32(0) })
  ) {
    nonce = 1;
  }

  // updateSeraiKey validates the given Schnorr signature against the current
  // public key, and if successful, updates the contract's public key to the
  // given one.
  function updateSeraiKey(
    bytes32 _seraiKey,
    Signature calldata sig
  ) external _updateSeraiKeyAtEndOfFn(nonce, _seraiKey, sig) {
    bytes memory message =
      abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey);
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }
  }

  function inInstruction(
    address coin,
    uint256 amount,
    bytes memory instruction
  ) external payable {
    if (coin == address(0)) {
      if (amount != msg.value) {
        revert InvalidAmount();
      }
    } else {
      (bool success, bytes memory res) =
        address(coin).call(
          abi.encodeWithSelector(
            IERC20.transferFrom.selector,
            msg.sender,
            address(this),
            amount
          )
        );

      // Require there was nothing returned, which is done by some non-standard
      // tokens, or that the ERC20 contract did in fact return true
      bool nonStandardResOrTrue =
        (res.length == 0) || abi.decode(res, (bool));
      if (!(success && nonStandardResOrTrue)) {
        revert FailedTransfer();
      }
    }

    /*
      Due to fee-on-transfer tokens, emitting the amount directly is frowned
      upon. The amount instructed to be transferred may not actually be the
      amount transferred.

      If we add nonReentrant to every single function which can affect the
      balance, we can check the amount exactly matches. This prevents transfers
      of less value than expected from occurring, at least not without an
      additional transfer to top up the difference (which isn't routed through
      this contract and accordingly isn't trying to artificially create
      events).

      If we don't add nonReentrant, a transfer can be started, and then a new
      transfer for the difference can follow it up (again and again until a
      rounding error is reached). This contract would believe all transfers
      were done in full, despite each only being done in part (except for the
      last one).

      Given that fee-on-transfer tokens aren't intended to be supported (the
      only token planned to be supported is Dai, which doesn't have any
      fee-on-transfer logic), and that fee-on-transfer tokens aren't even able
      to be supported at this time, we simply classify this entire class of
      tokens as non-standard implementations which induce undefined behavior.
      It is the Serai network's role not to add support for any non-standard
      implementations.
    */
    emit InInstruction(msg.sender, coin, amount, instruction);
  }

  // execute accepts a list of transactions to execute as well as a signature.
  // If signature verification passes, the given transactions are executed.
  // If signature verification fails, this function will revert.
  function execute(
    OutInstruction[] calldata transactions,
    Signature calldata sig
  ) external {
    if (transactions.length > 256) {
      revert TooManyTransactions();
    }

    bytes memory message =
      abi.encode("execute", block.chainid, nonce, transactions);
    uint256 executed_with_nonce = nonce;
    // This prevents re-entrancy from causing double spends yet does allow
    // out-of-order execution via re-entrancy
    nonce++;

    if (!Schnorr.verify(seraiKey, message, sig.c, sig.s)) {
      revert InvalidSignature();
    }

    uint256 successes;
    for (uint256 i = 0; i < transactions.length; i++) {
      bool success;

      // If there are no calls, send the value to `to`
      if (transactions[i].calls.length == 0) {
        (success, ) = transactions[i].to.call{
          value: transactions[i].value,
          gas: 5_000
        }("");
      } else {
        // If there are calls, ignore `to`. Deploy a new Sandbox and proxy the
        // calls through that
        //
        // We could use a single sandbox in order to reduce gas costs, yet that
        // risks one person creating an approval that's hooked before another
        // user's intended action executes, in order to drain their coins
        //
        // While technically, that would be a flaw in the sandboxed flow, this
        // is robust and prevents such flaws from being possible
        //
        // We also don't want people to set state via the Sandbox and expect it
        // to be available in the future, when anyone else could set a distinct
        // value
        Sandbox sandbox = new Sandbox();
        (success, ) = address(sandbox).call{
          value: transactions[i].value,
          // TODO: Have the Call specify the gas up front
          gas: 350_000
        }(
          abi.encodeWithSelector(
            Sandbox.sandbox.selector,
            transactions[i].calls
          )
        );
      }
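
      // Set bit i of `successes` if this transaction succeeded, building a
      // bitmask of which transactions in the batch executed successfully
      // (assembly lets the bool be used as a word directly)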
      assembly {
        successes := or(successes, shl(i, success))
      }
    }
    emit Executed(
      executed_with_nonce,
      keccak256(message),
      successes,
      sig
    );
  }
}
@@ -1,48 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.24;

struct Call {
  address to;
  uint256 value;
  bytes data;
}

// A minimal sandbox focused on gas efficiency.
//
// The first call is executed if any of the calls fail, making it a fallback.
// All other calls are executed sequentially.
contract Sandbox {
  error AlreadyCalled();
  error CallsFailed();

  function sandbox(Call[] calldata calls) external payable {
    // Prevent re-entrancy due to this executing arbitrary calls from anyone
    // and anywhere
    bool called;
    assembly { called := tload(0) }
    if (called) {
      revert AlreadyCalled();
    }
    assembly { tstore(0, 1) }
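
    // tload/tstore are the EIP-1153 transient storage opcodes: transient
    // storage persists for the duration of the transaction and is cleared
    // afterwards, making this guard per-transaction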

    // Execute the calls, starting from 1
    for (uint256 i = 1; i < calls.length; i++) {
      (bool success, ) =
        calls[i].to.call{ value: calls[i].value }(calls[i].data);

      // If this call failed, execute the fallback (call 0)
      if (!success) {
        (success, ) =
          calls[0].to.call{ value: address(this).balance }(calls[0].data);
        // If this call also failed, revert entirely
        if (!success) {
          revert CallsFailed();
        }
        return;
      }
    }

    // We don't clear the re-entrancy guard as this contract should never be
    // called again, so there's no reason to spend the effort
  }
}
@@ -1,44 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

// See https://github.com/noot/schnorr-verify for implementation details
library Schnorr {
  // secp256k1 group order
  uint256 constant public Q =
    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;

  // Fixed parity for the public keys used in this contract
  // This avoids spending a word passing the parity in a similar style to
  // Bitcoin's Taproot
  uint8 constant public KEY_PARITY = 27;

  error InvalidSOrA();
  error MalformedSignature();

  // px := public key x-coordinate, where the public key has a parity of KEY_PARITY
  // message := the message signed, as raw bytes
  // c := schnorr signature challenge
  // s := schnorr signature response
  function verify(
    bytes32 px,
    bytes memory message,
    bytes32 c,
    bytes32 s
  ) internal pure returns (bool) {
    // ecrecover = (m, v, r, s) -> key
    // We instead pass the following to obtain the nonce (not the key)
    // Then we hash it and verify it matches the challenge
    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
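
    // A sketch of why this recovers the nonce: ecrecover(h, v, r, s') returns
    // address(r^-1 * ((s' * R) - (h * G))), where R is the point with
    // x-coordinate r and the parity denoted by v. Passing h = -s * px,
    // r = px (so R = A, our even-parity public key), and s' = -c * px yields
    // px^-1 * ((-c * px * A) - (-s * px * G)) = (s * G) - (c * A), which for
    // a valid signature is the nonce commitment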

    // For safety, we want each input to ecrecover (sa, px, ca) to be non-zero
    // The ecrecover precompile checks `r` and `s` (`px` and `ca`) are non-zero
    // That leaves us to check `sa` is non-zero
    if (sa == 0) revert InvalidSOrA();
    address R = ecrecover(sa, KEY_PARITY, px, ca);
    if (R == address(0)) revert MalformedSignature();

    // Check the signature is correct by rebuilding the challenge
    return c == keccak256(abi.encodePacked(R, px, message));
  }
}
@@ -1,4 +0,0 @@
# Ethereum Transaction Relayer

This server collects Ethereum router commands to be published, offering an RPC
to fetch them.
@@ -1,37 +0,0 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod erc20_container {
  use super::*;
  sol!("contracts/IERC20.sol");
}
pub use erc20_container::IERC20 as erc20;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod deployer_container {
  use super::*;
  sol!("contracts/Deployer.sol");
}
pub use deployer_container::Deployer as deployer;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod router_container {
  use super::*;
  sol!(Router, "artifacts/Router.abi");
}
pub use router_container::Router as router;
@@ -1,188 +0,0 @@
use group::ff::PrimeField;
use k256::{
  elliptic_curve::{ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint},
  ProjectivePoint, Scalar, U256 as KU256,
};
#[cfg(test)]
use k256::{elliptic_curve::point::DecompressPoint, AffinePoint};

use frost::{
  algorithm::{Hram, SchnorrSignature},
  curve::{Ciphersuite, Secp256k1},
};

use alloy_core::primitives::{Parity, Signature as AlloySignature};
use alloy_consensus::{SignableTransaction, Signed, TxLegacy};

use crate::abi::router::Signature as AbiSignature;

pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
  alloy_core::primitives::keccak256(data).into()
}

pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
  <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(data).into())
}

pub fn address(point: &ProjectivePoint) -> [u8; 20] {
  let encoded_point = point.to_encoded_point(false);
  // Last 20 bytes of the hash of the concatenated x and y coordinates
  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
}

/// Deterministically sign a transaction.
///
/// This function panics if passed a transaction with a non-None chain ID.
pub fn deterministically_sign(tx: &TxLegacy) -> Signed<TxLegacy> {
  assert!(
    tx.chain_id.is_none(),
    "chain ID was Some when deterministically signing a TX (causing a non-deterministic signer)"
  );

  let sig_hash = tx.signature_hash().0;
  let mut r = hash_to_scalar(&[sig_hash.as_slice(), b"r"].concat());
  let mut s = hash_to_scalar(&[sig_hash.as_slice(), b"s"].concat());
  loop {
    let r_bytes: [u8; 32] = r.to_repr().into();
    let s_bytes: [u8; 32] = s.to_repr().into();
    let v = Parity::NonEip155(false);
    let signature =
      AlloySignature::from_scalars_and_parity(r_bytes.into(), s_bytes.into(), v).unwrap();
    let tx = tx.clone().into_signed(signature);
    if tx.recover_signer().is_ok() {
      return tx;
    }

    // Re-hash until valid
    r = hash_to_scalar(r_bytes.as_ref());
    s = hash_to_scalar(s_bytes.as_ref());
  }
}

/// The public key for a Schnorr-signing account.
#[allow(non_snake_case)]
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct PublicKey {
  pub(crate) A: ProjectivePoint,
  pub(crate) px: Scalar,
}

impl PublicKey {
  /// Construct a new `PublicKey`.
  ///
  /// This will return None if the provided point isn't eligible to be a public key (due to
  /// bounds such as parity).
  #[allow(non_snake_case)]
  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
    let affine = A.to_affine();
    // Only allow even keys to save a word within Ethereum
    let is_odd = bool::from(affine.y_is_odd());
    if is_odd {
      None?;
    }

    let x_coord = affine.x();
    let x_coord_scalar = <Scalar as Reduce<KU256>>::reduce_bytes(&x_coord);
    // Return None if a reduction would occur
    // Reductions would be incredibly unlikely and shouldn't be an issue, yet it's one less
    // headache/concern to have
    // This does ban a trivial amount of public keys
    if x_coord_scalar.to_repr() != x_coord {
      None?;
    }

    Some(PublicKey { A, px: x_coord_scalar })
  }

  pub fn point(&self) -> ProjectivePoint {
    self.A
  }

  pub(crate) fn eth_repr(&self) -> [u8; 32] {
    self.px.to_repr().into()
  }

  #[cfg(test)]
  pub(crate) fn from_eth_repr(repr: [u8; 32]) -> Option<Self> {
    #[allow(non_snake_case)]
    let A = Option::<AffinePoint>::from(AffinePoint::decompress(&repr.into(), 0.into()))?.into();
    Option::from(Scalar::from_repr(repr.into())).map(|px| PublicKey { A, px })
  }
}

/// The HRAm to use for the Schnorr contract.
#[derive(Clone, Default)]
pub struct EthereumHram {}
impl Hram<Secp256k1> for EthereumHram {
  #[allow(non_snake_case)]
  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
    let x_coord = A.to_affine().x();

    let mut data = address(R).to_vec();
    data.extend(x_coord.as_slice());
    data.extend(m);

    <Scalar as Reduce<KU256>>::reduce_bytes(&keccak256(&data).into())
  }
}

/// A signature for the Schnorr contract.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Signature {
  pub(crate) c: Scalar,
  pub(crate) s: Scalar,
}
impl Signature {
  pub fn verify(&self, public_key: &PublicKey, message: &[u8]) -> bool {
    #[allow(non_snake_case)]
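    // Recover the nonce commitment from the verification equation
    // s * G = R + (c * A)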
    let R = (Secp256k1::generator() * self.s) - (public_key.A * self.c);
    EthereumHram::hram(&R, &public_key.A, message) == self.c
  }

  /// Construct a new `Signature`.
  ///
  /// This will return None if the signature is invalid.
  pub fn new(
    public_key: &PublicKey,
    message: &[u8],
    signature: SchnorrSignature<Secp256k1>,
  ) -> Option<Signature> {
    let c = EthereumHram::hram(&signature.R, &public_key.A, message);
    if !signature.verify(public_key.A, c) {
      None?;
    }

    let res = Signature { c, s: signature.s };
    assert!(res.verify(public_key, message));
    Some(res)
  }

  pub fn c(&self) -> Scalar {
    self.c
  }
  pub fn s(&self) -> Scalar {
    self.s
  }

  pub fn to_bytes(&self) -> [u8; 64] {
    let mut res = [0; 64];
    res[.. 32].copy_from_slice(self.c.to_repr().as_ref());
    res[32 ..].copy_from_slice(self.s.to_repr().as_ref());
    res
  }

  pub fn from_bytes(bytes: [u8; 64]) -> std::io::Result<Self> {
    let mut reader = bytes.as_slice();
    let c = Secp256k1::read_F(&mut reader)?;
    let s = Secp256k1::read_F(&mut reader)?;
    Ok(Signature { c, s })
  }
}
impl From<&Signature> for AbiSignature {
  fn from(sig: &Signature) -> AbiSignature {
    let c: [u8; 32] = sig.c.to_repr().into();
    let s: [u8; 32] = sig.s.to_repr().into();
    AbiSignature { c: c.into(), s: s.into() }
  }
}
@@ -1,113 +0,0 @@
use std::sync::Arc;

use alloy_core::primitives::{hex::FromHex, Address, B256, U256, Bytes, TxKind};
use alloy_consensus::{Signed, TxLegacy};

use alloy_sol_types::{SolCall, SolEvent};

use alloy_rpc_types_eth::{BlockNumberOrTag, Filter};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::{
  Error,
  crypto::{self, keccak256, PublicKey},
  router::Router,
};
pub use crate::abi::deployer as abi;

/// The Deployer contract for the Router contract.
///
/// This Deployer has a deterministic address, letting it be immediately identified on any
/// compatible chain. It then supports retrieving the Router contract's address (which isn't
/// deterministic) using a single log query.
#[derive(Clone, Debug)]
pub struct Deployer;
impl Deployer {
  /// Obtain the transaction to deploy this contract, already signed.
  ///
  /// The account this transaction is sent from (which is populated in `from`) must be sufficiently
  /// funded for this transaction to be submitted. This account has no known private key to anyone,
  /// so ETH sent can be neither misappropriated nor returned.
  pub fn deployment_tx() -> Signed<TxLegacy> {
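    // Since this transaction is signed without a chain ID, the recovered
    // signer is identical on every chain, and the CREATE address derived from
    // that signer (at nonce 0) is likewise chain-independent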
    let bytecode = include_str!("../artifacts/Deployer.bin");
    let bytecode =
      Bytes::from_hex(bytecode).expect("compiled-in Deployer bytecode wasn't valid hex");

    let tx = TxLegacy {
      chain_id: None,
      nonce: 0,
      gas_price: 100_000_000_000u128,
      // TODO: Use a more accurate gas limit
      gas_limit: 1_000_000u128,
      to: TxKind::Create,
      value: U256::ZERO,
      input: bytecode,
    };

    crypto::deterministically_sign(&tx)
  }

  /// Obtain the deterministic address for this contract.
  pub fn address() -> [u8; 20] {
    let deployer_deployer =
      Self::deployment_tx().recover_signer().expect("deployment_tx didn't have a valid signature");
    **Address::create(&deployer_deployer, 0)
  }

  /// Construct a new view of the `Deployer`.
  pub async fn new(provider: Arc<RootProvider<SimpleRequest>>) -> Result<Option<Self>, Error> {
    let address = Self::address();
    let code = provider.get_code_at(address.into()).await.map_err(|_| Error::ConnectionError)?;
    // Contract has yet to be deployed
    if code.is_empty() {
      return Ok(None);
    }
    Ok(Some(Self))
  }

  /// Yield the `TxLegacy` necessary to deploy the Router.
  pub fn deploy_router(&self, key: &PublicKey) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(Self::address().into()),
      input: abi::deployCall::new((Router::init_code(key).into(),)).abi_encode().into(),
      gas_limit: 1_000_000,
      ..Default::default()
    }
  }

  /// Find the first Router deployed with the specified key as its first key.
  ///
  /// This is the Router Serai will use, and is the only way to construct a `Router`.
  pub async fn find_router(
    &self,
    provider: Arc<RootProvider<SimpleRequest>>,
    key: &PublicKey,
  ) -> Result<Option<Router>, Error> {
    let init_code = Router::init_code(key);
    let init_code_hash = keccak256(&init_code);

    #[cfg(not(test))]
    let to_block = BlockNumberOrTag::Finalized;
    #[cfg(test)]
    let to_block = BlockNumberOrTag::Latest;

    // Find the first log using this init code (where the init code is binding to the key)
    // TODO: Make an abstraction for event filtering (de-duplicating common code)
    let filter =
      Filter::new().from_block(0).to_block(to_block).address(Address::from(Self::address()));
    let filter = filter.event_signature(abi::Deployment::SIGNATURE_HASH);
    let filter = filter.topic1(B256::from(init_code_hash));
    let logs = provider.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let Some(first_log) = logs.first() else { return Ok(None) };
    let router = first_log
      .log_decode::<abi::Deployment>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .created;

    Ok(Some(Router::new(provider, router)))
  }
}
@@ -1,105 +0,0 @@
use std::{sync::Arc, collections::HashSet};

use alloy_core::primitives::{Address, B256, U256};

use alloy_sol_types::{SolInterface, SolEvent};

use alloy_rpc_types_eth::Filter;
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

use crate::Error;
pub use crate::abi::erc20 as abi;
use abi::{IERC20Calls, Transfer, transferCall, transferFromCall};

#[derive(Clone, Debug)]
pub struct TopLevelErc20Transfer {
  pub id: [u8; 32],
  pub from: [u8; 20],
  pub amount: U256,
  pub data: Vec<u8>,
}

/// A view for an ERC20 contract.
#[derive(Clone, Debug)]
pub struct Erc20(Arc<RootProvider<SimpleRequest>>, Address);
impl Erc20 {
  /// Construct a new view of the specified ERC20 contract.
  pub fn new(provider: Arc<RootProvider<SimpleRequest>>, address: [u8; 20]) -> Self {
    Self(provider, Address::from(&address))
  }

  pub async fn top_level_transfers(
    &self,
    block: u64,
    to: [u8; 20],
  ) -> Result<Vec<TopLevelErc20Transfer>, Error> {
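    // Fetch the Transfer logs to `to`, then only keep those whose transaction
    // is itself a direct top-level transfer/transferFrom call matching the
    // log, excluding transfers initiated by other contracts' code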
    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(Transfer::SIGNATURE_HASH);
    let mut to_topic = [0; 32];
    to_topic[12 ..].copy_from_slice(&to);
    let filter = filter.topic2(B256::from(to_topic));
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut handled = HashSet::new();

    let mut top_level_transfers = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx =
        self.0.get_transaction_by_hash(tx_id).await.ok().flatten().ok_or(Error::ConnectionError)?;

      // If this is a top-level call...
      if tx.to == Some(self.1) {
        // And we recognize the call...
        // Don't validate the encoding as this can't be re-encoded to an identical bytestring due
        // to the InInstruction appended
        if let Ok(call) = IERC20Calls::abi_decode(&tx.input, false) {
          // Extract the top-level call's from/to/value
          let (from, call_to, value) = match call {
            IERC20Calls::transfer(transferCall { to: call_to, value }) => (tx.from, call_to, value),
            IERC20Calls::transferFrom(transferFromCall { from, to: call_to, value }) => {
              (from, call_to, value)
            }
            // Treat any other function selectors as unrecognized
            _ => continue,
          };

          let log = log.log_decode::<Transfer>().map_err(|_| Error::ConnectionError)?.inner.data;

          // Ensure the top-level transfer is equivalent, and this presumably isn't a log for an
          // internal transfer
          if (log.from != from) || (call_to != to) || (value != log.value) {
            continue;
          }

          // Now that the top-level transfer is confirmed to be equivalent to the log, ensure it's
          // the only log we handle
          if handled.contains(&tx_id) {
            continue;
          }
          handled.insert(tx_id);

          // Read the data appended after
          let encoded = call.abi_encode();
          let data = tx.input.as_ref()[encoded.len() ..].to_vec();

          // Push the transfer
          top_level_transfers.push(TopLevelErc20Transfer {
            // Since we'll only handle one log for this TX, set the ID to the TX ID
            id: *tx_id,
            from: *log.from.0,
            amount: log.value,
            data,
          });
        }
      }
    }
    Ok(top_level_transfers)
  }
}
@@ -1,35 +0,0 @@
use thiserror::Error;

pub mod alloy {
  pub use alloy_core::primitives;
  pub use alloy_core as core;
  pub use alloy_sol_types as sol_types;

  pub use alloy_consensus as consensus;
  pub use alloy_network as network;
  pub use alloy_rpc_types_eth as rpc_types;
  pub use alloy_simple_request_transport as simple_request_transport;
  pub use alloy_rpc_client as rpc_client;
  pub use alloy_provider as provider;
}

pub mod crypto;

pub(crate) mod abi;

pub mod erc20;
pub mod deployer;
pub mod router;

pub mod machine;

#[cfg(any(test, feature = "tests"))]
pub mod tests;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Error)]
pub enum Error {
  #[error("failed to verify Schnorr signature")]
  InvalidSignature,
  #[error("couldn't make call/send TX")]
  ConnectionError,
}
@@ -1,414 +0,0 @@
use std::{
  io::{self, Read},
  collections::HashMap,
};

use rand_core::{RngCore, CryptoRng};

use transcript::{Transcript, RecommendedTranscript};

use group::GroupEncoding;
use frost::{
  curve::{Ciphersuite, Secp256k1},
  Participant, ThresholdKeys, FrostError,
  algorithm::Schnorr,
  sign::*,
};

use alloy_core::primitives::U256;

use crate::{
  crypto::{PublicKey, EthereumHram, Signature},
  router::{
    abi::{Call as AbiCall, OutInstruction as AbiOutInstruction},
    Router,
  },
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Call {
  pub to: [u8; 20],
  pub value: U256,
  pub data: Vec<u8>,
}
impl Call {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut to = [0; 20];
    reader.read_exact(&mut to)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    let mut data_len = {
      let mut data_len = [0; 4];
      reader.read_exact(&mut data_len)?;
      usize::try_from(u32::from_le_bytes(data_len)).expect("u32 couldn't fit within a usize")
    };

    // A valid DoS would be to claim 4 GB of data is present while only sending 4 bytes
    // We read this in 1 KB chunks to only read data actually present (capping the DoS at 1 KB)
    let mut data = vec![];
    while data_len > 0 {
      let chunk_len = data_len.min(1024);
      let mut chunk = vec![0; chunk_len];
      reader.read_exact(&mut chunk)?;
      data.extend(&chunk);
      data_len -= chunk_len;
    }

    Ok(Call { to, value, data })
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.to)?;
    writer.write_all(&self.value.as_le_bytes())?;

    let data_len = u32::try_from(self.data.len())
      .map_err(|_| io::Error::other("call data length exceeded 2**32"))?;
    writer.write_all(&data_len.to_le_bytes())?;
    writer.write_all(&self.data)
  }
}
impl From<Call> for AbiCall {
  fn from(call: Call) -> AbiCall {
    AbiCall { to: call.to.into(), value: call.value, data: call.data.into() }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum OutInstructionTarget {
  Direct([u8; 20]),
  Calls(Vec<Call>),
}
impl OutInstructionTarget {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut addr = [0; 20];
        reader.read_exact(&mut addr)?;
        Ok(OutInstructionTarget::Direct(addr))
      }
      1 => {
        let mut calls_len = [0; 4];
        reader.read_exact(&mut calls_len)?;
        let calls_len = u32::from_le_bytes(calls_len);

        let mut calls = vec![];
        for _ in 0 .. calls_len {
          calls.push(Call::read(reader)?);
        }
        Ok(OutInstructionTarget::Calls(calls))
      }
      _ => Err(io::Error::other("unrecognized OutInstructionTarget"))?,
    }
  }

  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      OutInstructionTarget::Direct(addr) => {
        writer.write_all(&[0])?;
        writer.write_all(addr)?;
      }
      OutInstructionTarget::Calls(calls) => {
        writer.write_all(&[1])?;
        let call_len = u32::try_from(calls.len())
          .map_err(|_| io::Error::other("amount of calls exceeded 2**32"))?;
        writer.write_all(&call_len.to_le_bytes())?;
        for call in calls {
          call.write(writer)?;
        }
      }
    }
    Ok(())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OutInstruction {
  pub target: OutInstructionTarget,
  pub value: U256,
}
impl OutInstruction {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let target = OutInstructionTarget::read(reader)?;

    let value = {
      let mut value_bytes = [0; 32];
      reader.read_exact(&mut value_bytes)?;
      U256::from_le_slice(&value_bytes)
    };

    Ok(OutInstruction { target, value })
  }
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.target.write(writer)?;
    writer.write_all(&self.value.as_le_bytes())
  }
}
impl From<OutInstruction> for AbiOutInstruction {
  fn from(instruction: OutInstruction) -> AbiOutInstruction {
    match instruction.target {
      OutInstructionTarget::Direct(addr) => {
        AbiOutInstruction { to: addr.into(), calls: vec![], value: instruction.value }
      }
      OutInstructionTarget::Calls(calls) => AbiOutInstruction {
        to: [0; 20].into(),
        calls: calls.into_iter().map(Into::into).collect(),
        value: instruction.value,
      },
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RouterCommand {
  UpdateSeraiKey { chain_id: U256, nonce: U256, key: PublicKey },
  Execute { chain_id: U256, nonce: U256, outs: Vec<OutInstruction> },
}

impl RouterCommand {
  pub fn msg(&self) -> Vec<u8> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        Router::update_serai_key_message(*chain_id, *nonce, key)
      }
      RouterCommand::Execute { chain_id, nonce, outs } => Router::execute_message(
        *chain_id,
        *nonce,
        outs.iter().map(|out| out.clone().into()).collect(),
      ),
    }
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;

    match kind[0] {
      0 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;

        let key = PublicKey::new(Secp256k1::read_G(reader)?)
          .ok_or(io::Error::other("key for RouterCommand doesn't have an eth representation"))?;
        Ok(RouterCommand::UpdateSeraiKey {
          chain_id: U256::from_le_slice(&chain_id),
          nonce: U256::from_le_slice(&nonce),
          key,
        })
      }
      1 => {
        let mut chain_id = [0; 32];
        reader.read_exact(&mut chain_id)?;
        let chain_id = U256::from_le_slice(&chain_id);

        let mut nonce = [0; 32];
        reader.read_exact(&mut nonce)?;
        let nonce = U256::from_le_slice(&nonce);

        let mut outs_len = [0; 4];
        reader.read_exact(&mut outs_len)?;
        let outs_len = u32::from_le_bytes(outs_len);

        let mut outs = vec![];
        for _ in 0 .. outs_len {
          outs.push(OutInstruction::read(reader)?);
        }

        Ok(RouterCommand::Execute { chain_id, nonce, outs })
      }
      _ => Err(io::Error::other("reading unknown type of RouterCommand"))?,
    }
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      RouterCommand::UpdateSeraiKey { chain_id, nonce, key } => {
        writer.write_all(&[0])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&key.A.to_bytes())
      }
      RouterCommand::Execute { chain_id, nonce, outs } => {
        writer.write_all(&[1])?;
        writer.write_all(&chain_id.as_le_bytes())?;
        writer.write_all(&nonce.as_le_bytes())?;
        writer.write_all(&u32::try_from(outs.len()).unwrap().to_le_bytes())?;
        for out in outs {
          out.write(writer)?;
        }
        Ok(())
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct SignedRouterCommand {
  command: RouterCommand,
  signature: Signature,
}

impl SignedRouterCommand {
  pub fn new(key: &PublicKey, command: RouterCommand, signature: &[u8; 64]) -> Option<Self> {
    let c = Secp256k1::read_F(&mut &signature[.. 32]).ok()?;
    let s = Secp256k1::read_F(&mut &signature[32 ..]).ok()?;
    let signature = Signature { c, s };

    if !signature.verify(key, &command.msg()) {
      None?
    }
    Some(SignedRouterCommand { command, signature })
  }

  pub fn command(&self) -> &RouterCommand {
    &self.command
  }

  pub fn signature(&self) -> &Signature {
    &self.signature
  }

  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let command = RouterCommand::read(reader)?;

    let mut sig = [0; 64];
    reader.read_exact(&mut sig)?;
    let signature = Signature::from_bytes(sig)?;

    Ok(SignedRouterCommand { command, signature })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    self.command.write(writer)?;
    writer.write_all(&self.signature.to_bytes())
  }
}

pub struct RouterCommandMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl RouterCommandMachine {
  pub fn new(keys: ThresholdKeys<Secp256k1>, command: RouterCommand) -> Option<Self> {
    // The Schnorr algorithm should be fine without this, even when using the IETF variant
    // Yet as this is more comprehensive, we do it anyways, even if not necessary
    let mut transcript = RecommendedTranscript::new(b"ethereum-serai RouterCommandMachine v0.1");
    let key = keys.group_key();
    transcript.append_message(b"key", key.to_bytes());
    transcript.append_message(b"command", command.serialize());

    Some(Self {
      key: PublicKey::new(key)?,
      command,
      machine: AlgorithmMachine::new(Schnorr::new(transcript), keys),
    })
  }
}

impl PreprocessMachine for RouterCommandMachine {
  type Preprocess = Preprocess<Secp256k1, ()>;
  type Signature = SignedRouterCommand;
  type SignMachine = RouterCommandSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    self,
    rng: &mut R,
  ) -> (Self::SignMachine, Self::Preprocess) {
    let (machine, preprocess) = self.machine.preprocess(rng);

    (RouterCommandSignMachine { key: self.key, command: self.command, machine }, preprocess)
  }
}

pub struct RouterCommandSignMachine {
  key: PublicKey,
  command: RouterCommand,
  machine: AlgorithmSignMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignMachine<SignedRouterCommand> for RouterCommandSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Secp256k1>;
  type Preprocess = Preprocess<Secp256k1, ()>;
  type SignatureShare = SignatureShare<Secp256k1>;
  type SignatureMachine = RouterCommandSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Secp256k1>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "RouterCommand machines don't support caching their preprocesses due to {}",
      "being already bound to a specific command"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.machine.read_preprocess(reader)
  }

  fn sign(
    self,
    commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(RouterCommandSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to a RouterCommand machine when it generates its own");
    }

    let (machine, share) = self.machine.sign(commitments, &self.command.msg())?;

    Ok((RouterCommandSignatureMachine { key: self.key, command: self.command, machine }, share))
  }
}

pub struct RouterCommandSignatureMachine {
  key: PublicKey,
  command: RouterCommand,
  machine:
    AlgorithmSignatureMachine<Secp256k1, Schnorr<Secp256k1, RecommendedTranscript, EthereumHram>>,
}

impl SignatureMachine<SignedRouterCommand> for RouterCommandSignatureMachine {
  type SignatureShare = SignatureShare<Secp256k1>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.machine.read_share(reader)
  }

  fn complete(
    self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<SignedRouterCommand, FrostError> {
    let sig = self.machine.complete(shares)?;
    let signature = Signature::new(&self.key, &self.command.msg(), sig)
      .expect("machine produced an invalid signature");
    Ok(SignedRouterCommand { command: self.command, signature })
  }
}
@@ -1,443 +0,0 @@
use std::{sync::Arc, io, collections::HashSet};

use k256::{
  elliptic_curve::{group::GroupEncoding, sec1},
  ProjectivePoint,
};

use alloy_core::primitives::{hex::FromHex, Address, U256, Bytes, TxKind};
#[cfg(test)]
use alloy_core::primitives::B256;
use alloy_consensus::TxLegacy;

use alloy_sol_types::{SolValue, SolConstructor, SolCall, SolEvent};

use alloy_rpc_types_eth::Filter;
#[cfg(test)]
use alloy_rpc_types_eth::{BlockId, TransactionRequest, TransactionInput};
use alloy_simple_request_transport::SimpleRequest;
use alloy_provider::{Provider, RootProvider};

pub use crate::{
  Error,
  crypto::{PublicKey, Signature},
  abi::{erc20::Transfer, router as abi},
};
use abi::{SeraiKeyUpdated, InInstruction as InInstructionEvent, Executed as ExecutedEvent};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Coin {
  Ether,
  Erc20([u8; 20]),
}

impl Coin {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let mut kind = [0xff];
    reader.read_exact(&mut kind)?;
    Ok(match kind[0] {
      0 => Coin::Ether,
      1 => {
        let mut address = [0; 20];
        reader.read_exact(&mut address)?;
        Coin::Erc20(address)
      }
      _ => Err(io::Error::other("unrecognized Coin type"))?,
    })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    match self {
      Coin::Ether => writer.write_all(&[0]),
      Coin::Erc20(token) => {
        writer.write_all(&[1])?;
        writer.write_all(token)
      }
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct InInstruction {
  pub id: ([u8; 32], u64),
  pub from: [u8; 20],
  pub coin: Coin,
  pub amount: U256,
  pub data: Vec<u8>,
  pub key_at_end_of_block: ProjectivePoint,
}

impl InInstruction {
  pub fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
    let id = {
      let mut id_hash = [0; 32];
      reader.read_exact(&mut id_hash)?;
      let mut id_pos = [0; 8];
      reader.read_exact(&mut id_pos)?;
      let id_pos = u64::from_le_bytes(id_pos);
      (id_hash, id_pos)
    };

    let mut from = [0; 20];
    reader.read_exact(&mut from)?;

    let coin = Coin::read(reader)?;
    let mut amount = [0; 32];
    reader.read_exact(&mut amount)?;
    let amount = U256::from_le_slice(&amount);

    let mut data_len = [0; 4];
    reader.read_exact(&mut data_len)?;
    let data_len = usize::try_from(u32::from_le_bytes(data_len))
      .map_err(|_| io::Error::other("InInstruction data exceeded 2**32 in length"))?;
    let mut data = vec![0; data_len];
    reader.read_exact(&mut data)?;

    let mut key_at_end_of_block = <ProjectivePoint as GroupEncoding>::Repr::default();
    reader.read_exact(&mut key_at_end_of_block)?;
    let key_at_end_of_block = Option::from(ProjectivePoint::from_bytes(&key_at_end_of_block))
      .ok_or(io::Error::other("InInstruction had key at end of block which wasn't valid"))?;

    Ok(InInstruction { id, from, coin, amount, data, key_at_end_of_block })
  }

  pub fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
    writer.write_all(&self.id.0)?;
    writer.write_all(&self.id.1.to_le_bytes())?;

    writer.write_all(&self.from)?;

    self.coin.write(writer)?;
    writer.write_all(&self.amount.as_le_bytes())?;

    writer.write_all(
      &u32::try_from(self.data.len())
        .map_err(|_| {
          io::Error::other("InInstruction being written had data exceeding 2**32 in length")
        })?
        .to_le_bytes(),
    )?;
    writer.write_all(&self.data)?;

    writer.write_all(&self.key_at_end_of_block.to_bytes())
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Executed {
  pub tx_id: [u8; 32],
  pub nonce: u64,
  pub signature: [u8; 64],
}

/// The contract Serai uses to manage its state.
#[derive(Clone, Debug)]
pub struct Router(Arc<RootProvider<SimpleRequest>>, Address);
impl Router {
  pub(crate) fn code() -> Vec<u8> {
    let bytecode = include_str!("../artifacts/Router.bin");
    Bytes::from_hex(bytecode).expect("compiled-in Router bytecode wasn't valid hex").to_vec()
  }

  pub(crate) fn init_code(key: &PublicKey) -> Vec<u8> {
    let mut bytecode = Self::code();
    // Append the constructor arguments
    bytecode.extend((abi::constructorCall { _seraiKey: key.eth_repr().into() }).abi_encode());
    bytecode
  }

  // This isn't pub in order to force users to use `Deployer::find_router`.
  pub(crate) fn new(provider: Arc<RootProvider<SimpleRequest>>, address: Address) -> Self {
    Self(provider, address)
  }

  pub fn address(&self) -> [u8; 20] {
    **self.1
  }

  /// Get the key for Serai at the specified block.
  #[cfg(test)]
  pub async fn serai_key(&self, at: [u8; 32]) -> Result<PublicKey, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::seraiKeyCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::seraiKeyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    PublicKey::from_eth_repr(res._0.0).ok_or(Error::ConnectionError)
  }

  /// Get the message to be signed in order to update the key for Serai.
  pub(crate) fn update_serai_key_message(chain_id: U256, nonce: U256, key: &PublicKey) -> Vec<u8> {
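    // This encoding must remain byte-identical to the message the Router
    // contract builds via
    // abi.encodePacked("updateSeraiKey", block.chainid, nonce, _seraiKey)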
    let mut buffer = b"updateSeraiKey".to_vec();
    buffer.extend(&chain_id.to_be_bytes::<32>());
    buffer.extend(&nonce.to_be_bytes::<32>());
    buffer.extend(&key.eth_repr());
    buffer
  }

  /// Update the key representing Serai.
  pub fn update_serai_key(&self, public_key: &PublicKey, sig: &Signature) -> TxLegacy {
    // TODO: Set a more accurate gas
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::updateSeraiKeyCall::new((public_key.eth_repr().into(), sig.into()))
        .abi_encode()
        .into(),
      gas_limit: 100_000,
      ..Default::default()
    }
  }

  /// Get the current nonce for the published batches.
  #[cfg(test)]
  pub async fn nonce(&self, at: [u8; 32]) -> Result<U256, Error> {
    let call = TransactionRequest::default()
      .to(self.1)
      .input(TransactionInput::new(abi::nonceCall::new(()).abi_encode().into()));
    let bytes = self
      .0
      .call(&call)
      .block(BlockId::Hash(B256::from(at).into()))
      .await
      .map_err(|_| Error::ConnectionError)?;
    let res =
      abi::nonceCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;
    Ok(res._0)
  }

  /// Get the message to be signed in order to execute a batch of `OutInstruction`s.
  pub(crate) fn execute_message(
    chain_id: U256,
    nonce: U256,
    outs: Vec<abi::OutInstruction>,
  ) -> Vec<u8> {
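    // Matches the contract's abi.encode("execute", block.chainid, nonce, transactions)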
    ("execute".to_string(), chain_id, nonce, outs).abi_encode_params()
  }

  /// Execute a batch of `OutInstruction`s.
  pub fn execute(&self, outs: &[abi::OutInstruction], sig: &Signature) -> TxLegacy {
    TxLegacy {
      to: TxKind::Call(self.1),
      input: abi::executeCall::new((outs.to_vec(), sig.into())).abi_encode().into(),
      // TODO
      gas_limit: 100_000 + ((200_000 + 10_000) * u128::try_from(outs.len()).unwrap()),
      ..Default::default()
    }
  }

  pub async fn key_at_end_of_block(&self, block: u64) -> Result<Option<ProjectivePoint>, Error> {
    let filter = Filter::new().from_block(0).to_block(block).address(self.1);
    let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
    let all_keys = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;
    if all_keys.is_empty() {
      return Ok(None);
    }

    let last_key_x_coordinate_log = all_keys.last().ok_or(Error::ConnectionError)?;
    let last_key_x_coordinate = last_key_x_coordinate_log
      .log_decode::<SeraiKeyUpdated>()
      .map_err(|_| Error::ConnectionError)?
      .inner
      .data
      .key;
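
    // The contract only stores the key's x-coordinate, fixing the parity to
    // even (KEY_PARITY), so rebuild the SEC1 compressed encoding with the
    // even-y tag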
    let mut compressed_point = <ProjectivePoint as GroupEncoding>::Repr::default();
    compressed_point[0] = u8::from(sec1::Tag::CompressedEvenY);
    compressed_point[1 ..].copy_from_slice(last_key_x_coordinate.as_slice());

    let key =
      Option::from(ProjectivePoint::from_bytes(&compressed_point)).ok_or(Error::ConnectionError)?;
    Ok(Some(key))
  }

  pub async fn in_instructions(
    &self,
    block: u64,
    allowed_tokens: &HashSet<[u8; 20]>,
  ) -> Result<Vec<InInstruction>, Error> {
    let Some(key_at_end_of_block) = self.key_at_end_of_block(block).await? else {
      return Ok(vec![]);
    };

    let filter = Filter::new().from_block(block).to_block(block).address(self.1);
    let filter = filter.event_signature(InInstructionEvent::SIGNATURE_HASH);
    let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

    let mut transfer_check = HashSet::new();
    let mut in_instructions = vec![];
    for log in logs {
      // Double check the address which emitted this log
      if log.address() != self.1 {
        Err(Error::ConnectionError)?;
      }

      let id = (
        log.block_hash.ok_or(Error::ConnectionError)?.into(),
        log.log_index.ok_or(Error::ConnectionError)?,
      );

      let tx_hash = log.transaction_hash.ok_or(Error::ConnectionError)?;
      let tx = self
        .0
        .get_transaction_by_hash(tx_hash)
        .await
        .ok()
        .flatten()
        .ok_or(Error::ConnectionError)?;

      let log =
        log.log_decode::<InInstructionEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

      let coin = if log.coin.0 == [0; 20] {
        Coin::Ether
      } else {
        let token = *log.coin.0;

        if !allowed_tokens.contains(&token) {
          continue;
        }

        // If this also counts as a top-level transfer via the token, drop it
        //
        // Necessary in order to handle a potential edge case with some theoretical token
        // implementations
        //
        // This will either let it be handled by the top-level transfer hook or will drop it
        // entirely on the side of caution
        if tx.to == Some(token.into()) {
          continue;
        }

        // Get all logs for this TX
        let receipt = self
          .0
          .get_transaction_receipt(tx_hash)
          .await
          .map_err(|_| Error::ConnectionError)?
          .ok_or(Error::ConnectionError)?;
        let tx_logs = receipt.inner.logs();

        // Find a matching transfer log
        let mut found_transfer = false;
        for tx_log in tx_logs {
          let log_index = tx_log.log_index.ok_or(Error::ConnectionError)?;
          // Ensure we didn't already use this transfer to check a distinct InInstruction event
          if transfer_check.contains(&log_index) {
            continue;
          }

          // Check if this log is from the token we expected to be transferred
          if tx_log.address().0 != token {
            continue;
          }
          // Check if this is a transfer log
          // https://github.com/alloy-rs/core/issues/589
          if tx_log.topics()[0] != Transfer::SIGNATURE_HASH {
            continue;
          }
          let Ok(transfer) = Transfer::decode_log(&tx_log.inner.clone(), true) else { continue };
          // Check if this is a transfer to us for the expected amount
          if (transfer.to == self.1) && (transfer.value == log.amount) {
            transfer_check.insert(log_index);
            found_transfer = true;
            break;
          }
        }
        if !found_transfer {
          // This shouldn't be a ConnectionError
          // This is an exploit, a non-conforming ERC20, or an invalid connection
          // This should halt the process which is sufficient, yet this is sub-optimal
          // TODO
          Err(Error::ConnectionError)?;
        }

        Coin::Erc20(token)
      };

      in_instructions.push(InInstruction {
        id,
        from: *log.from.0,
        coin,
        amount: log.amount,
        data: log.instruction.as_ref().to_vec(),
        key_at_end_of_block,
      });
    }

    Ok(in_instructions)
  }

  pub async fn executed_commands(&self, block: u64) -> Result<Vec<Executed>, Error> {
    let mut res = vec![];

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(SeraiKeyUpdated::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log =
          log.log_decode::<SeraiKeyUpdated>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    {
      let filter = Filter::new().from_block(block).to_block(block).address(self.1);
      let filter = filter.event_signature(ExecutedEvent::SIGNATURE_HASH);
      let logs = self.0.get_logs(&filter).await.map_err(|_| Error::ConnectionError)?;

      for log in logs {
        // Double check the address which emitted this log
        if log.address() != self.1 {
          Err(Error::ConnectionError)?;
        }

        let tx_id = log.transaction_hash.ok_or(Error::ConnectionError)?.into();

        let log = log.log_decode::<ExecutedEvent>().map_err(|_| Error::ConnectionError)?.inner.data;

        let mut signature = [0; 64];
        signature[.. 32].copy_from_slice(log.signature.c.as_ref());
        signature[32 ..].copy_from_slice(log.signature.s.as_ref());
        res.push(Executed {
          tx_id,
          nonce: log.nonce.try_into().map_err(|_| Error::ConnectionError)?,
          signature,
        });
      }
    }

    Ok(res)
  }

  #[cfg(feature = "tests")]
  pub fn key_updated_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(SeraiKeyUpdated::SIGNATURE_HASH)
  }
  #[cfg(feature = "tests")]
  pub fn executed_filter(&self) -> Filter {
    Filter::new().address(self.1).event_signature(ExecutedEvent::SIGNATURE_HASH)
  }
}
@@ -1,13 +0,0 @@
use alloy_sol_types::sol;

#[rustfmt::skip]
#[allow(warnings)]
#[allow(needless_pass_by_value)]
#[allow(clippy::all)]
#[allow(clippy::ignored_unit_patterns)]
#[allow(clippy::redundant_closure_for_method_calls)]
mod schnorr_container {
  use super::*;
  sol!("src/tests/contracts/Schnorr.sol");
}
pub(crate) use schnorr_container::TestSchnorr as schnorr;
@@ -1,15 +0,0 @@
// SPDX-License-Identifier: AGPLv3
pragma solidity ^0.8.0;

import "../../../contracts/Schnorr.sol";

contract TestSchnorr {
  function verify(
    bytes32 px,
    bytes calldata message,
    bytes32 c,
    bytes32 s
  ) external pure returns (bool) {
    return Schnorr.verify(px, message, c, s);
  }
}
@@ -1,105 +0,0 @@
use rand_core::OsRng;

use group::ff::{Field, PrimeField};
use k256::{
  ecdsa::{
    self, hazmat::SignPrimitive, signature::hazmat::PrehashVerifier, SigningKey, VerifyingKey,
  },
  Scalar, ProjectivePoint,
};

use frost::{
  curve::{Ciphersuite, Secp256k1},
  algorithm::{Hram, IetfSchnorr},
  tests::{algorithm_machines, sign},
};

use crate::{crypto::*, tests::key_gen};

// The ecrecover opcode, yet with parity replacing v
pub(crate) fn ecrecover(message: Scalar, odd_y: bool, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
  let sig = ecdsa::Signature::from_scalars(r, s).ok()?;
  let message: [u8; 32] = message.to_repr().into();
  alloy_core::primitives::Signature::from_signature_and_parity(
    sig,
    alloy_core::primitives::Parity::Parity(odd_y),
  )
  .ok()?
  .recover_address_from_prehash(&alloy_core::primitives::B256::from(message))
  .ok()
  .map(Into::into)
}

#[test]
fn test_ecrecover() {
  let private = SigningKey::random(&mut OsRng);
  let public = VerifyingKey::from(&private);

  // Sign the message
  const MESSAGE: &[u8] = b"Hello, World!";
  let (sig, recovery_id) = private
    .as_nonzero_scalar()
    .try_sign_prehashed(
      <Secp256k1 as Ciphersuite>::F::random(&mut OsRng),
      &keccak256(MESSAGE).into(),
    )
    .unwrap();

  // Sanity check the signature verifies
  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
  {
    assert_eq!(public.verify_prehash(&keccak256(MESSAGE), &sig).unwrap(), ());
  }

  // Perform the ecrecover
  assert_eq!(
    ecrecover(
      hash_to_scalar(MESSAGE),
      u8::from(recovery_id.unwrap().is_y_odd()) == 1,
      *sig.r(),
      *sig.s()
    )
    .unwrap(),
    address(&ProjectivePoint::from(public.as_affine()))
  );
}

// Run the sign test with the EthereumHram
#[test]
fn test_signing() {
  let (keys, _) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let _sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
}

#[allow(non_snake_case)]
pub fn preprocess_signature_for_ecrecover(
  R: ProjectivePoint,
  public_key: &PublicKey,
  m: &[u8],
  s: Scalar,
) -> (Scalar, Scalar) {
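  // Mirrors the sa/ca values the Schnorr contract passes to ecrecover
  // (there computed as Q - mulmod(s, px, Q) and Q - mulmod(c, px, Q))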
  let c = EthereumHram::hram(&R, &public_key.A, m);
  let sa = -(s * public_key.px);
  let ca = -(c * public_key.px);
  (sa, ca)
}

#[test]
fn test_ecrecover_hack() {
  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);

  let (sa, ca) = preprocess_signature_for_ecrecover(sig.R, &public_key, MESSAGE, sig.s);
  let q = ecrecover(sa, false, public_key.px, ca).unwrap();
  assert_eq!(q, address(&sig.R));
}
@@ -1,93 +0,0 @@
use std::sync::Arc;

use rand_core::OsRng;

use group::ff::PrimeField;
use k256::Scalar;

use frost::{
  curve::Secp256k1,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use alloy_core::primitives::Address;

use alloy_sol_types::SolCall;

use alloy_rpc_types_eth::{TransactionInput, TransactionRequest};
use alloy_simple_request_transport::SimpleRequest;
use alloy_rpc_client::ClientBuilder;
use alloy_provider::{Provider, RootProvider};

use alloy_node_bindings::{Anvil, AnvilInstance};

use crate::{
  Error,
  crypto::*,
  tests::{key_gen, deploy_contract, abi::schnorr as abi},
};

async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
  let anvil = Anvil::new().spawn();

  let provider = RootProvider::new(
    ClientBuilder::default().transport(SimpleRequest::new(anvil.endpoint()), true),
  );
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let address = deploy_contract(client.clone(), &wallet, "TestSchnorr").await.unwrap();
  (anvil, client, address)
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
}

pub async fn call_verify(
  provider: &RootProvider<SimpleRequest>,
  contract: Address,
  public_key: &PublicKey,
  message: &[u8],
  signature: &Signature,
) -> Result<(), Error> {
  let px: [u8; 32] = public_key.px.to_repr().into();
  let c_bytes: [u8; 32] = signature.c.to_repr().into();
  let s_bytes: [u8; 32] = signature.s.to_repr().into();
  let call = TransactionRequest::default().to(contract).input(TransactionInput::new(
    abi::verifyCall::new((px.into(), message.to_vec().into(), c_bytes.into(), s_bytes.into()))
      .abi_encode()
      .into(),
  ));
  let bytes = provider.call(&call).await.map_err(|_| Error::ConnectionError)?;
  let res =
    abi::verifyCall::abi_decode_returns(&bytes, true).map_err(|_| Error::ConnectionError)?;

  if res._0 {
    Ok(())
  } else {
    Err(Error::InvalidSignature)
  }
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (_anvil, client, contract) = setup_test().await;

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig =
    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
  let sig = Signature::new(&public_key, MESSAGE, sig).unwrap();

  call_verify(&client, contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&client, contract, &public_key, MESSAGE, &sig).await.is_err());
}
|
||||
@@ -1,395 +0,0 @@
use std_shims::{vec, vec::Vec, sync::OnceLock};

use rand_core::{RngCore, CryptoRng};
use zeroize::Zeroize;
use subtle::{Choice, ConditionallySelectable};

use curve25519_dalek::{
  constants::{ED25519_BASEPOINT_POINT, ED25519_BASEPOINT_TABLE},
  scalar::Scalar,
  edwards::EdwardsPoint,
};

use monero_generators::{H, Generators};
use monero_primitives::{INV_EIGHT, Commitment, keccak256_to_scalar};

use crate::{core::*, ScalarVector, batch_verifier::BulletproofsBatchVerifier};

include!(concat!(env!("OUT_DIR"), "/generators.rs"));

static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
fn TWO_N() -> &'static ScalarVector {
  TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), COMMITMENT_BITS))
}

static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
fn IP12() -> Scalar {
  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; COMMITMENT_BITS]).inner_product(TWO_N()))
}

fn MN(outputs: usize) -> (usize, usize, usize) {
  let mut logM = 0;
  let mut M;
  while {
    M = 1 << logM;
    (M <= MAX_COMMITMENTS) && (M < outputs)
  } {
    logM += 1;
  }

  (logM + LOG_COMMITMENT_BITS, M, M * COMMITMENT_BITS)
}
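
// A worked example (illustrative, assuming 64-bit amounts, i.e. COMMITMENT_BITS = 64 and
// LOG_COMMITMENT_BITS = 6): 3 outputs pad to the next power of two, M = 4, so MN(3) returns
// (2 + 6, 4, 256).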

fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
  let (_, M, MN) = MN(commitments.len());

  let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
  let mut aL = ScalarVector::new(MN);
  let mut aR = ScalarVector::new(MN);

  for j in 0 .. M {
    for i in (0 .. COMMITMENT_BITS).rev() {
      let bit =
        if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
      aL.0[(j * COMMITMENT_BITS) + i] =
        Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
      aR.0[(j * COMMITMENT_BITS) + i] =
        Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
    }
  }

  (aL, aR)
}
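
// Context (added note): aL is the little-endian bit decomposition of each amount and
// aR = aL - 1 elementwise, so aL ∘ aR = 0 and aL - aR = 1^n, the two vector relations the
// range proof constrains.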

fn hash_commitments<C: IntoIterator<Item = EdwardsPoint>>(
  commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
  let V = commitments.into_iter().map(|c| c * INV_EIGHT()).collect::<Vec<_>>();
  (keccak256_to_scalar(V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}

fn alpha_rho<R: RngCore + CryptoRng>(
  rng: &mut R,
  generators: &Generators,
  aL: &ScalarVector,
  aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
  fn vector_exponent(generators: &Generators, a: &ScalarVector, b: &ScalarVector) -> EdwardsPoint {
    debug_assert_eq!(a.len(), b.len());
    (a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
  }

  let ar = Scalar::random(rng);
  (ar, (vector_exponent(generators, aL, aR) + (ED25519_BASEPOINT_TABLE * &ar)) * INV_EIGHT())
}

fn LR_statements(
  a: &ScalarVector,
  G_i: &[EdwardsPoint],
  b: &ScalarVector,
  H_i: &[EdwardsPoint],
  cL: Scalar,
  U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
  let mut res = a
    .0
    .iter()
    .copied()
    .zip(G_i.iter().copied())
    .chain(b.0.iter().copied().zip(H_i.iter().copied()))
    .collect::<Vec<_>>();
  res.push((cL, U));
  res
}

fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
  let slice =
    &[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
      .concat();
  *cache = keccak256_to_scalar(slice);
  *cache
}

fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
  }
  res
}
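
// Context (added note): each inner-product round folds the generator vectors with the round
// challenge, G'[i] = (w_inv * G_L[i]) + (w * G_R[i]), halving their length until a single
// element remains; this is why L and R each carry log2(MN) points.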

/// Internal structure representing a Bulletproof, as defined by Monero.
#[doc(hidden)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
  pub(crate) A: EdwardsPoint,
  pub(crate) S: EdwardsPoint,
  pub(crate) T1: EdwardsPoint,
  pub(crate) T2: EdwardsPoint,
  pub(crate) tau_x: Scalar,
  pub(crate) mu: Scalar,
  pub(crate) L: Vec<EdwardsPoint>,
  pub(crate) R: Vec<EdwardsPoint>,
  pub(crate) a: Scalar,
  pub(crate) b: Scalar,
  pub(crate) t: Scalar,
}

impl OriginalStruct {
  pub(crate) fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    commitments: &[Commitment],
  ) -> OriginalStruct {
    let (logMN, M, MN) = MN(commitments.len());

    let (aL, aR) = bit_decompose(commitments);
    let commitments_points = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
    let (mut cache, _) = hash_commitments(commitments_points.clone());

    let (sL, sR) =
      ScalarVector((0 .. (MN * 2)).map(|_| Scalar::random(&mut *rng)).collect::<Vec<_>>()).split();

    let generators = GENERATORS();
    let (mut alpha, A) = alpha_rho(&mut *rng, generators, &aL, &aR);
    let (mut rho, S) = alpha_rho(&mut *rng, generators, &sL, &sR);

    let y = hash_cache(&mut cache, &[A.compress().to_bytes(), S.compress().to_bytes()]);
    let mut cache = keccak256_to_scalar(y.to_bytes());
    let z = cache;

    let l0 = aL - z;
    let l1 = sL;

    let mut zero_twos = Vec::with_capacity(MN);
    let zpow = ScalarVector::powers(z, M + 2);
    for j in 0 .. M {
      for i in 0 .. COMMITMENT_BITS {
        zero_twos.push(zpow[j + 2] * TWO_N()[i]);
      }
    }

    let yMN = ScalarVector::powers(y, MN);
    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
    let r1 = yMN * &sR;

    let (T1, T2, x, mut tau_x) = {
      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
      let t2 = l1.clone().inner_product(&r1);

      let mut tau1 = Scalar::random(&mut *rng);
      let mut tau2 = Scalar::random(&mut *rng);

      let T1 = multiexp(&[(t1, H()), (tau1, ED25519_BASEPOINT_POINT)]) * INV_EIGHT();
      let T2 = multiexp(&[(t2, H()), (tau2, ED25519_BASEPOINT_POINT)]) * INV_EIGHT();

      let x =
        hash_cache(&mut cache, &[z.to_bytes(), T1.compress().to_bytes(), T2.compress().to_bytes()]);

      let tau_x = (tau2 * (x * x)) + (tau1 * x);

      tau1.zeroize();
      tau2.zeroize();
      (T1, T2, x, tau_x)
    };

    let mu = (x * rho) + alpha;
    alpha.zeroize();
    rho.zeroize();

    for (i, gamma) in commitments.iter().map(|c| c.mask).enumerate() {
      tau_x += zpow[i + 2] * gamma;
    }

    let l = l0 + &(l1 * x);
    let r = r0 + &(r1 * x);

    let t = l.clone().inner_product(&r);

    let x_ip =
      hash_cache(&mut cache, &[x.to_bytes(), tau_x.to_bytes(), mu.to_bytes(), t.to_bytes()]);

    let mut a = l;
    let mut b = r;

    let yinv = y.invert();
    let yinvpow = ScalarVector::powers(yinv, MN);

    let mut G_proof = generators.G[.. a.len()].to_vec();
    let mut H_proof = generators.H[.. a.len()].to_vec();
    H_proof.iter_mut().zip(yinvpow.0.iter()).for_each(|(this_H, yinvpow)| *this_H *= yinvpow);
    let U = H() * x_ip;

    let mut L = Vec::with_capacity(logMN);
    let mut R = Vec::with_capacity(logMN);

    while a.len() != 1 {
      let (aL, aR) = a.split();
      let (bL, bR) = b.split();

      let cL = aL.clone().inner_product(&bR);
      let cR = aR.clone().inner_product(&bL);

      let (G_L, G_R) = G_proof.split_at(aL.len());
      let (H_L, H_R) = H_proof.split_at(aL.len());

      let L_i = multiexp(&LR_statements(&aL, G_R, &bR, H_L, cL, U)) * INV_EIGHT();
      let R_i = multiexp(&LR_statements(&aR, G_L, &bL, H_R, cR, U)) * INV_EIGHT();
      L.push(L_i);
      R.push(R_i);

      let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
      let w_inv = w.invert();

      a = (aL * w) + &(aR * w_inv);
      b = (bL * w_inv) + &(bR * w);

      if a.len() != 1 {
        G_proof = hadamard_fold(G_L, G_R, w_inv, w);
        H_proof = hadamard_fold(H_L, H_R, w, w_inv);
      }
    }

    let res = OriginalStruct { A, S, T1, T2, tau_x, mu, L, R, a: a[0], b: b[0], t };

    #[cfg(debug_assertions)]
    {
      let mut verifier = BulletproofsBatchVerifier::default();
      debug_assert!(res.verify(rng, &mut verifier, &commitments_points));
      debug_assert!(verifier.verify());
    }

    res
  }

  #[must_use]
  pub(crate) fn verify<R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    verifier: &mut BulletproofsBatchVerifier,
    commitments: &[EdwardsPoint],
  ) -> bool {
    // Verify commitments are valid
    if commitments.is_empty() || (commitments.len() > MAX_COMMITMENTS) {
      return false;
    }

    // Verify L and R are properly sized
    if self.L.len() != self.R.len() {
      return false;
    }

    let (logMN, M, MN) = MN(commitments.len());
    if self.L.len() != logMN {
      return false;
    }

    // Rebuild all challenges
    let (mut cache, commitments) = hash_commitments(commitments.iter().copied());
    let y = hash_cache(&mut cache, &[self.A.compress().to_bytes(), self.S.compress().to_bytes()]);

    let z = keccak256_to_scalar(y.to_bytes());
    cache = z;

    let x = hash_cache(
      &mut cache,
      &[z.to_bytes(), self.T1.compress().to_bytes(), self.T2.compress().to_bytes()],
    );

    let x_ip = hash_cache(
      &mut cache,
      &[x.to_bytes(), self.tau_x.to_bytes(), self.mu.to_bytes(), self.t.to_bytes()],
    );

    let mut w_and_w_inv = Vec::with_capacity(logMN);
    for (L, R) in self.L.iter().zip(&self.R) {
      let w = hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]);
      let w_inv = w.invert();
      w_and_w_inv.push((w, w_inv));
    }

    // Convert the proof from * INV_EIGHT to its actual form
    let normalize = |point: &EdwardsPoint| point.mul_by_cofactor();

    let L = self.L.iter().map(normalize).collect::<Vec<_>>();
    let R = self.R.iter().map(normalize).collect::<Vec<_>>();
    let T1 = normalize(&self.T1);
    let T2 = normalize(&self.T2);
    let A = normalize(&self.A);
    let S = normalize(&self.S);

    let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();

    // Verify it
    let zpow = ScalarVector::powers(z, M + 3);

    // First multiexp
    {
      let verifier_weight = Scalar::random(rng);

      let ip1y = ScalarVector::powers(y, M * COMMITMENT_BITS).sum();
      let mut k = -(zpow[2] * ip1y);
      for j in 1 ..= M {
        k -= zpow[j + 2] * IP12();
      }
      let y1 = self.t - ((z * ip1y) + k);
      verifier.0.h -= verifier_weight * y1;

      verifier.0.g -= verifier_weight * self.tau_x;

      for (j, commitment) in commitments.iter().enumerate() {
        verifier.0.other.push((verifier_weight * zpow[j + 2], *commitment));
      }

      verifier.0.other.push((verifier_weight * x, T1));
      verifier.0.other.push((verifier_weight * (x * x), T2));
    }

    // Second multiexp
    {
      let verifier_weight = Scalar::random(rng);
      let z3 = (self.t - (self.a * self.b)) * x_ip;
      verifier.0.h += verifier_weight * z3;
      verifier.0.g -= verifier_weight * self.mu;

      verifier.0.other.push((verifier_weight, A));
      verifier.0.other.push((verifier_weight * x, S));

      {
        let ypow = ScalarVector::powers(y, MN);
        let yinv = y.invert();
        let yinvpow = ScalarVector::powers(yinv, MN);

        let w_cache = challenge_products(&w_and_w_inv);

        while verifier.0.g_bold.len() < MN {
          verifier.0.g_bold.push(Scalar::ZERO);
        }
        while verifier.0.h_bold.len() < MN {
          verifier.0.h_bold.push(Scalar::ZERO);
        }

        for i in 0 .. MN {
          let g = (self.a * w_cache[i]) + z;
          verifier.0.g_bold[i] -= verifier_weight * g;

          let mut h = self.b * yinvpow[i] * w_cache[(!i) & (MN - 1)];
          h -= ((zpow[(i / COMMITMENT_BITS) + 2] * TWO_N()[i % COMMITMENT_BITS]) + (z * ypow[i])) *
            yinvpow[i];
          verifier.0.h_bold[i] -= verifier_weight * h;
        }
      }

      for i in 0 .. logMN {
        verifier.0.other.push((verifier_weight * (w_and_w_inv[i].0 * w_and_w_inv[i].0), L[i]));
        verifier.0.other.push((verifier_weight * (w_and_w_inv[i].1 * w_and_w_inv[i].1), R[i]));
      }
    }

    true
  }
}
@@ -1,105 +0,0 @@
use hex_literal::hex;
use rand_core::OsRng;

use curve25519_dalek::scalar::Scalar;

use monero_io::decompress_point;
use monero_primitives::Commitment;

use crate::{batch_verifier::BatchVerifier, original::OriginalStruct, Bulletproof, BulletproofError};

mod plus;

#[test]
fn bulletproofs_vector() {
  let scalar = |scalar| Scalar::from_canonical_bytes(scalar).unwrap();
  let point = |point| decompress_point(point).unwrap();

  // Generated from Monero
  assert!(Bulletproof::Original(OriginalStruct {
    A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
    S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
    T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
    T2: point(hex!("b8390aa4b60b255630d40e592f55ec6b7ab5e3a96bfcdcd6f1cd1d2fc95f441e")),
    tau_x: scalar(hex!("5957dba8ea9afb23d6e81cc048a92f2d502c10c749dc1b2bd148ae8d41ec7107")),
    mu: scalar(hex!("923023b234c2e64774b820b4961f7181f6c1dc152c438643e5a25b0bf271bc02")),
    L: vec![
      point(hex!("c45f656316b9ebf9d357fb6a9f85b5f09e0b991dd50a6e0ae9b02de3946c9d99")),
      point(hex!("9304d2bf0f27183a2acc58cc755a0348da11bd345485fda41b872fee89e72aac")),
      point(hex!("1bb8b71925d155dd9569f64129ea049d6149fdc4e7a42a86d9478801d922129b")),
      point(hex!("5756a7bf887aa72b9a952f92f47182122e7b19d89e5dd434c747492b00e1c6b7")),
      point(hex!("6e497c910d102592830555356af5ff8340e8d141e3fb60ea24cfa587e964f07d")),
      point(hex!("f4fa3898e7b08e039183d444f3d55040f3c790ed806cb314de49f3068bdbb218")),
      point(hex!("0bbc37597c3ead517a3841e159c8b7b79a5ceaee24b2a9a20350127aab428713")),
    ],
    R: vec![
      point(hex!("609420ba1702781692e84accfd225adb3d077aedc3cf8125563400466b52dbd9")),
      point(hex!("fb4e1d079e7a2b0ec14f7e2a3943bf50b6d60bc346a54fcf562fb234b342abf8")),
      point(hex!("6ae3ac97289c48ce95b9c557289e82a34932055f7f5e32720139824fe81b12e5")),
      point(hex!("d071cc2ffbdab2d840326ad15f68c01da6482271cae3cf644670d1632f29a15c")),
      point(hex!("e52a1754b95e1060589ba7ce0c43d0060820ebfc0d49dc52884bc3c65ad18af5")),
      point(hex!("41573b06140108539957df71aceb4b1816d2409ce896659aa5c86f037ca5e851")),
      point(hex!("a65970b2cc3c7b08b2b5b739dbc8e71e646783c41c625e2a5b1535e3d2e0f742")),
    ],
    a: scalar(hex!("0077c5383dea44d3cd1bc74849376bd60679612dc4b945255822457fa0c0a209")),
    b: scalar(hex!("fe80cf5756473482581e1d38644007793ddc66fdeb9404ec1689a907e4863302")),
    t: scalar(hex!("40dfb08e09249040df997851db311bd6827c26e87d6f0f332c55be8eef10e603"))
  })
  .verify(
    &mut OsRng,
    &[
      // For some reason, these vectors are * INV_EIGHT
      point(hex!("8e8f23f315edae4f6c2f948d9a861e0ae32d356b933cd11d2f0e031ac744c41f"))
        .mul_by_cofactor(),
      point(hex!("2829cbd025aa54cd6e1b59a032564f22f0b2e5627f7f2c4297f90da438b5510f"))
        .mul_by_cofactor(),
    ]
  ));
}

macro_rules! bulletproofs_tests {
  ($name: ident, $max: ident, $plus: literal) => {
    #[test]
    fn $name() {
      // Create Bulletproofs for all possible output quantities
      let mut verifier = BatchVerifier::new();
      for i in 1 ..= 16 {
        let commitments = (1 ..= i)
          .map(|i| Commitment::new(Scalar::random(&mut OsRng), u64::try_from(i).unwrap()))
          .collect::<Vec<_>>();

        let bp = if $plus {
          Bulletproof::prove_plus(&mut OsRng, commitments.clone()).unwrap()
        } else {
          Bulletproof::prove(&mut OsRng, &commitments).unwrap()
        };

        let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
        assert!(bp.verify(&mut OsRng, &commitments));
        assert!(bp.batch_verify(&mut OsRng, &mut verifier, &commitments));
      }
      assert!(verifier.verify());
    }

    #[test]
    fn $max() {
      // Check Bulletproofs errors if we try to prove for too many outputs
      let mut commitments = vec![];
      for _ in 0 .. 17 {
        commitments.push(Commitment::new(Scalar::ZERO, 0));
      }
      assert_eq!(
        (if $plus {
          Bulletproof::prove_plus(&mut OsRng, commitments)
        } else {
          Bulletproof::prove(&mut OsRng, &commitments)
        })
        .unwrap_err(),
        BulletproofError::TooManyCommitments,
      );
    }
  };
}

bulletproofs_tests!(bulletproofs, bulletproofs_max, false);
bulletproofs_tests!(bulletproofs_plus, bulletproofs_plus_max, true);
@@ -1,895 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]
#![cfg_attr(not(feature = "std"), no_std)]

use core::fmt::Debug;
use std_shims::{
  alloc::{boxed::Box, format},
  vec,
  vec::Vec,
  io,
  string::{String, ToString},
};

use zeroize::Zeroize;

use async_trait::async_trait;

use curve25519_dalek::edwards::EdwardsPoint;

use serde::{Serialize, Deserialize, de::DeserializeOwned};
use serde_json::{Value, json};

use monero_serai::{
  io::*,
  transaction::{Input, Timelock, Transaction},
  block::Block,
};

// Number of blocks the fee estimate will be valid for
// https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
// src/wallet/wallet2.cpp#L121
const GRACE_BLOCKS_FOR_FEE_ESTIMATE: u64 = 10;

/// An error from the RPC.
#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum RpcError {
  /// An internal error.
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(String),
  /// A connection error with the node.
  #[cfg_attr(feature = "std", error("connection error ({0})"))]
  ConnectionError(String),
  /// The node is invalid per the expected protocol.
  #[cfg_attr(feature = "std", error("invalid node ({0})"))]
  InvalidNode(String),
  /// Requested transactions weren't found.
  #[cfg_attr(feature = "std", error("transactions not found"))]
  TransactionsNotFound(Vec<[u8; 32]>),
  /// The transaction was pruned.
  ///
  /// Pruned transactions are not supported at this time.
  #[cfg_attr(feature = "std", error("pruned transaction"))]
  PrunedTransaction,
  /// A transaction (sent or received) was invalid.
  #[cfg_attr(feature = "std", error("invalid transaction ({0:?})"))]
  InvalidTransaction([u8; 32]),
  /// The returned fee was unusable.
  #[cfg_attr(feature = "std", error("unexpected fee response"))]
  InvalidFee,
  /// The priority intended for use wasn't usable.
  #[cfg_attr(feature = "std", error("invalid priority"))]
  InvalidPriority,
}

/// A struct containing a fee rate.
///
/// The fee rate is defined as a per-weight cost, along with a mask for rounding purposes.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct FeeRate {
  /// The fee per-weight of the transaction.
  pub per_weight: u64,
  /// The mask to round with.
  pub mask: u64,
}

impl FeeRate {
  /// Construct a new fee rate.
  pub fn new(per_weight: u64, mask: u64) -> Result<FeeRate, RpcError> {
    if (per_weight == 0) || (mask == 0) {
      Err(RpcError::InvalidFee)?;
    }
    Ok(FeeRate { per_weight, mask })
  }

  /// Write the FeeRate.
  ///
  /// This is not a Monero protocol defined struct, and this is accordingly not a Monero protocol
  /// defined serialization.
  pub fn write(&self, w: &mut impl io::Write) -> io::Result<()> {
    w.write_all(&self.per_weight.to_le_bytes())?;
    w.write_all(&self.mask.to_le_bytes())
  }

  /// Serialize the FeeRate to a `Vec<u8>`.
  ///
  /// This is not a Monero protocol defined struct, and this is accordingly not a Monero protocol
  /// defined serialization.
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = Vec::with_capacity(16);
    self.write(&mut res).unwrap();
    res
  }

  /// Read a FeeRate.
  ///
  /// This is not a Monero protocol defined struct, and this is accordingly not a Monero protocol
  /// defined serialization.
  pub fn read(r: &mut impl io::Read) -> io::Result<FeeRate> {
    Ok(FeeRate { per_weight: read_u64(r)?, mask: read_u64(r)? })
  }

  /// Calculate the fee to use from the weight.
  ///
  /// This function may panic if any of the `FeeRate`'s fields are zero.
  pub fn calculate_fee_from_weight(&self, weight: usize) -> u64 {
    let fee = self.per_weight * u64::try_from(weight).unwrap();
    let fee = ((fee + self.mask - 1) / self.mask) * self.mask;
    debug_assert_eq!(weight, self.calculate_weight_from_fee(fee), "Miscalculated weight from fee");
    fee
  }
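
  // A worked example (illustrative): with per_weight = 8001 and mask = 10000, a weight of 3000
  // gives a raw fee of 24_003_000, rounded up to the next multiple of the mask, 24_010_000;
  // 24_010_000 / 8001 recovers a weight of 3000, satisfying the debug_assert above.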

  /// Calculate the weight from the fee.
  ///
  /// This function may panic if any of the `FeeRate`'s fields are zero.
  pub fn calculate_weight_from_fee(&self, fee: u64) -> usize {
    usize::try_from(fee / self.per_weight).unwrap()
  }
}

/// The priority for the fee.
///
/// Higher-priority transactions will be included in blocks earlier.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[allow(non_camel_case_types)]
pub enum FeePriority {
  /// The `Unimportant` priority, as defined by Monero.
  Unimportant,
  /// The `Normal` priority, as defined by Monero.
  Normal,
  /// The `Elevated` priority, as defined by Monero.
  Elevated,
  /// The `Priority` priority, as defined by Monero.
  Priority,
  /// A custom priority.
  Custom {
    /// The numeric representation of the priority, as used within the RPC.
    priority: u32,
  },
}

/// https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
/// src/simplewallet/simplewallet.cpp#L161
impl FeePriority {
  pub(crate) fn fee_priority(&self) -> u32 {
    match self {
      FeePriority::Unimportant => 1,
      FeePriority::Normal => 2,
      FeePriority::Elevated => 3,
      FeePriority::Priority => 4,
      FeePriority::Custom { priority, .. } => *priority,
    }
  }
}

#[derive(Deserialize, Debug)]
struct EmptyResponse {}
#[derive(Deserialize, Debug)]
struct JsonRpcResponse<T> {
  result: T,
}

#[derive(Deserialize, Debug)]
struct TransactionResponse {
  tx_hash: String,
  as_hex: String,
  pruned_as_hex: String,
}
#[derive(Deserialize, Debug)]
struct TransactionsResponse {
  #[serde(default)]
  missed_tx: Vec<String>,
  txs: Vec<TransactionResponse>,
}

/// The response to an output query.
#[derive(Deserialize, Debug)]
pub struct OutputResponse {
  /// The height of the block this output was added to the chain in.
  pub height: usize,
  /// If the output is unlocked, per the node's local view.
  pub unlocked: bool,
  /// The output's key.
  pub key: String,
  /// The output's commitment.
  pub mask: String,
  /// The transaction which created this output.
  pub txid: String,
}

fn rpc_hex(value: &str) -> Result<Vec<u8>, RpcError> {
  hex::decode(value).map_err(|_| RpcError::InvalidNode("expected hex wasn't hex".to_string()))
}

fn hash_hex(hash: &str) -> Result<[u8; 32], RpcError> {
  rpc_hex(hash)?.try_into().map_err(|_| RpcError::InvalidNode("hash wasn't 32-bytes".to_string()))
}

fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
  decompress_point(
    rpc_hex(point)?
      .try_into()
      .map_err(|_| RpcError::InvalidNode(format!("invalid point: {point}")))?,
  )
  .ok_or_else(|| RpcError::InvalidNode(format!("invalid point: {point}")))
}

// Read an EPEE VarInt, distinct from the VarInts used throughout the rest of the protocol
fn read_epee_vi<R: io::Read>(reader: &mut R) -> io::Result<u64> {
  let vi_start = read_byte(reader)?;
  let len = match vi_start & 0b11 {
    0 => 1,
    1 => 2,
    2 => 4,
    3 => 8,
    _ => unreachable!(),
  };
  let mut vi = u64::from(vi_start >> 2);
  for i in 1 .. len {
    vi |= u64::from(read_byte(reader)?) << (((i - 1) * 8) + 6);
  }
  Ok(vi)
}
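
// A worked decoding example (illustrative): for the bytes [0xb1, 0x04], the low two bits of
// 0xb1 (0b01) select a 2-byte VarInt; 0xb1 >> 2 contributes 44 and the second byte contributes
// 4 << 6 = 256, decoding to 300 (which correspondingly encodes as (300 << 2) | 1).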

/// An RPC connection to a Monero daemon.
///
/// This is abstract such that users can use an HTTP library of their choice, a Tor/i2p-based
/// transport, or even a memory buffer an external service somehow routes.
///
|
||||
/// While no implementors are directly provided, [monero-simple-request-rpc](
|
||||
/// https://github.com/serai-dex/serai/tree/develop/coins/monero/rpc/simple-request
|
||||
/// ) is recommended.
|
||||
#[async_trait]
|
||||
pub trait Rpc: Sync + Clone + Debug {
|
||||
/// Perform a POST request to the specified route with the specified body.
|
||||
///
|
||||
/// The implementor is left to handle anything such as authentication.
|
||||
async fn post(&self, route: &str, body: Vec<u8>) -> Result<Vec<u8>, RpcError>;
|
||||
|
||||
/// Perform a RPC call to the specified route with the provided parameters.
|
||||
///
|
||||
  /// This is NOT a JSON-RPC call; JSON-RPC calls use the "json_rpc" route and are available via
  /// `json_rpc_call`.
  async fn rpc_call<Params: Send + Serialize + Debug, Response: DeserializeOwned + Debug>(
    &self,
    route: &str,
    params: Option<Params>,
  ) -> Result<Response, RpcError> {
    let res = self
      .post(
        route,
        if let Some(params) = params {
          serde_json::to_string(&params).unwrap().into_bytes()
        } else {
          vec![]
        },
      )
      .await?;
    let res_str = std_shims::str::from_utf8(&res)
      .map_err(|_| RpcError::InvalidNode("response wasn't utf-8".to_string()))?;
    serde_json::from_str(res_str)
      .map_err(|_| RpcError::InvalidNode(format!("response wasn't the expected json: {res_str}")))
  }

  /// Perform a JSON-RPC call with the specified method and the provided parameters.
  async fn json_rpc_call<Response: DeserializeOwned + Debug>(
    &self,
    method: &str,
    params: Option<Value>,
  ) -> Result<Response, RpcError> {
    let mut req = json!({ "method": method });
    if let Some(params) = params {
      req.as_object_mut().unwrap().insert("params".into(), params);
    }
    Ok(self.rpc_call::<_, JsonRpcResponse<Response>>("json_rpc", Some(req)).await?.result)
  }
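
  // Illustrative usage (a sketch; get_info is a standard monerod JSON-RPC method, though this
  // trait doesn't wrap it):
  //   let info: serde_json::Value = rpc.json_rpc_call("get_info", None).await?;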

  /// Perform a binary call to the specified route with the provided parameters.
  async fn bin_call(&self, route: &str, params: Vec<u8>) -> Result<Vec<u8>, RpcError> {
    self.post(route, params).await
  }

  /// Get the active blockchain protocol version.
  ///
  /// This is specifically the major version within the most recent block header.
  async fn get_hardfork_version(&self) -> Result<u8, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeaderResponse {
      major_version: u8,
    }

    #[derive(Deserialize, Debug)]
    struct LastHeaderResponse {
      block_header: HeaderResponse,
    }

    Ok(
      self
        .json_rpc_call::<LastHeaderResponse>("get_last_block_header", None)
        .await?
        .block_header
        .major_version,
    )
  }

  /// Get the height of the Monero blockchain.
  ///
  /// The height is defined as the number of blocks on the blockchain. For a blockchain with only
  /// its genesis block, the height will be 1.
  async fn get_height(&self) -> Result<usize, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeightResponse {
      height: usize,
    }
    Ok(self.rpc_call::<Option<()>, HeightResponse>("get_height", None).await?.height)
  }

  /// Get the specified transactions.
  ///
  /// The received transactions will be hashed in order to verify the correct transactions were
  /// returned.
  async fn get_transactions(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    if hashes.is_empty() {
      return Ok(vec![]);
    }

    let mut hashes_hex = hashes.iter().map(hex::encode).collect::<Vec<_>>();
    let mut all_txs = Vec::with_capacity(hashes.len());
    while !hashes_hex.is_empty() {
      // Monero errors if more than 100 transactions are requested unless using a non-restricted
      // RPC
      const TXS_PER_REQUEST: usize = 100;
      let this_count = TXS_PER_REQUEST.min(hashes_hex.len());

      let txs: TransactionsResponse = self
        .rpc_call(
          "get_transactions",
          Some(json!({
            "txs_hashes": hashes_hex.drain(.. this_count).collect::<Vec<_>>(),
          })),
        )
        .await?;

      if !txs.missed_tx.is_empty() {
        Err(RpcError::TransactionsNotFound(
          txs.missed_tx.iter().map(|hash| hash_hex(hash)).collect::<Result<_, _>>()?,
        ))?;
      }

      all_txs.extend(txs.txs);
    }

    all_txs
      .iter()
      .enumerate()
      .map(|(i, res)| {
        let tx = Transaction::read::<&[u8]>(
          &mut rpc_hex(if !res.as_hex.is_empty() { &res.as_hex } else { &res.pruned_as_hex })?
            .as_ref(),
        )
        .map_err(|_| match hash_hex(&res.tx_hash) {
          Ok(hash) => RpcError::InvalidTransaction(hash),
          Err(err) => err,
        })?;

        // https://github.com/monero-project/monero/issues/8311
        if res.as_hex.is_empty() {
          match tx.prefix().inputs.first() {
            Some(Input::Gen { .. }) => (),
            _ => Err(RpcError::PrunedTransaction)?,
          }
        }

        // This does run a few keccak256 hashes, which is pointless if the node is trusted
        // In exchange, this provides resilience against invalid/malicious nodes
        if tx.hash() != hashes[i] {
          Err(RpcError::InvalidNode(
"replied with transaction wasn't the requested transaction".to_string(),
|
||||
          ))?;
        }

        Ok(tx)
      })
      .collect()
  }

  /// Get the specified transaction.
  ///
  /// The received transaction will be hashed in order to verify the correct transaction was
  /// returned.
  async fn get_transaction(&self, tx: [u8; 32]) -> Result<Transaction, RpcError> {
    self.get_transactions(&[tx]).await.map(|mut txs| txs.swap_remove(0))
  }

  /// Get the hash of a block from the node.
  ///
  /// `number` is the block's zero-indexed position on the blockchain (`0` for the genesis block,
  /// `height - 1` for the latest block).
  async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockHeaderResponse {
      hash: String,
    }
    #[derive(Deserialize, Debug)]
    struct BlockHeaderByHeightResponse {
      block_header: BlockHeaderResponse,
    }

    let header: BlockHeaderByHeightResponse =
      self.json_rpc_call("get_block_header_by_height", Some(json!({ "height": number }))).await?;
    hash_hex(&header.block_header.hash)
  }

  /// Get a block from the node by its hash.
  ///
  /// The received block will be hashed in order to verify the correct block was returned.
  async fn get_block(&self, hash: [u8; 32]) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String,
    }

    let res: BlockResponse =
      self.json_rpc_call("get_block", Some(json!({ "hash": hex::encode(hash) }))).await?;

    let block = Block::read::<&[u8]>(&mut rpc_hex(&res.blob)?.as_ref())
      .map_err(|_| RpcError::InvalidNode("invalid block".to_string()))?;
    if block.hash() != hash {
      Err(RpcError::InvalidNode("different block than requested (hash)".to_string()))?;
    }
    Ok(block)
  }

  /// Get a block from the node by its number.
  ///
  /// `number` is the block's zero-indexed position on the blockchain (`0` for the genesis block,
  /// `height - 1` for the latest block).
  async fn get_block_by_number(&self, number: usize) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String,
    }

    let res: BlockResponse =
      self.json_rpc_call("get_block", Some(json!({ "height": number }))).await?;

    let block = Block::read::<&[u8]>(&mut rpc_hex(&res.blob)?.as_ref())
      .map_err(|_| RpcError::InvalidNode("invalid block".to_string()))?;

    // Make sure this is actually the block for this number
    match block.miner_transaction.prefix().inputs.first() {
      Some(Input::Gen(actual)) => {
        if usize::try_from(*actual) == Ok(number) {
          Ok(block)
        } else {
          Err(RpcError::InvalidNode("different block than requested (number)".to_string()))
        }
      }
      _ => Err(RpcError::InvalidNode(
        "block's miner_transaction didn't have an input of kind Input::Gen".to_string(),
      )),
    }
  }

  /// Get the transactions within a block.
  ///
  /// This function returns all transactions in the block, including the miner's transaction.
  ///
  /// This function does not verify the returned transactions are the ones committed to by the
  /// block's header.
  async fn get_block_transactions(&self, hash: [u8; 32]) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block(hash).await?;
    let mut res = vec![block.miner_transaction];
    res.extend(self.get_transactions(&block.transactions).await?);
    Ok(res)
  }

  /// Get the transactions within a block.
  ///
  /// This function returns all transactions in the block, including the miner's transaction.
  ///
  /// This function does not verify the returned transactions are the ones committed to by the
  /// block's header.
  async fn get_block_transactions_by_number(
    &self,
    number: usize,
  ) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block_by_number(number).await?;
    let mut res = vec![block.miner_transaction];
    res.extend(self.get_transactions(&block.transactions).await?);
    Ok(res)
  }

  /// Get the output indexes of the specified transaction.
  async fn get_o_indexes(&self, hash: [u8; 32]) -> Result<Vec<u64>, RpcError> {
    /*
    TODO: Use these when a suitable epee serde lib exists

    #[derive(Serialize, Debug)]
    struct Request {
      txid: [u8; 32],
    }

    #[derive(Deserialize, Debug)]
    struct OIndexes {
      o_indexes: Vec<u64>,
    }
    */

    // Given the immaturity of Rust epee libraries, this is a homegrown one which is only validated
    // to work against this specific function

    // Header for EPEE, an 8-byte magic and a version
    const EPEE_HEADER: &[u8] = b"\x01\x11\x01\x01\x01\x01\x02\x01\x01";

    let mut request = EPEE_HEADER.to_vec();
    // Number of fields (shifted over 2 bits as the 2 LSBs are reserved for metadata)
    request.push(1 << 2);
    // Length of field name
    request.push(4);
    // Field name
    request.extend(b"txid");
    // Type of field
    request.push(10);
    // Length of string, since this byte array is technically a string
    request.push(32 << 2);
    // The "string"
    request.extend(hash);
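
    // For illustration, with an all-zero hash the request is the 9-byte EPEE header followed
    // by [0x04, 0x04, b't', b'x', b'i', b'd', 0x0a, 0x80] and 32 zero bytes: one field, a
    // 4-byte name, type 10 (string), and a string length of 32 encoded as 32 << 2.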

    let indexes_buf = self.bin_call("get_o_indexes.bin", request).await?;
    let mut indexes: &[u8] = indexes_buf.as_ref();

    (|| {
      let mut res = None;
      let mut is_okay = false;

      if read_bytes::<_, { EPEE_HEADER.len() }>(&mut indexes)? != EPEE_HEADER {
        Err(io::Error::other("invalid header"))?;
      }

      let read_object = |reader: &mut &[u8]| -> io::Result<Vec<u64>> {
        let fields = read_byte(reader)? >> 2;

        for _ in 0 .. fields {
          let name_len = read_byte(reader)?;
          let name = read_raw_vec(read_byte, name_len.into(), reader)?;

          let type_with_array_flag = read_byte(reader)?;
          let kind = type_with_array_flag & (!0x80);

          let iters = if type_with_array_flag != kind { read_epee_vi(reader)? } else { 1 };

          if (&name == b"o_indexes") && (kind != 5) {
            Err(io::Error::other("o_indexes weren't u64s"))?;
          }

          let f = match kind {
            // i64
            1 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // i32
            2 => |reader: &mut &[u8]| read_raw_vec(read_byte, 4, reader),
            // i16
            3 => |reader: &mut &[u8]| read_raw_vec(read_byte, 2, reader),
            // i8
            4 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // u64
            5 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // u32
            6 => |reader: &mut &[u8]| read_raw_vec(read_byte, 4, reader),
            // u16
            7 => |reader: &mut &[u8]| read_raw_vec(read_byte, 2, reader),
            // u8
            8 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // double
            9 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // string, or any collection of bytes
            10 => |reader: &mut &[u8]| {
              let len = read_epee_vi(reader)?;
              read_raw_vec(
                read_byte,
                len.try_into().map_err(|_| io::Error::other("u64 length exceeded usize"))?,
                reader,
              )
            },
            // bool
            11 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // object, errors here as it shouldn't be used on this call
            12 => {
              |_: &mut &[u8]| Err(io::Error::other("node used object in reply to get_o_indexes"))
            }
            // array, so far unused
            13 => |_: &mut &[u8]| Err(io::Error::other("node used the unused array type")),
            _ => |_: &mut &[u8]| Err(io::Error::other("node used an invalid type")),
          };

          let mut bytes_res = vec![];
          for _ in 0 .. iters {
            bytes_res.push(f(reader)?);
          }

          let mut actual_res = Vec::with_capacity(bytes_res.len());
          match name.as_slice() {
            b"o_indexes" => {
              for o_index in bytes_res {
                actual_res.push(u64::from_le_bytes(
                  o_index
                    .try_into()
                    .map_err(|_| io::Error::other("node didn't provide 8 bytes for a u64"))?,
                ));
              }
              res = Some(actual_res);
            }
            b"status" => {
              if bytes_res
                .first()
                .ok_or_else(|| io::Error::other("status wasn't a string"))?
                .as_slice() !=
                b"OK"
              {
                // TODO: Better handle non-OK responses
                Err(io::Error::other("response wasn't OK"))?;
              }
              is_okay = true;
            }
            _ => continue,
          }

          if is_okay && res.is_some() {
            break;
          }
        }

        // Didn't return a response with a status
        // (if the status wasn't okay, we would've already errored)
        if !is_okay {
          Err(io::Error::other("response didn't contain a status"))?;
        }

        // If the Vec was empty, it would've been omitted, hence the unwrap_or
        // TODO: Test against a 0-output TX, such as the ones found in block 202612
        Ok(res.unwrap_or(vec![]))
      };

      read_object(&mut indexes)
    })()
    .map_err(|_| RpcError::InvalidNode("invalid binary response".to_string()))
  }

  /// Get the output distribution.
  ///
  /// `from` and `to` are heights, not block numbers, and inclusive.
  async fn get_output_distribution(&self, from: usize, to: usize) -> Result<Vec<u64>, RpcError> {
    #[derive(Deserialize, Debug)]
    struct Distribution {
      distribution: Vec<u64>,
    }

    #[derive(Deserialize, Debug)]
    struct Distributions {
      distributions: Vec<Distribution>,
    }

    let mut distributions: Distributions = self
      .json_rpc_call(
        "get_output_distribution",
        Some(json!({
          "binary": false,
          "amounts": [0],
          "cumulative": true,
          "from_height": from,
          "to_height": to,
        })),
      )
      .await?;

    Ok(distributions.distributions.swap_remove(0).distribution)
  }

  /// Get the specified outputs from the RingCT (zero-amount) pool.
  async fn get_outs(&self, indexes: &[u64]) -> Result<Vec<OutputResponse>, RpcError> {
    #[derive(Deserialize, Debug)]
    struct OutsResponse {
      status: String,
      outs: Vec<OutputResponse>,
    }

    let res: OutsResponse = self
      .rpc_call(
        "get_outs",
        Some(json!({
          "get_txid": true,
          "outputs": indexes.iter().map(|o| json!({
            "amount": 0,
            "index": o
          })).collect::<Vec<_>>()
        })),
      )
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidNode("bad response to get_outs".to_string()))?;
    }

    Ok(res.outs)
  }

  /// Get the specified outputs from the RingCT (zero-amount) pool, but only return them if their
  /// timelock has been satisfied.
  ///
  /// The timelock being satisfied is distinct from being free of the 10-block lock applied to all
  /// Monero transactions.
  ///
  /// The node is trusted regarding whether the output is unlocked unless
  /// `fingerprintable_canonical` is set to true. If `fingerprintable_canonical` is set to true,
  /// the node's local view isn't used, yet the transaction's timelock is checked to be unlocked
  /// at the specified `height`. This offers a canonical decoy selection, yet is fingerprintable
  /// as time-based timelocks aren't evaluated (and considered locked, preventing their
  /// selection).
  async fn get_unlocked_outputs(
    &self,
    indexes: &[u64],
    height: usize,
    fingerprintable_canonical: bool,
  ) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
    let outs: Vec<OutputResponse> = self.get_outs(indexes).await?;

    // Only need to fetch txs to do canonical check on timelock
    let txs = if fingerprintable_canonical {
      self
        .get_transactions(
          &outs.iter().map(|out| hash_hex(&out.txid)).collect::<Result<Vec<_>, _>>()?,
        )
        .await?
    } else {
      Vec::new()
    };

    // TODO: https://github.com/serai-dex/serai/issues/104
    outs
      .iter()
      .enumerate()
      .map(|(i, out)| {
        // Allow keys to be invalid, though if they are, return None to trigger selection of a new
        // decoy
        // Only valid keys can be used in CLSAG proofs, hence the need for re-selection, yet
        // invalid keys may honestly exist on the blockchain
        // Only a recent hard fork checked output keys were valid points
        let Some(key) = decompress_point(
          rpc_hex(&out.key)?
            .try_into()
            .map_err(|_| RpcError::InvalidNode("non-32-byte point".to_string()))?,
        ) else {
          return Ok(None);
        };
        Ok(Some([key, rpc_point(&out.mask)?]).filter(|_| {
          if fingerprintable_canonical {
            Timelock::Block(height) >= txs[i].prefix().additional_timelock
          } else {
            out.unlocked
          }
        }))
      })
      .collect()
  }

  /// Get the currently estimated fee rate from the node.
  ///
  /// This may be manipulated to unsafe levels and MUST be sanity checked.
  ///
  /// This MUST NOT be expected to be deterministic in any way.
  // TODO: Take a sanity check argument
  async fn get_fee_rate(&self, priority: FeePriority) -> Result<FeeRate, RpcError> {
    #[derive(Deserialize, Debug)]
    struct FeeResponse {
      status: String,
      fees: Option<Vec<u64>>,
      fee: u64,
      quantization_mask: u64,
    }

    let res: FeeResponse = self
      .json_rpc_call(
        "get_fee_estimate",
        Some(json!({ "grace_blocks": GRACE_BLOCKS_FOR_FEE_ESTIMATE })),
      )
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidFee)?;
    }

    if let Some(fees) = res.fees {
      // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
      // src/wallet/wallet2.cpp#L7615-L7620
      let priority_idx = usize::try_from(if priority.fee_priority() >= 4 {
        3
      } else {
        priority.fee_priority().saturating_sub(1)
      })
      .map_err(|_| RpcError::InvalidPriority)?;

      if priority_idx >= fees.len() {
        Err(RpcError::InvalidPriority)
      } else {
        FeeRate::new(fees[priority_idx], res.quantization_mask)
      }
    } else {
      // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
      // src/wallet/wallet2.cpp#L7569-L7584
      // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
      // src/wallet/wallet2.cpp#L7660-L7661
      let priority_idx =
        usize::try_from(if priority.fee_priority() == 0 { 1 } else { priority.fee_priority() - 1 })
          .map_err(|_| RpcError::InvalidPriority)?;
      let multipliers = [1, 5, 25, 1000];
      if priority_idx >= multipliers.len() {
        // though not an RPC error, it seems sensible to treat as such
        Err(RpcError::InvalidPriority)?;
      }
      let fee_multiplier = multipliers[priority_idx];

      FeeRate::new(res.fee * fee_multiplier, res.quantization_mask)
    }
  }

  /// Publish a transaction.
  async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct SendRawResponse {
      status: String,
      double_spend: bool,
      fee_too_low: bool,
      invalid_input: bool,
      invalid_output: bool,
      low_mixin: bool,
      not_relayed: bool,
      overspend: bool,
      too_big: bool,
      too_few_outputs: bool,
      reason: String,
    }

    let res: SendRawResponse = self
      .rpc_call("send_raw_transaction", Some(json!({ "tx_as_hex": hex::encode(tx.serialize()) })))
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidTransaction(tx.hash()))?;
    }

    Ok(())
  }

  /// Generate blocks, with the specified address receiving the block reward.
  ///
  /// Returns the hashes of the generated blocks and the last block's number.
  // TODO: Take &Address, not &str?
  async fn generate_blocks(
    &self,
    address: &str,
    block_count: usize,
  ) -> Result<(Vec<[u8; 32]>, usize), RpcError> {
    #[derive(Debug, Deserialize)]
    struct BlocksResponse {
      blocks: Vec<String>,
      height: usize,
    }

    let res = self
      .json_rpc_call::<BlocksResponse>(
        "generateblocks",
        Some(json!({
          "wallet_address": address,
          "amount_of_blocks": block_count
        })),
      )
      .await?;

    let mut blocks = Vec::with_capacity(res.blocks.len());
    for block in res.blocks {
      blocks.push(hash_hex(&block)?);
    }
    Ok((blocks, res.height))
  }
}
@@ -1,335 +0,0 @@
// TODO: Clean this

use std_shims::{vec::Vec, collections::HashSet};

use zeroize::Zeroize;

use rand_core::{RngCore, CryptoRng};
use rand_distr::{Distribution, Gamma};
#[cfg(not(feature = "std"))]
use rand_distr::num_traits::Float;

use curve25519_dalek::edwards::EdwardsPoint;

use crate::{
  DEFAULT_LOCK_WINDOW, COINBASE_LOCK_WINDOW, BLOCK_TIME,
  rpc::{RpcError, Rpc},
  WalletOutput,
};

const RECENT_WINDOW: usize = 15;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
#[allow(clippy::cast_precision_loss)]
const TIP_APPLICATION: f64 = (DEFAULT_LOCK_WINDOW * BLOCK_TIME) as f64;

#[allow(clippy::too_many_arguments)]
async fn select_n<'a, R: RngCore + CryptoRng>(
  rng: &mut R,
  rpc: &impl Rpc,
  distribution: &[u64],
  height: usize,
  high: u64,
  per_second: f64,
  real: &[u64],
  used: &mut HashSet<u64>,
  count: usize,
  fingerprintable_canonical: bool,
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
  // TODO: consider removing this extra RPC and expect the caller to handle it
  if fingerprintable_canonical && (height > rpc.get_height().await?) {
    // TODO: Don't use InternalError for the caller's failure
    Err(RpcError::InternalError("decoys being requested from too young blocks".to_string()))?;
  }

  #[cfg(test)]
  let mut iters = 0;
  let mut confirmed = Vec::with_capacity(count);
  // Retries on failure. Retries are obvious as decoys, yet should be minimal
  while confirmed.len() != count {
    let remaining = count - confirmed.len();
    // TODO: over-request candidates in case some are locked to avoid needing
    // round trips to the daemon (and revealing obvious decoys to the daemon)
    let mut candidates = Vec::with_capacity(remaining);
    while candidates.len() != remaining {
      #[cfg(test)]
      {
        iters += 1;
        // This check is cheap and, on fresh chains, a lot of rounds may be needed
        if iters == 100 {
          Err(RpcError::InternalError("hit decoy selection round limit".to_string()))?;
        }
      }

      // Use a gamma distribution
      let mut age = Gamma::<f64>::new(19.28, 1.0 / 1.61).unwrap().sample(rng).exp();
      #[allow(clippy::cast_precision_loss)]
      if age > TIP_APPLICATION {
        age -= TIP_APPLICATION;
      } else {
        // f64 does not have try_from available, which is why these are written with `as`
        age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64;
      }
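
      // Context (added note): the parameters above (shape 19.28, rate 1.61) reportedly match
      // the gamma distribution Monero's wallet2 fits to observed spend ages (Möser et al.,
      // 2018). exp(sample) is an age in seconds; ages landing within the lock window are
      // redrawn uniformly over the recent window, and per_second below converts the age into
      // an expected global output index.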

      #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
      let o = (age * per_second) as u64;
      if o < high {
        let i = distribution.partition_point(|s| *s < (high - 1 - o));
        let prev = i.saturating_sub(1);
        let n = distribution[i] - distribution[prev];
        if n != 0 {
          let o = distribution[prev] + (rng.next_u64() % n);
          if !used.contains(&o) {
            // It will either actually be used, or is unusable and this prevents trying it again
            used.insert(o);
            candidates.push(o);
          }
        }
      }
    }

    // If this is the first time we're requesting these outputs, include the real one as well
    // Prevents the node we're connected to from having a list of known decoys and then seeing a
    // TX which uses all of them, with one additional output (the true spend)
    let mut real_indexes = HashSet::with_capacity(real.len());
    if confirmed.is_empty() {
      for real in real {
        candidates.push(*real);
      }
      // Sort candidates so the real spends aren't the ones at the end
      candidates.sort();
      for real in real {
        real_indexes.insert(candidates.binary_search(real).unwrap());
      }
    }

    // TODO: make sure that the real output is included in the response, and
    // that mask and key are equal to expected
    for (i, output) in rpc
      .get_unlocked_outputs(&candidates, height, fingerprintable_canonical)
      .await?
      .iter_mut()
      .enumerate()
    {
      // Don't include the real spend as a decoy, despite requesting it
      if real_indexes.contains(&i) {
        continue;
      }

      if let Some(output) = output.take() {
        confirmed.push((candidates[i], output));
      }
    }
  }

  Ok(confirmed)
}

fn offset(ring: &[u64]) -> Vec<u64> {
  let mut res = vec![ring[0]];
  res.resize(ring.len(), 0);
  for m in (1 .. ring.len()).rev() {
    res[m] = ring[m] - ring[m - 1];
  }
  res
}
|
||||
async fn select_decoys<R: RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
rpc: &impl Rpc,
|
||||
ring_len: usize,
|
||||
height: usize,
|
||||
inputs: &[WalletOutput],
|
||||
fingerprintable_canonical: bool,
|
||||
) -> Result<Vec<Decoys>, RpcError> {
|
||||
let mut distribution = vec![];
|
||||
|
||||
let decoy_count = ring_len - 1;
|
||||
|
||||
// Convert the inputs in question to the raw output data
|
||||
let mut real = Vec::with_capacity(inputs.len());
|
||||
let mut outputs = Vec::with_capacity(inputs.len());
|
||||
for input in inputs {
|
||||
real.push(input.relative_id.index_on_blockchain);
|
||||
outputs.push((real[real.len() - 1], [input.key(), input.commitment().calculate()]));
|
||||
}
|
||||
|
||||
if distribution.len() < height {
|
||||
// TODO: verify distribution elems are strictly increasing
|
||||
let extension =
|
||||
rpc.get_output_distribution(distribution.len(), height.saturating_sub(1)).await?;
|
||||
distribution.extend(extension);
|
||||
}
|
||||
// If asked to use an older height than previously asked, truncate to ensure accuracy
|
||||
// Should never happen, yet risks desyncing if it did
|
||||
distribution.truncate(height);
|
||||
|
||||
if distribution.len() < DEFAULT_LOCK_WINDOW {
|
||||
Err(RpcError::InternalError("not enough decoy candidates".to_string()))?;
|
||||
}
|
||||
|
||||
#[allow(clippy::cast_precision_loss)]
|
||||
let per_second = {
|
||||
let blocks = distribution.len().min(BLOCKS_PER_YEAR);
|
||||
let initial = distribution[distribution.len().saturating_sub(blocks + 1)];
|
||||
let outputs = distribution[distribution.len() - 1].saturating_sub(initial);
|
||||
(outputs as f64) / ((blocks * BLOCK_TIME) as f64)
|
||||
};
|
||||
|
||||
let mut used = HashSet::<u64>::new();
|
||||
for o in &outputs {
|
||||
used.insert(o.0);
|
||||
}
|
||||
|
||||
// TODO: Create a TX with less than the target amount, as allowed by the protocol
|
||||
let high = distribution[distribution.len() - DEFAULT_LOCK_WINDOW];
|
||||
if high.saturating_sub(COINBASE_LOCK_WINDOW as u64) <
|
||||
u64::try_from(inputs.len() * ring_len).unwrap()
|
||||
{
|
||||
Err(RpcError::InternalError("not enough coinbase candidates".to_string()))?;
|
||||
}
|
||||
|
||||
// Select all decoys for this transaction, assuming we generate a sane transaction
|
||||
// We should almost never naturally generate an insane transaction, hence why this doesn't
|
||||
// bother with an overage
|
||||
let mut decoys = select_n(
|
||||
rng,
|
||||
rpc,
|
||||
&distribution,
|
||||
height,
|
||||
high,
|
||||
per_second,
|
||||
&real,
|
||||
&mut used,
|
||||
inputs.len() * decoy_count,
|
||||
fingerprintable_canonical,
|
||||
)
|
||||
.await?;
|
||||
real.zeroize();
|
||||
|
||||
let mut res = Vec::with_capacity(inputs.len());
|
||||
for o in outputs {
|
||||
// Grab the decoys for this specific output
|
||||
let mut ring = decoys.drain((decoys.len() - decoy_count) ..).collect::<Vec<_>>();
|
||||
ring.push(o);
|
||||
ring.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
|
||||
// Sanity checks are only run when 1000 outputs are available in Monero
|
||||
// We run this check whenever the highest output index, which we acknowledge, is > 500
|
||||
// This means we assume (for presumably test blockchains) the height being used has not had
|
||||
// 500 outputs since while itself not being a sufficiently mature blockchain
|
||||
// Considering Monero's p2p layer doesn't actually check transaction sanity, it should be
|
||||
// fine for us to not have perfectly matching rules, especially since this code will infinite
|
||||
// loop if it can't determine sanity, which is possible with sufficient inputs on
|
||||
// sufficiently small chains
|
||||
if high > 500 {
|
||||
// Make sure the TX passes the sanity check that the median output is within the last 40%
|
||||
let target_median = high * 3 / 5;
|
||||
while ring[ring_len / 2].0 < target_median {
|
||||
// If it's not, update the bottom half with new values to ensure the median only moves up
|
||||
for removed in ring.drain(0 .. (ring_len / 2)).collect::<Vec<_>>() {
|
||||
// If we removed the real spend, add it back
|
||||
if removed.0 == o.0 {
|
||||
ring.push(o);
|
||||
} else {
|
||||
// We could not remove this, saving CPU time and removing low values as
|
||||
// possibilities, yet it'd increase the amount of decoys required to create this
|
||||
// transaction and some removed outputs may be the best option (as we drop the first
|
||||
// half, not just the bottom n)
|
||||
used.remove(&removed.0);
|
||||
}
|
||||
}
|
||||
|
||||
// Select new outputs until we have a full sized ring again
|
||||
ring.extend(
|
||||
select_n(
|
||||
rng,
|
||||
rpc,
|
||||
&distribution,
|
||||
height,
|
||||
high,
|
||||
per_second,
|
||||
&[],
|
||||
&mut used,
|
||||
ring_len - ring.len(),
|
||||
fingerprintable_canonical,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
ring.sort_by(|a, b| a.0.cmp(&b.0));
|
||||
}
|
||||
|
||||
// The other sanity check rule is about duplicates, yet we already enforce unique ring
|
||||
// members
|
||||
}
|
||||
|
||||
res.push(
|
||||
Decoys::new(
|
||||
offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
|
||||
// Binary searches for the real spend since we don't know where it sorted to
|
||||
u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
|
||||
ring.iter().map(|output| output.1).collect(),
|
||||
)
|
||||
.unwrap(),
|
||||
);
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub use monero_serai::primitives::Decoys;
|
||||
|
||||
// TODO: Remove this trait
|
||||
/// TODO: Document
|
||||
#[cfg(feature = "std")]
|
||||
#[async_trait::async_trait]
|
||||
pub trait DecoySelection {
|
||||
/// Select decoys using the same distribution as Monero. Relies on the monerod RPC
|
||||
/// response for an output's unlocked status, minimizing trips to the daemon.
|
||||
async fn select<R: Send + Sync + RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
rpc: &impl Rpc,
|
||||
ring_len: usize,
|
||||
height: usize,
|
||||
inputs: &[WalletOutput],
|
||||
) -> Result<Vec<Decoys>, RpcError>;
|
||||
|
||||
/// If no reorg has occurred and an honest RPC, any caller who passes the same height to this
|
||||
/// function will use the same distribution to select decoys. It is fingerprintable
|
||||
/// because a caller using this will not be able to select decoys that are timelocked
|
||||
/// with a timestamp. Any transaction which includes timestamp timelocked decoys in its
|
||||
/// rings could not be constructed using this function.
|
||||
///
|
||||
/// TODO: upstream change to monerod get_outs RPC to accept a height param for checking
|
||||
/// output's unlocked status and remove all usage of fingerprintable_canonical
|
||||
async fn fingerprintable_canonical_select<R: Send + Sync + RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
rpc: &impl Rpc,
|
||||
ring_len: usize,
|
||||
height: usize,
|
||||
inputs: &[WalletOutput],
|
||||
) -> Result<Vec<Decoys>, RpcError>;
|
||||
}
|
||||
|
||||
#[cfg(feature = "std")]
|
||||
#[async_trait::async_trait]
|
||||
impl DecoySelection for Decoys {
|
||||
async fn select<R: Send + Sync + RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
rpc: &impl Rpc,
|
||||
ring_len: usize,
|
||||
height: usize,
|
||||
inputs: &[WalletOutput],
|
||||
) -> Result<Vec<Decoys>, RpcError> {
|
||||
select_decoys(rng, rpc, ring_len, height, inputs, false).await
|
||||
}
|
||||
|
||||
async fn fingerprintable_canonical_select<R: Send + Sync + RngCore + CryptoRng>(
|
||||
rng: &mut R,
|
||||
rpc: &impl Rpc,
|
||||
ring_len: usize,
|
||||
height: usize,
|
||||
inputs: &[WalletOutput],
|
||||
) -> Result<Vec<Decoys>, RpcError> {
|
||||
select_decoys(rng, rpc, ring_len, height, inputs, true).await
|
||||
}
|
||||
}
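As a rough sketch of how these entry points are intended to be called (nothing below is from the diff itself; `rpc`, `height`, and `inputs` are assumed to come from an existing wallet context, and the ring size of 16 reflects the current Monero protocol):

// A minimal sketch, assuming the trait above is in scope.
use rand_core::OsRng;

async fn build_rings(
  rpc: &impl Rpc,
  height: usize,
  inputs: &[WalletOutput],
) -> Result<Vec<Decoys>, RpcError> {
  // 16 ring members: 15 decoys plus the real spend per input
  let ring_len = 16;
  // The fingerprintable-canonical variant trades some decoy-selection flexibility for a
  // reproducible view of unlock status at `height`
  Decoys::fingerprintable_canonical_select(&mut OsRng, rpc, ring_len, height, inputs).await
}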
@@ -1,13 +1,13 @@
[package]
name = "serai-db"
version = "0.1.0"
version = "0.1.1"
description = "A simple database trait and backends for it"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.65"
rust-version = "1.71"

[package.metadata.docs.rs]
all-features = true
@@ -18,7 +18,7 @@ workspace = true

[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }
rocksdb = { version = "0.23", default-features = false, features = ["zstd"], optional = true }

[features]
parity-db = ["dep:parity-db"]
common/db/README.md | 8 (new file)
@@ -0,0 +1,8 @@
# Serai DB

An inefficient, minimal abstraction around databases.

The abstraction offers `get`, `put`, and `del` with helper functions and macros
built on top. Database iteration is not offered, forcing the caller to manually
implement indexing schemes. This ensures wide compatibility across abstracted
databases.
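A minimal sketch of the intended usage, assuming the `create_db!` macro shown later in this diff (`ExampleDb` and `Counter` are hypothetical names):

use serai_db::{Get, DbTxn, Db, create_db};

create_db!(
  ExampleDb {
    // Maps a 32-byte identifier to a counter
    Counter: (id: [u8; 32]) -> u64,
  }
);

fn increment(db: &mut impl Db, id: [u8; 32]) {
  let mut txn = db.txn();
  let count = Counter::get(&txn, id).unwrap_or(0);
  Counter::set(&mut txn, id, &(count + 1));
  txn.commit();
}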
@@ -38,12 +38,21 @@ pub fn serai_db_key(
#[macro_export]
macro_rules! create_db {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
    $(
      $field_name: ident:
      $(<$($generic_name: tt: $generic_type: tt),+>)?(
        $($arg: ident: $arg_type: ty),*
      ) -> $field_type: ty$(,)?
    )*
  }) => {
    $(
      #[derive(Clone, Debug)]
      pub(crate) struct $field_name;
      impl $field_name {
      pub(crate) struct $field_name$(
        <$($generic_name: $generic_type),+>
      )?$(
        (core::marker::PhantomData<($($generic_name),+)>)
      )?;
      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
@@ -52,18 +61,43 @@ macro_rules! create_db {
            ($($arg),*).encode()
          )
        }
        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
          let key = $field_name::key($($arg),*);
        pub(crate) fn set(
          txn: &mut impl DbTxn
          $(, $arg: $arg_type)*,
          data: &$field_type
        ) {
          let key = Self::key($($arg),*);
          txn.put(&key, borsh::to_vec(data).unwrap());
        }
        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
          getter.get($field_name::key($($arg),*)).map(|data| {
        pub(crate) fn get(
          getter: &impl Get,
          $($arg: $arg_type),*
        ) -> Option<$field_type> {
          getter.get(Self::key($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
          })
        }
        // Returns a PhantomData of all generic types so if the generic was only used in the value,
        // not the keys, this doesn't have unused generic types
        #[allow(dead_code)]
        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
          txn.del(&$field_name::key($($arg),*))
        pub(crate) fn del(
          txn: &mut impl DbTxn
          $(, $arg: $arg_type)*
        ) -> core::marker::PhantomData<($($($generic_name),+)?)> {
          txn.del(&Self::key($($arg),*));
          core::marker::PhantomData
        }

        pub(crate) fn take(
          txn: &mut impl DbTxn
          $(, $arg: $arg_type)*
        ) -> Option<$field_type> {
          let key = Self::key($($arg),*);
          let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
          if res.is_some() {
            txn.del(key);
          }
          res
        }
      }
    )*
@@ -73,19 +107,30 @@ macro_rules! create_db {
#[macro_export]
macro_rules! db_channel {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
    $($field_name: ident:
      $(<$($generic_name: tt: $generic_type: tt),+>)?(
        $($arg: ident: $arg_type: ty),*
      ) -> $field_type: ty$(,)?
    )*
  }) => {
    $(
      create_db! {
        $db_name {
          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
          $field_name: $(<$($generic_name: $generic_type),+>)?(
            $($arg: $arg_type,)*
            index: u32
          ) -> $field_type
        }
      }

      impl $field_name {
        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
        pub(crate) fn send(
          txn: &mut impl DbTxn
          $(, $arg: $arg_type)*
          , value: &$field_type
        ) {
          // Use index 0 to store the amount of messages
          let messages_sent_key = $field_name::key($($arg),*, 0);
          let messages_sent_key = Self::key($($arg,)* 0);
          let messages_sent = txn.get(&messages_sent_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);
@@ -96,19 +141,35 @@ macro_rules! db_channel {
          // at the same time
          let index_to_use = messages_sent + 2;

          $field_name::set(txn, $($arg),*, index_to_use, value);
          Self::set(txn, $($arg,)* index_to_use, value);
        }
        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
          let messages_recvd_key = $field_name::key($($arg),*, 1);
        pub(crate) fn peek(
          getter: &impl Get
          $(, $arg: $arg_type)*
        ) -> Option<$field_type> {
          let messages_recvd_key = Self::key($($arg,)* 1);
          let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);

          let index_to_read = messages_recvd + 2;

          Self::get(getter, $($arg,)* index_to_read)
        }
        pub(crate) fn try_recv(
          txn: &mut impl DbTxn
          $(, $arg: $arg_type)*
        ) -> Option<$field_type> {
          let messages_recvd_key = Self::key($($arg,)* 1);
          let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);

          let index_to_read = messages_recvd + 2;

          let res = $field_name::get(txn, $($arg),*, index_to_read);
          let res = Self::get(txn, $($arg,)* index_to_read);
          if res.is_some() {
            $field_name::del(txn, $($arg),*, index_to_read);
            Self::del(txn, $($arg,)* index_to_read);
            txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
          }
          res
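A minimal sketch of the channel semantics defined by `db_channel!` above (`ExampleChannels` and `Events` are hypothetical names; `peek` reads without consuming, `try_recv` consumes):

use serai_db::{Get, DbTxn, Db, db_channel};

db_channel!(
  ExampleChannels {
    // A FIFO channel of events, keyed by a network's ID
    Events: (network: u8) -> u64,
  }
);

fn example(txn: &mut impl DbTxn) {
  Events::send(txn, 0, &123);
  assert_eq!(Events::peek(&*txn, 0), Some(123));
  assert_eq!(Events::try_recv(txn, 0), Some(123));
  assert_eq!(Events::try_recv(txn, 0), None);
}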
@@ -14,26 +14,87 @@ mod parity_db
#[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db};

/// An object implementing get.
/// An object implementing `get`.
pub trait Get {
  /// Get a value from the database.
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
}

/// An atomic database operation.
/// An atomic database transaction.
///
/// A transaction is only required to atomically commit. It is not required that two `Get` calls
/// made with the same transaction return the same result, if another transaction wrote to that
/// key.
///
/// If two transactions are created, and both write (including deletions) to the same key, behavior
/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
/// randomly, or take any other action, at time of write or at time of commit.
#[must_use]
pub trait DbTxn: Send + Get {
pub trait DbTxn: Sized + Send + Get {
  /// Write a value to this key.
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
  /// Delete the value from this key.
  fn del(&mut self, key: impl AsRef<[u8]>);
  /// Commit this transaction.
  fn commit(self);
  /// Close this transaction.
  ///
  /// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
  /// with transactions which can't be dropped.
  fn close(self) {
    drop(self);
  }
}

/// A database supporting atomic operations.
// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
pub struct Undroppable<T>(Option<T>);
impl<T> Drop for Undroppable<T> {
  fn drop(&mut self) {
    // Use an assertion at compile time to prevent this code from compiling if generated
    #[allow(clippy::assertions_on_constants)]
    const {
      assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
    }
  }
}
impl<T: DbTxn> Get for Undroppable<T> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    self.0.as_ref().unwrap().get(key)
  }
}
impl<T: DbTxn> DbTxn for Undroppable<T> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.0.as_mut().unwrap().put(key, value);
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.0.as_mut().unwrap().del(key);
  }
  fn commit(mut self) {
    self.0.take().unwrap().commit();
    let _ = core::mem::ManuallyDrop::new(self);
  }
  fn close(mut self) {
    drop(self.0.take().unwrap());
    let _ = core::mem::ManuallyDrop::new(self);
  }
}

/// A database supporting atomic transactions.
pub trait Db: 'static + Send + Sync + Clone + Get {
  /// The type representing a database transaction.
  type Transaction<'a>: DbTxn;
  /// Calculate a key for a database entry.
  ///
  /// Keys are separated by the database, the item within the database, and the item's key itself.
  fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    let db_len = u8::try_from(db_dst.len()).unwrap();
    let dst_len = u8::try_from(item_dst.len()).unwrap();
    [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
  }
  fn txn(&mut self) -> Self::Transaction<'_>;
  /// Open a new transaction which may be dropped.
  fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
  /// Open a new transaction which must be committed or closed.
  fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
    Undroppable(Some(self.unsafe_txn()))
  }
}
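A minimal sketch of what the `Undroppable` wrapper enforces, assuming a `MemDb` backing database with a `new` constructor:

use serai_db::{DbTxn, Db, MemDb};

fn main() {
  let mut db = MemDb::new();

  let mut txn = db.txn();
  txn.put(b"key", b"value");
  // Every path must end in `commit` or `close`; letting `txn` fall out of scope
  // would instantiate `Undroppable`'s `Drop`, whose `const { assert!(false) }`
  // fails compilation
  txn.commit();

  // `unsafe_txn` opts back into droppable transactions
  let txn = db.unsafe_txn();
  drop(txn);
}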
@@ -11,7 +11,7 @@ use crate::*;
#[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);

impl<'a> Get for MemDbTxn<'a> {
impl Get for MemDbTxn<'_> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    if self.2.contains(key.as_ref()) {
      return None;
@@ -23,7 +23,7 @@ impl<'a> Get for MemDbTxn<'a> {
      .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
  }
}
impl<'a> DbTxn for MemDbTxn<'a> {
impl DbTxn for MemDbTxn<'_> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.2.remove(key.as_ref());
    self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
@@ -74,7 +74,7 @@ impl Get for MemDb {
}
impl Db for MemDb {
  type Transaction<'a> = MemDbTxn<'a>;
  fn txn(&mut self) -> MemDbTxn<'_> {
  fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
    MemDbTxn(self, HashMap::new(), HashSet::new())
  }
}
@@ -4,6 +4,7 @@ pub use ::parity_db::{Options, Db as ParityDb};

use crate::*;

#[must_use]
pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);

impl Get for Transaction<'_> {
@@ -36,7 +37,7 @@ impl Get for Arc<ParityDb> {
}
impl Db for Arc<ParityDb> {
  type Transaction<'a> = Transaction<'a>;
  fn txn(&mut self) -> Self::Transaction<'_> {
  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
    Transaction(self, vec![])
  }
}
@@ -7,6 +7,7 @@ use rocksdb::{

use crate::*;

#[must_use]
pub struct Transaction<'a, T: ThreadMode>(
  RocksTransaction<'a, OptimisticTransactionDB<T>>,
  &'a OptimisticTransactionDB<T>,
@@ -38,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
}
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
  type Transaction<'a> = Transaction<'a, T>;
  fn txn(&mut self) -> Self::Transaction<'_> {
  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
    let mut opts = WriteOptions::default();
    opts.set_sync(true);
    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
common/env/Cargo.toml | 2 (vendored)
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.60"
rust-version = "1.71"

[package.metadata.docs.rs]
all-features = true
common/patchable-async-sleep/Cargo.toml | 20 (new file)
@@ -0,0 +1,20 @@
[package]
name = "patchable-async-sleep"
version = "0.1.0"
description = "An async sleep function, patchable to the preferred runtime"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-async-sleep"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["async", "sleep", "tokio", "smol", "async-std"]
edition = "2021"
rust-version = "1.71"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tokio = { version = "1", default-features = false, features = ["time"] }
common/patchable-async-sleep/README.md | 7 (new file)
@@ -0,0 +1,7 @@
# Patchable Async Sleep

An async sleep function, patchable to the preferred runtime.

This crate is `tokio`-backed. Applications which don't want to use `tokio`
should patch this crate to one which works with their preferred runtime. The
point of it is to have a minimal API surface to trivially facilitate such work.
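For example, a downstream workspace might patch it in its `Cargo.toml` (the fork URL below is purely hypothetical):

# Hypothetical: a fork of this crate whose `sleep` is backed by another runtime
[patch.crates-io]
patchable-async-sleep = { git = "https://github.com/example/patchable-async-sleep" }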
common/patchable-async-sleep/src/lib.rs | 10 (new file)
@@ -0,0 +1,10 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::time::Duration;

/// Sleep for the specified duration.
pub fn sleep(duration: Duration) -> impl core::future::Future<Output = ()> {
  tokio::time::sleep(duration)
}
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-requ
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.64"
rust-version = "1.71"

[package.metadata.docs.rs]
all-features = true
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.70"
rust-version = "1.80"

[package.metadata.docs.rs]
all-features = true
@@ -17,8 +17,8 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "once"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
hashbrown = { version = "0.15", default-features = false, features = ["default-hasher", "inline-more"] }

[features]
std = []
@@ -26,27 +26,6 @@ mod mutex_shim {
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

#[cfg(feature = "std")]
pub use std::sync::OnceLock;
pub use std::sync::LazyLock;
#[cfg(not(feature = "std"))]
mod oncelock_shim {
  use spin::Once;

  pub struct OnceLock<T>(Once<T>);
  impl<T> OnceLock<T> {
    pub const fn new() -> OnceLock<T> {
      OnceLock(Once::new())
    }
    pub fn get(&self) -> Option<&T> {
      self.0.poll()
    }
    pub fn get_mut(&mut self) -> Option<&mut T> {
      self.0.get_mut()
    }

    pub fn get_or_init<F: FnOnce() -> T>(&self, f: F) -> &T {
      self.0.call_once(f)
    }
  }
}
#[cfg(not(feature = "std"))]
pub use oncelock_shim::*;
pub use spin::Lazy as LazyLock;
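A minimal sketch of the replacement API, assuming this module is consumed as `std_shims::sync`; the same code compiles under both the `std` and `spin` backends:

use std_shims::sync::LazyLock;

// Computed on first access, under either backend
static SQUARES: LazyLock<Vec<u64>> = LazyLock::new(|| (0u64 .. 64).map(|i| i * i).collect());

fn square(i: usize) -> u64 {
  SQUARES[i]
}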
common/task/Cargo.toml | 22 (new file)
@@ -0,0 +1,22 @@
[package]
name = "serai-task"
version = "0.1.0"
description = "A task schema for Serai services"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.75"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
log = { version = "0.4", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }
@@ -1,6 +1,6 @@
AGPL-3.0-only license

Copyright (c) 2022-2023 Luke Parker
Copyright (c) 2022-2024 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
common/task/README.md | 3 (new file)
@@ -0,0 +1,3 @@
# Task

A schema to define tasks to be run ad infinitum.
common/task/src/lib.rs | 161 (new file)
@@ -0,0 +1,161 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{
  fmt::{self, Debug},
  future::Future,
  time::Duration,
};

use tokio::sync::mpsc;

mod type_name;

/// A handle for a task.
///
/// The task will only stop running once all handles for it are dropped.
//
// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would
// either need to panic (historic behavior), silently drop the fact the task can't be run, or
// return an error. Instead of having a potential panic, and instead of modeling the error
// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now`
// are infallible.
#[derive(Clone)]
pub struct TaskHandle {
  run_now: mpsc::Sender<()>,
  #[allow(dead_code)] // This is used to track if all handles have been dropped
  close: mpsc::Sender<()>,
}

/// A task's internal structures.
pub struct Task {
  run_now: mpsc::Receiver<()>,
  close: mpsc::Receiver<()>,
}

impl Task {
  /// Create a new task definition.
  pub fn new() -> (Self, TaskHandle) {
    // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
    // soon as possible
    let (run_now_send, run_now_recv) = mpsc::channel(1);
    // And any call to close satisfies all calls to close
    let (close_send, close_recv) = mpsc::channel(1);
    (
      Self { run_now: run_now_recv, close: close_recv },
      TaskHandle { run_now: run_now_send, close: close_send },
    )
  }
}

impl TaskHandle {
  /// Tell the task to run now (and not whenever its next iteration on a timer is).
  pub fn run_now(&self) {
    #[allow(clippy::match_same_arms)]
    match self.run_now.try_send(()) {
      Ok(()) => {}
      // NOP on full, as this task will already be run as soon as possible
      Err(mpsc::error::TrySendError::Full(())) => {}
      Err(mpsc::error::TrySendError::Closed(())) => {
        // The task should only be closed if all handles are dropped, and this one hasn't been
        panic!("task was unexpectedly closed when calling run_now")
      }
    }
  }
}

/// An enum which can't be constructed, representing that the task does not error.
pub enum DoesNotError {}
impl Debug for DoesNotError {
  fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
    // This type can't be constructed so we'll never have a `&self` to call this fn with
    unreachable!()
  }
}

/// A task to be continually run.
pub trait ContinuallyRan: Sized + Send {
  /// The number of seconds before this task should be polled again.
  const DELAY_BETWEEN_ITERATIONS: u64 = 5;
  /// The maximum number of seconds before this task should be run again.
  ///
  /// Upon error, the amount of time waited will be linearly increased until this limit.
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;

  /// The error potentially yielded upon running an iteration of this task.
  type Error: Debug;

  /// Run an iteration of the task.
  ///
  /// If this returns `true`, all dependents of the task will immediately have a new iteration run
  /// (without waiting for whatever timer they were already on).
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;

  /// Continually run the task.
  fn continually_run(
    mut self,
    mut task: Task,
    dependents: Vec<TaskHandle>,
  ) -> impl Send + Future<Output = ()> {
    async move {
      // The default number of seconds to sleep before running the task again
      let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
      // The current number of seconds to sleep before running the task again
      // We increment this upon errors in order to not flood the logs with errors
      let mut current_sleep_before_next_task = default_sleep_before_next_task;
      let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
        let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
        // Set a limit of sleeping for two minutes
        *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
      };

      loop {
        // If we were told to close/all handles were dropped, drop it
        {
          let should_close = task.close.try_recv();
          match should_close {
            Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
            Err(mpsc::error::TryRecvError::Empty) => {}
          }
        }

        match self.run_iteration().await {
          Ok(run_dependents) => {
            // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
            current_sleep_before_next_task = default_sleep_before_next_task;

            if run_dependents {
              for dependent in &dependents {
                dependent.run_now();
              }
            }
          }
          Err(e) => {
            // Get the type name
            let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
            // Print the error as a warning, prefixed by the task's type
            log::warn!("{type_name}: {e:?}");
            increase_sleep_before_next_task(&mut current_sleep_before_next_task);
          }
        }

        // Don't run the task again for another few seconds UNLESS told to run now
        /*
          We could replace tokio::mpsc with async_channel, tokio::time::sleep with
          patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or

          It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
        */
        tokio::select! {
          () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
          msg = task.run_now.recv() => {
            // Check if this is firing because the handle was dropped
            if msg.is_none() {
              break;
            }
          },
        }
      }
    }
  }
}
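A minimal sketch of defining and running a task with this schema (`Heartbeat` is a hypothetical task, and a `tokio` runtime plus the `log` crate are assumed):

use core::future::Future;
use serai_task::{Task, ContinuallyRan, DoesNotError};

struct Heartbeat;
impl ContinuallyRan for Heartbeat {
  type Error = DoesNotError;
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      log::info!("heartbeat");
      // `false`: don't wake any dependent tasks
      Ok(false)
    }
  }
}

#[tokio::main]
async fn main() {
  let (task, handle) = Task::new();
  // Runs every DELAY_BETWEEN_ITERATIONS seconds until all handles are dropped
  tokio::spawn(Heartbeat.continually_run(task, vec![]));
  // Trigger an immediate iteration, without waiting for the timer
  handle.run_now();
}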
common/task/src/type_name.rs | 31 (new file)
@@ -0,0 +1,31 @@
/// Strip the modules from a type name.
// This may be of the form `a::b::C`, in which case we only want `C`
pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
  // It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
  let mut by_generics = full_type_name.split('<');

  // Strip to just `C`
  let full_outer_object_name = by_generics.next().unwrap();
  let mut outer_object_name_parts = full_outer_object_name.split("::");
  let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
  for part in outer_object_name_parts {
    last_part_in_outer_object_name = part;
  }

  // Push back on the generic terms
  let mut type_name = last_part_in_outer_object_name.to_string();
  for generic in by_generics {
    type_name.push('<');
    type_name.push_str(generic);
  }
  type_name
}

#[test]
fn test_strip_type_name() {
  assert_eq!(strip_type_name("core::option::Option"), "Option");
  assert_eq!(
    strip_type_name("core::option::Option<alloc::string::String>"),
    "Option<alloc::string::String>"
  );
}
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.77.0"
rust-version = "1.77"

[package.metadata.docs.rs]
all-features = true
@@ -8,6 +8,7 @@ authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
@@ -17,30 +18,29 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }

zeroize = { version = "^1.5", default-features = false, features = ["std"] }
bitvec = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }

zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" }
serai-task = { path = "../common/task", version = "0.1" }

processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
messages = { package = "serai-processor-messages", path = "../processor/messages" }
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" }
tributary-sdk = { path = "./tributary-sdk" }

sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -49,16 +49,15 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] }

[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-cosign = { path = "./cosign" }
serai-coordinator-substrate = { path = "./substrate" }
serai-coordinator-tributary = { path = "./tributary" }
serai-coordinator-p2p = { path = "./p2p" }
serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }

[features]
longer-reattempts = []
longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"]
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]
@@ -1,6 +1,6 @@
AGPL-3.0-only license

Copyright (c) 2023 Luke Parker
Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
@@ -1,7 +1,29 @@
# Coordinator

The Serai coordinator communicates with other coordinators to prepare batches
for Serai and sign transactions.
- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint
  BFT algorithm.

In order to achieve consensus over gossip, and order certain events, a
micro-blockchain is instantiated.
- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead
  of producing a blockchain daemon like the Polkadot SDK or Cosmos SDK intend
  to, `tributary` is solely intended to be an embedded asynchronous task within
  an application.

The Serai coordinator spawns a tributary for each validator set it's
coordinating. This allows the participating validators to communicate in a
byzantine-fault-tolerant manner (relying on Tendermint for consensus).

- [`cosign`](./cosign) contains a library to decide which Substrate blocks
  should be cosigned and to evaluate cosigns.

- [`substrate`](./substrate) contains a library to index the Substrate
  blockchain and handle its events.

- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
  Serai processor. It includes the `Transaction` definition and deferred
  execution logic.

- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.

- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.

- [`src`](./src) contains the source code for the Coordinator binary itself.
coordinator/cosign/Cargo.toml | 33 (new file)
@@ -0,0 +1,33 @@
[package]
name = "serai-cosign"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }

log = { version = "0.4", default-features = false, features = ["std"] }

tokio = { version = "1", default-features = false }

serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }
coordinator/cosign/README.md | 121 (new file)
@@ -0,0 +1,121 @@
# Serai Cosign

The Serai blockchain is controlled by a set of validators referred to as the
Serai validators. These validators could attempt to double-spend, even if every
node on the network is a full node, via equivocating.

Posit:
- The Serai validators control X SRI
- The Serai validators produce block A swapping X SRI to Y XYZ
- The Serai validators produce block B swapping X SRI to Z ABC
- The Serai validators finalize block A and send to the validators for XYZ
- The Serai validators finalize block B and send to the validators for ABC

This is solved via the cosigning protocol. The validators for XYZ and the
validators for ABC each sign their view of the Serai blockchain, communicating
amongst each other to ensure consistency.

The security of the cosigning protocol is not formally proven, and there are no
claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
practical and make such attacks infeasible, when they could already be argued
difficult to perform.

### Definitions

- Cosign: A signature from a non-Serai validator set for a Serai block
- Cosign Commit: A collection of cosigns which achieve the necessary weight

### Methodology

Finalized blocks from the Serai network are intended to be cosigned if they
contain burn events. Only once cosigned should non-Serai validators process
them.

Cosigning is performed by each non-Serai validator set, using its threshold
keys declared on the Serai blockchain. Once 83% of non-Serai validator sets, by
weight, cosign a block, a cosign commit is formed. A cosign commit for a block
is considered to also cosign all blocks preceding it.

### Bounds Under Asynchrony

Assuming an asynchronous environment fully controlled by the adversary, 34% of
a validator set may cause an equivocation. Control of 67% of non-Serai
validator sets, by weight, is sufficient to produce two distinct cosign commits
at the same position. This is due to the honest stake, 33%, being split across
the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
the cosigning protocol may produce multiple cosign commits if 34% of 67%, just
22.78%, of the non-Serai validator sets, is malicious. This would be in
conjunction with 34% of the Serai validator set (assumed 20% of total stake),
for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
an increase from the 6.8% required without the cosigning protocol.

### Bounds Under Synchrony

Assuming the honest stake within the non-Serai validator sets detects the
malicious stake within their set prior to assisting in producing a cosign for
their set, for which there is a multi-second window, 67% of 67% of non-Serai
validator sets is required to produce cosigns for those sets. This raises the
total stake requirement to 42.712% (past the usual 34% threshold).
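Spelled out, the stake arithmetic in the two sections above works out as
follows (a worked restatement of the figures already given, not new analysis):

- asynchronous: 0.34 × 0.67 = 0.2278, so
  0.34 × 0.20 + 0.2278 × 0.80 = 0.068 + 0.18224 = 0.25024 (25.024%)
- synchronous: 0.67 × 0.67 = 0.4489, so
  0.34 × 0.20 + 0.4489 × 0.80 = 0.068 + 0.35912 = 0.42712 (42.712%)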

### Behavior Reliant on Synchrony

If the Serai blockchain node detects an equivocation, it will stop responding
to all RPC requests and stop participating in finalizing further blocks. This
lets the node communicate the equivocating commits to other nodes (causing them
to exhibit the same behavior), yet prevents interaction with it.

If cosigns representing 17% of the non-Serai validator sets by weight are
detected for distinct blocks at the same position, the protocol halts. An
explicit latency period of seventy seconds is enacted after receiving a cosign
commit for the detection of such an equivocation. This is largely redundant
given how the Serai blockchain node will presumably have halted itself by this
time.

### Equivocation-Detection Avoidance

Malicious Serai validators could avoid detection of their equivocating if they
produced two distinct blockchains, A and B, with different keys declared for
the same non-Serai validator set. While the validators following A may detect
the cosigns for distinct blocks by validators following B, the cosigns would be
assumed invalid due to their signatures being verified against distinct keys.

This is prevented by requiring cosigns on the blocks which declare new keys,
ensuring all validators have a consistent view of the keys used within the
cosigning protocol (per the bounds of the cosigning protocol). These blocks are
exempt from the general policy of cosign commits cosigning all prior blocks,
preventing the newly declared keys (which aren't yet cosigned) from being used
to cosign themselves. These cosigns are flagged as "notable", are permanently
archived, and must be synced before a validator will move forward.

Cosigning the block which declares new keys also ensures agreement on the
preceding block which declared the new set, with an exact specification of the
participants and their weight, before it impacts the cosigning protocol.

### Denial of Service Concerns

Any historical Serai validator set may trigger a chain halt by producing an
equivocation after their retirement. This requires 67% of that set to be
malicious. 34% of the active Serai validator set may also trigger a chain halt.

17% of non-Serai validator sets equivocating causing a halt means 5.67% of
non-Serai validator sets' stake may cause a halt (in an asynchronous
environment fully controlled by the adversary). In a synchronous environment
where the honest stake cannot be split across two candidates, 11.33% of
non-Serai validator sets' stake is required.

The more practical attack is for one to obtain 5.67% of non-Serai validator
sets' stake, under any network conditions, and simply go offline. This will
take 17% of validator sets offline with it, preventing any cosign commits
from being performed. A fallback protocol where validators individually produce
cosigns, removing the network's horizontal scalability but ensuring liveness,
prevents this, restoring the additional requirements for control of an
asynchronous network or 11.33% of non-Serai validator sets' stake.

### TODO

The Serai node no longer responding to RPC requests upon detecting any
equivocation, and the fallback protocol where validators individually produce
signatures, are not implemented at this time. The former means the detection of
equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
validator sets' stake the DoS threshold, even without control of an
asynchronous network.
coordinator/cosign/src/delay.rs | 70 (new file)
@@ -0,0 +1,70 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_db::*;
use serai_task::{DoesNotError, ContinuallyRan};

use crate::evaluator::CosignedBlocks;

/// How often callers should broadcast the cosigns flagged for rebroadcasting.
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
const ACKNOWLEDGEMENT_DELAY: Duration =
  Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());

create_db!(
  SubstrateCosignDelay {
    // The latest cosigned block number.
    LatestCosignedBlockNumber: () -> u64,
  }
);

/// A task to delay acknowledgement of cosigns.
pub(crate) struct CosignDelayTask<D: Db> {
  pub(crate) db: D,
}

// Wraps an `Undroppable` transaction so it may be held across an await point: if the future is
// dropped mid-await, this wrapper's `Drop` closes the transaction instead of dropping it
struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
impl<T: DbTxn> Drop for AwaitUndroppable<T> {
  fn drop(&mut self) {
    if let Some(mut txn) = self.0.take() {
      (unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
    }
  }
}

impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.db.txn();

        // Receive the next block to mark as cosigned
        let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
          txn.close();
          break;
        };

        // Calculate when we should mark it as valid
        let time_valid =
          SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
        // Sleep until then
        let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
        tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
          .await;
        let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());

        // Set the cosigned block
        LatestCosignedBlockNumber::set(&mut txn, &block_number);
        txn.commit();

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
coordinator/cosign/src/evaluator.rs | 233 (new file)
@@ -0,0 +1,233 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::{
  HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
  intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
};

create_db!(
  SubstrateCosignEvaluator {
    // The global session currently being evaluated.
    CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
  }
);

db_channel!(
  SubstrateCosignEvaluatorChannels {
    // (cosigned block, time cosign was evaluated)
    CosignedBlocks: () -> (u64, u64),
  }
);

// This is a strict function which won't panic, even with a malicious Serai node, so long as:
// - It's called incrementally (with an increment of 1)
// - It's only called for block numbers we've completed indexing on within the intend task
// - It's only called for block numbers after a global session has started
// - The global sessions channel is populated as the block declaring the session is indexed
// Which all hold true within the context of this task and the intend task.
//
// This function will also ensure the currently evaluated global session is incremented once we
// finish evaluation of the prior session.
fn currently_evaluated_global_session_strict(
  txn: &mut impl DbTxn,
  block_number: u64,
) -> ([u8; 32], GlobalSession) {
  let mut res = {
    let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
      Some(existing) => existing,
      None => {
        let first = GlobalSessionsChannel::try_recv(txn)
          .expect("fetching latest global session yet none declared");
        CurrentlyEvaluatedGlobalSession::set(txn, &first);
        first
      }
    };
    assert!(
      existing.1.start_block_number <= block_number,
      "candidate's start block number exceeds our block number"
    );
    existing
  };

  if let Some(next) = GlobalSessionsChannel::peek(txn) {
    assert!(
      block_number <= next.1.start_block_number,
      "currently_evaluated_global_session_strict wasn't called incrementally"
    );
    // If it's time for this session to activate, take it from the channel and set it
    if block_number == next.1.start_block_number {
      GlobalSessionsChannel::try_recv(txn).unwrap();
      CurrentlyEvaluatedGlobalSession::set(txn, &next);
      res = next;
    }
  }

  res
}

pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> {
  CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id)
}

/// A task to determine if a block has been cosigned and we should handle it.
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
  pub(crate) db: D,
  pub(crate) request: R,
}

impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut known_cosign = None;
      let mut made_progress = false;
      loop {
        let mut txn = self.db.unsafe_txn();
        let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
        else {
          break;
        };

        // Fetch the global session information
        let (global_session, global_session_info) =
          currently_evaluated_global_session_strict(&mut txn, block_number);

        match has_events {
          // Because this had notable events, we require an explicit cosign for this block by a
          // supermajority of the prior block's validator sets
          HasEvents::Notable => {
            let mut weight_cosigned = 0;
            for set in global_session_info.sets {
              // Check if we have the cosign from this set
              if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
                .map(|signed_cosign| signed_cosign.cosign.block_number) ==
                Some(block_number)
              {
                // Since we have this cosign, add the set's weight to the weight which has cosigned
                weight_cosigned +=
                  global_session_info.stakes.get(&set.network).ok_or_else(|| {
                    "ValidatorSet in global session yet didn't have its stake".to_string()
                  })?;
              }
            }
            // Check if the sum weight doesn't cross the required threshold
            if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
              // Request the necessary cosigns over the network
              // TODO: Add a timer to ensure this isn't called too often
              self
                .request
                .request_notable_cosigns(global_session)
                .await
                .map_err(|e| format!("{e:?}"))?;
              // We return an error so the delay before this task is run again increases
              return Err(format!(
                "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
              ));
            }

            log::info!("marking notable block #{block_number} as cosigned");
          }
          // Since this block didn't have any notable events, we simply require a cosign for this
          // block or a greater block by the current validator sets
          HasEvents::NonNotable => {
            // Check if this was satisfied by a cached result which wasn't calculated incrementally
            let known_cosigned = if let Some(known_cosign) = known_cosign {
              known_cosign >= block_number
            } else {
              // Clear `known_cosign` which is no longer helpful
              known_cosign = None;
              false
            };

            // If it isn't already known to be cosigned, evaluate the latest cosigns
            if !known_cosigned {
              /*
                LatestCosign is populated with the latest cosigns for each network which don't
                exceed the latest global session we've evaluated the start of. This current block
                is during the latest global session we've evaluated the start of.
              */

              let mut weight_cosigned = 0;
              let mut lowest_common_block: Option<u64> = None;
              for set in global_session_info.sets {
                // Check if this set cosigned this block or not
                let Some(cosign) =
                  NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
                else {
                  continue;
                };
                if cosign.cosign.block_number >= block_number {
                  weight_cosigned +=
                    global_session_info.stakes.get(&set.network).ok_or_else(|| {
                      "ValidatorSet in global session yet didn't have its stake".to_string()
                    })?;
                }

                // Update the lowest block common to all of these cosigns
                lowest_common_block = lowest_common_block
                  .map(|existing| existing.min(cosign.cosign.block_number))
                  .or(Some(cosign.cosign.block_number));
              }

              // Check if the sum weight doesn't cross the required threshold
              if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
                // Request the superseding notable cosigns over the network
                // If this session hasn't yet produced notable cosigns, then we presume we'll see
                // the desired non-notable cosigns as part of normal operations, without needing to
                // explicitly request them
                self
                  .request
.request_notable_cosigns(global_session)
|
||||
.await
|
||||
.map_err(|e| format!("{e:?}"))?;
|
||||
// We return an error so the delay before this task is run again increases
|
||||
return Err(format!(
|
||||
"block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
|
||||
));
|
||||
}
|
||||
|
||||
// Update the cached result for the block we know is cosigned
|
||||
/*
|
||||
There may be a higher block which was cosigned, but once we get to this block,
|
||||
we'll re-evaluate and find it then. The alternative would be an optimistic
|
||||
re-evaluation now. Both are fine, so the lower-complexity option is preferred.
|
||||
*/
|
||||
known_cosign = lowest_common_block;
|
||||
}
|
||||
|
||||
log::debug!("marking non-notable block #{block_number} as cosigned");
|
||||
}
|
||||
// If this block has no events necessitating cosigning, we can immediately consider the
|
||||
// block cosigned (making this block a NOP)
|
||||
HasEvents::No => {}
|
||||
}
|
||||
|
||||
// Since we checked we had the necessary cosigns, send it for delay before acknowledgement
|
||||
CosignedBlocks::send(
|
||||
&mut txn,
|
||||
&(
|
||||
block_number,
|
||||
SystemTime::now()
|
||||
.duration_since(SystemTime::UNIX_EPOCH)
|
||||
.unwrap_or(Duration::ZERO)
|
||||
.as_secs(),
|
||||
),
|
||||
);
|
||||
txn.commit();
|
||||
|
||||
if (block_number % 500) == 0 {
|
||||
log::info!("marking block #{block_number} as cosigned");
|
||||
}
|
||||
|
||||
made_progress = true;
|
||||
}
|
||||
|
||||
Ok(made_progress)
|
||||
}
|
||||
}
|
||||
}
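Both branches above gate on the same supermajority rule: a block only counts as cosigned once more than 83% of the global session's total stake has cosigned it, computed in integer arithmetic. A self-contained sketch of that check (illustrative values, not repo code):

fn meets_cosign_threshold(weight_cosigned: u64, total_stake: u64) -> bool {
  // ((total * 83) / 100) + 1 is the smallest weight passing the evaluator's check
  weight_cosigned >= ((total_stake * 83) / 100) + 1
}

fn main() {
  assert!(!meets_cosign_threshold(83, 100)); // 83 of 100 is not enough
  assert!(meets_cosign_threshold(84, 100)); // 84 crosses the threshold
}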
coordinator/cosign/src/intend.rs (new file, 184 lines)
@@ -0,0 +1,184 @@
use core::future::Future;
use std::{sync::Arc, collections::HashMap};

use serai_client::{
  primitives::{SeraiAddress, Amount},
  validator_sets::primitives::ValidatorSet,
  Serai,
};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::*;

create_db!(
  CosignIntend {
    ScanCosignFrom: () -> u64,
  }
);

#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct BlockEventData {
  pub(crate) block_number: u64,
  pub(crate) has_events: HasEvents,
}

db_channel! {
  CosignIntendChannels {
    GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
    BlockEvents: () -> BlockEventData,
    IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
  }
}

async fn block_has_events_justifying_a_cosign(
  serai: &Serai,
  block_number: u64,
) -> Result<(Block, HasEvents), String> {
  let block = serai
    .finalized_block_by_number(block_number)
    .await
    .map_err(|e| format!("{e:?}"))?
    .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
  let serai = serai.as_of(block.hash());

  if !serai.validator_sets().key_gen_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::Notable));
  }

  if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::NonNotable));
  }

  Ok((block, HasEvents::No))
}

/// A task to determine which blocks we should intend to cosign.
pub(crate) struct CosignIntendTask<D: Db> {
  pub(crate) db: D,
  pub(crate) serai: Arc<Serai>,
}

impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
      let latest_block_number =
        self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();

      for block_number in start_block_number ..= latest_block_number {
        let mut txn = self.db.unsafe_txn();

        let (block, mut has_events) =
          block_has_events_justifying_a_cosign(&self.serai, block_number)
            .await
            .map_err(|e| format!("{e:?}"))?;

        // Check we are indexing a linear chain
        if (block_number > 1) &&
          (<[u8; 32]>::from(block.header.parent_hash) !=
            SubstrateBlockHash::get(&txn, block_number - 1)
              .expect("indexing a block but haven't indexed its parent"))
        {
          Err(format!(
            "node's block #{block_number} doesn't build upon the block #{} prior indexed",
            block_number - 1
          ))?;
        }
        let block_hash = block.hash();
        SubstrateBlockHash::set(&mut txn, block_number, &block_hash);

        let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);

        // If this is notable, it creates a new global session, which we index into the database
        // now
        if has_events == HasEvents::Notable {
          let serai = self.serai.as_of(block_hash);
          let sets_and_keys = cosigning_sets(&serai).await?;
          let global_session =
            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());

          let mut sets = Vec::with_capacity(sets_and_keys.len());
          let mut keys = HashMap::with_capacity(sets_and_keys.len());
          let mut stakes = HashMap::with_capacity(sets_and_keys.len());
          let mut total_stake = 0;
          for (set, key) in &sets_and_keys {
            sets.push(*set);
            keys.insert(set.network, SeraiAddress::from(*key));
            let stake = serai
              .validator_sets()
              .total_allocated_stake(set.network)
              .await
              .map_err(|e| format!("{e:?}"))?
              .unwrap_or(Amount(0))
              .0;
            stakes.insert(set.network, stake);
            total_stake += stake;
          }
          if total_stake == 0 {
            Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
          }

          let global_session_info = GlobalSession {
            // This session starts cosigning after this block, as this block must be cosigned by
            // the existing validators
            start_block_number: block_number + 1,
            sets,
            keys,
            stakes,
            total_stake,
          };
          GlobalSessions::set(&mut txn, global_session, &global_session_info);
          if let Some(ending_global_session) = global_session_for_this_block {
            GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
          }
          LatestGlobalSessionIntended::set(&mut txn, &global_session);
          GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
        }

        // If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
        // we flag it as not having any events requiring cosigning so we don't attempt to
        // sign/require a cosign for it
        if global_session_for_this_block.is_none() {
          has_events = HasEvents::No;
        }

        match has_events {
          HasEvents::Notable | HasEvents::NonNotable => {
            let global_session_for_this_block = global_session_for_this_block
              .expect("global session for this block was None but still attempting to cosign it");
            let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
              .expect("last global session intended wasn't saved to the database");

            // Tell each set of their expectation to cosign this block
            for set in global_session_info.sets {
              log::debug!("{:?} will be cosigning block #{block_number}", set);
              IntendedCosigns::send(
                &mut txn,
                set,
                &CosignIntent {
                  global_session: global_session_for_this_block,
                  block_number,
                  block_hash,
                  notable: has_events == HasEvents::Notable,
                },
              );
            }
          }
          HasEvents::No => {}
        }

        // Populate a singular feed with every block's status for the evaluator to work off of
        BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
        // Mark this block as handled, meaning we should scan from the next block moving on
        ScanCosignFrom::set(&mut txn, &(block_number + 1));
        txn.commit();
      }

      Ok(start_block_number <= latest_block_number)
    }
  }
}
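The classification performed by `block_has_events_justifying_a_cosign` is a strict precedence: key-gen events make a block notable, otherwise burn-with-instruction events make it non-notable, otherwise no cosign is justified. A minimal model of that precedence (a sketch, not repo code):

#[derive(Debug, PartialEq)]
enum HasEvents { Notable, NonNotable, No }

fn classify(key_gen_events: bool, burn_with_instruction_events: bool) -> HasEvents {
  if key_gen_events {
    // Key-gen events dominate, as they rotate the cosigning keys
    HasEvents::Notable
  } else if burn_with_instruction_events {
    HasEvents::NonNotable
  } else {
    HasEvents::No
  }
}

fn main() {
  assert_eq!(classify(true, true), HasEvents::Notable);
  assert_eq!(classify(false, true), HasEvents::NonNotable);
  assert_eq!(classify(false, false), HasEvents::No);
}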
coordinator/cosign/src/lib.rs (new file, 509 lines)
@@ -0,0 +1,509 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{fmt::Debug, future::Future};
use std::{sync::Arc, collections::HashMap};

use blake2::{Digest, Blake2s256};

use scale::{Encode, Decode};
use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::{NetworkId, SeraiAddress},
  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
  Public, Block, Serai, TemporalSerai,
};

use serai_db::*;
use serai_task::*;

/// The cosigns which are intended to be performed.
mod intend;
/// The evaluator of the cosigns.
mod evaluator;
/// The task to delay acknowledgement of the cosigns.
mod delay;
pub use delay::BROADCAST_FREQUENCY;
use delay::LatestCosignedBlockNumber;

/// The schnorrkel context used when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

/// A 'global session', defined as all validator sets used for cosigning at a given moment.
///
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
/// distinct blocks at distinct positions within a global session, we still identify the faults.
/*
  There is the attack where a validator set is given an alternate blockchain with a key generation
  event at block #n, while most validator sets are given a blockchain with a key generation event
  at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
  cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
  prior to the block being cosigned.

  We solve this by binding cosigns to a global session ID, which has a specific start block, and
  reading the keys from the start block. This means that so long as all validator sets agree on the
  start of a global session, they can verify all cosigns produced by that session, regardless of
  how it advances. Since agreeing on the start of a global session is mandated, there's no way to
  have validator sets follow two distinct global sessions without breaking the bounds of the
  cosigning protocol.
*/
#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct GlobalSession {
  pub(crate) start_block_number: u64,
  pub(crate) sets: Vec<ValidatorSet>,
  pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
  pub(crate) stakes: HashMap<NetworkId, u64>,
  pub(crate) total_stake: u64,
}
impl GlobalSession {
  fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
    cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
    Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
  }
}
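`GlobalSession::id` sorts the cosigners by their Borsh encoding before hashing, so any ordering of the same sets yields the same ID. The property, demonstrated over plain byte strings standing in for `ValidatorSet`s (a sketch, assuming the `blake2` and `borsh` crates):

use blake2::{Digest, Blake2s256};

fn id(mut cosigners: Vec<Vec<u8>>) -> [u8; 32] {
  // Sort before hashing so the ID is independent of the caller's ordering
  cosigners.sort();
  Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
}

fn main() {
  let (a, b) = (b"bitcoin/session 0".to_vec(), b"monero/session 0".to_vec());
  assert_eq!(id(vec![a.clone(), b.clone()]), id(vec![b, a]));
}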

/// If the block has events.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  /// The block had a notable event.
  ///
  /// This is a special case as blocks with key gen events change the keys used for cosigning, and
  /// accordingly must be cosigned before we advance past them.
  Notable,
  /// The block had a non-notable event justifying a cosign.
  NonNotable,
  /// The block didn't have an event justifying a cosign.
  No,
}

/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
  /// The global session this cosign is being performed under.
  pub global_session: [u8; 32],
  /// The number of the block to cosign.
  pub block_number: u64,
  /// The hash of the block to cosign.
  pub block_hash: [u8; 32],
  /// If this cosign must be handled before further cosigns are.
  pub notable: bool,
}

/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
  /// The global session this cosign is being performed under.
  pub global_session: [u8; 32],
  /// The number of the block to cosign.
  pub block_number: u64,
  /// The hash of the block to cosign.
  pub block_hash: [u8; 32],
  /// The actual cosigner.
  pub cosigner: NetworkId,
}

/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
  /// The cosign.
  pub cosign: Cosign,
  /// The signature for the cosign.
  pub signature: [u8; 64],
}

impl SignedCosign {
  fn verify_signature(&self, signer: serai_client::Public) -> bool {
    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };

    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
  }
}
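`verify_signature` checks a schnorrkel "simple" signature over the SCALE-encoded `Cosign` under `COSIGN_CONTEXT`. A sign/verify round-trip of that scheme (a sketch, with a placeholder message standing in for an encoded `Cosign`):

use schnorrkel::Keypair;

const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

fn main() {
  let keypair = Keypair::generate();
  // A real cosign would SCALE-encode the Cosign struct here
  let msg = b"SCALE-encoded Cosign would go here";
  let signature = keypair.sign_simple(COSIGN_CONTEXT, msg);
  assert!(keypair.public.verify_simple(COSIGN_CONTEXT, msg, &signature).is_ok());
}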

create_db! {
  Cosign {
    // The following are populated by the intend task and used throughout the library

    // An index of Substrate blocks
    SubstrateBlockHash: (block_number: u64) -> [u8; 32],
    // A mapping from a global session's ID to its relevant information.
    GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
    // The last block to be cosigned by a global session.
    GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
    // The latest global session intended.
    //
    // This is distinct from the latest global session for which we've evaluated the cosigns.
    LatestGlobalSessionIntended: () -> [u8; 32],

    // The following are managed by the `intake_cosign` function present in this file

    // The latest cosigned block for each network.
    //
    // This will only be populated with cosigns predating or during the most recent global session
    // to have its start cosigned.
    //
    // The global session changes upon a notable block, causing each global session to have exactly
    // one notable block. All validator sets will explicitly produce a cosign for their notable
    // block, causing the latest cosigned block for a global session to either be the global
    // session's notable cosigns or the network's latest cosigns.
    NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
    // Cosigns received for blocks not locally recognized as finalized.
    Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
    // The global session which faulted.
    FaultedSession: () -> [u8; 32],
  }
}

/// Fetch the keys used for cosigning by a specific network.
async fn keys_for_network(
  serai: &TemporalSerai<'_>,
  network: NetworkId,
) -> Result<Option<(Session, KeyPair)>, String> {
  // The Serai network never cosigns so it has no keys for cosigning
  if network == NetworkId::Serai {
    return Ok(None);
  }

  let Some(latest_session) =
    serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
  else {
    // If this network hasn't had a session declared, move on
    return Ok(None);
  };

  // Get the keys for the latest session
  if let Some(keys) = serai
    .validator_sets()
    .keys(ValidatorSet { network, session: latest_session })
    .await
    .map_err(|e| format!("{e:?}"))?
  {
    return Ok(Some((latest_session, keys)));
  }

  // If the latest session has yet to set keys, use the prior session
  if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
    if let Some(keys) = serai
      .validator_sets()
      .keys(ValidatorSet { network, session: prior_session })
      .await
      .map_err(|e| format!("{e:?}"))?
    {
      return Ok(Some((prior_session, keys)));
    }
  }

  Ok(None)
}

/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
  let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
  for network in serai_client::primitives::NETWORKS {
    let Some((session, keys)) = keys_for_network(serai, network).await? else {
      // If this network doesn't have usable keys, move on
      continue;
    };

    sets.push((ValidatorSet { network, session }, keys.0));
  }
  Ok(sets)
}
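`keys_for_network` prefers the latest session's keys and only falls back to the prior session when the latest has yet to set keys. The selection logic, modeled over plain data (a sketch, not repo code):

fn select(
  latest: (u32, Option<&'static str>),
  prior: Option<(u32, Option<&'static str>)>,
) -> Option<(u32, &'static str)> {
  // The latest session's keys win if they exist
  if let (session, Some(keys)) = latest {
    return Some((session, keys));
  }
  // Otherwise, fall back to the prior session's keys, if any
  if let Some((session, Some(keys))) = prior {
    return Some((session, keys));
  }
  None
}

fn main() {
  assert_eq!(select((5, Some("latest")), Some((4, Some("prior")))), Some((5, "latest")));
  assert_eq!(select((5, None), Some((4, Some("prior")))), Some((4, "prior")));
  assert_eq!(select((0, None), None), None);
}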

/// An object usable to request notable cosigns for a block.
pub trait RequestNotableCosigns: 'static + Send {
  /// The error type which may be encountered when requesting notable cosigns.
  type Error: Debug;

  /// Request the notable cosigns for this global session.
  fn request_notable_cosigns(
    &self,
    global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>>;
}

/// An error used to indicate the cosigning protocol has faulted.
#[derive(Debug)]
pub struct Faulted;

/// An error incurred while intaking a cosign.
#[derive(Debug)]
pub enum IntakeCosignError {
  /// Cosign is for a not-yet-indexed block
  NotYetIndexedBlock,
  /// A later cosign for this cosigner has already been handled
  StaleCosign,
  /// The cosign's global session isn't recognized
  UnrecognizedGlobalSession,
  /// The cosign is for a block before its global session starts
  BeforeGlobalSessionStart,
  /// The cosign is for a block after its global session ends
  AfterGlobalSessionEnd,
  /// The cosign's signing network wasn't a participant in this global session
  NonParticipatingNetwork,
  /// The cosign had an invalid signature
  InvalidSignature,
  /// The cosign is for a global session which has yet to have its declaration block cosigned
  FutureGlobalSession,
}

impl IntakeCosignError {
  /// If this error is temporal to the local view
  pub fn temporal(&self) -> bool {
    match self {
      IntakeCosignError::NotYetIndexedBlock |
      IntakeCosignError::StaleCosign |
      IntakeCosignError::UnrecognizedGlobalSession |
      IntakeCosignError::FutureGlobalSession => true,
      IntakeCosignError::BeforeGlobalSessionStart |
      IntakeCosignError::AfterGlobalSessionEnd |
      IntakeCosignError::NonParticipatingNetwork |
      IntakeCosignError::InvalidSignature => false,
    }
  }
}

/// The interface to manage cosigning with.
pub struct Cosigning<D: Db> {
  db: D,
}
impl<D: Db> Cosigning<D> {
  /// Spawn the tasks to intend and evaluate cosigns.
  ///
  /// The database specified must only be used with a singular instance of the Serai network, and
  /// only used once at any given time.
  pub fn spawn<R: RequestNotableCosigns>(
    db: D,
    serai: Arc<Serai>,
    request: R,
    tasks_to_run_upon_cosigning: Vec<TaskHandle>,
  ) -> Self {
    let (intend_task, _intend_task_handle) = Task::new();
    let (evaluator_task, evaluator_task_handle) = Task::new();
    let (delay_task, delay_task_handle) = Task::new();
    tokio::spawn(
      (intend::CosignIntendTask { db: db.clone(), serai })
        .continually_run(intend_task, vec![evaluator_task_handle]),
    );
    tokio::spawn(
      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
        .continually_run(evaluator_task, vec![delay_task_handle]),
    );
    tokio::spawn(
      (delay::CosignDelayTask { db: db.clone() })
        .continually_run(delay_task, tasks_to_run_upon_cosigning),
    );
    Self { db }
  }

  /// The latest cosigned block number.
  pub fn latest_cosigned_block_number(getter: &impl Get) -> Result<u64, Faulted> {
    if FaultedSession::get(getter).is_some() {
      Err(Faulted)?;
    }

    Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
  }

  /// Fetch a cosigned Substrate block's hash by its block number.
  pub fn cosigned_block(getter: &impl Get, block_number: u64) -> Result<Option<[u8; 32]>, Faulted> {
    if block_number > Self::latest_cosigned_block_number(getter)? {
      return Ok(None);
    }

    Ok(Some(
      SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
    ))
  }

  /// Fetch the notable cosigns for a global session in order to respond to requests.
  ///
  /// If this global session hasn't produced any notable cosigns, this will return the latest
  /// cosigns for this session.
  pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
    for network in serai_client::primitives::NETWORKS {
      if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
        cosigns.push(cosign);
      }
    }
    cosigns
  }

  /// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
  ///
  /// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
  /// cosigns, in case of a fault, to induce identification of the fault by others.
  pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
    if let Some(faulted) = FaultedSession::get(&self.db) {
      let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
      // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
      // identification in those who see the faulty cosigns as honest
      for network in serai_client::primitives::NETWORKS {
        if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
          if cosign.cosign.global_session == faulted {
            cosigns.push(cosign);
          }
        }
      }
      cosigns
    } else {
      let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
        return vec![];
      };
      let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
      for network in serai_client::primitives::NETWORKS {
        if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
          cosigns.push(cosign);
        }
      }
      cosigns
    }
  }

  /// Intake a cosign.
  //
  // Takes `&mut self` as this should only be called once at any given moment.
  pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
    let cosign = &signed_cosign.cosign;
    let network = cosign.cosigner;

    // Check our indexed blockchain includes a block with this block number
    let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
      Err(IntakeCosignError::NotYetIndexedBlock)?
    };
    let faulty = cosign.block_hash != our_block_hash;

    // Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
    if !faulty {
      if let Some(existing) =
        NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
      {
        if existing.cosign.block_number >= cosign.block_number {
          Err(IntakeCosignError::StaleCosign)?;
        }
      }
    }

    let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
      Err(IntakeCosignError::UnrecognizedGlobalSession)?
    };

    // Check the cosigned block number is in range of the global session
    if cosign.block_number < global_session.start_block_number {
      // Cosign is for a block predating the global session
      Err(IntakeCosignError::BeforeGlobalSessionStart)?;
    }
    if !faulty {
      // This prevents a malicious validator set, on the same chain, from producing a cosign after
      // their final block, replacing their notable cosign
      if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
        if cosign.block_number > last_block {
          // Cosign is for a block after the last block this global session should have signed
          Err(IntakeCosignError::AfterGlobalSessionEnd)?;
        }
      }
    }

    // Check the cosign's signature
    {
      let key = Public::from({
        let Some(key) = global_session.keys.get(&network) else {
          Err(IntakeCosignError::NonParticipatingNetwork)?
        };
        *key
      });

      if !signed_cosign.verify_signature(key) {
        Err(IntakeCosignError::InvalidSignature)?;
      }
    }

    // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
    // cosign

    let mut txn = self.db.unsafe_txn();

    if !faulty {
      // If this is for a future global session, we don't acknowledge this cosign at this time
      let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
      // This global session starts the block *after* its declaration, so we want to check if the
      // block declaring it was cosigned
      if (global_session.start_block_number - 1) > latest_cosigned_block_number {
        drop(txn);
        return Err(IntakeCosignError::FutureGlobalSession);
      }

      // This is safe as it's in-range and newer, as prior checked since it isn't faulty
      NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
    } else {
      let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
      // Only handle this as a fault if this set wasn't prior faulty
      if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
        faults.push(signed_cosign.clone());
        Faults::set(&mut txn, cosign.global_session, &faults);

        let mut weight_cosigned = 0;
        for fault in &faults {
          let stake = global_session
            .stakes
            .get(&fault.cosign.cosigner)
            .expect("cosigner with recognized key didn't have a stake entry saved");
          weight_cosigned += stake;
        }

        // Check if the sum weight means a fault has occurred
        if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
          FaultedSession::set(&mut txn, &cosign.global_session);
        }
      }
    }

    txn.commit();
    Ok(())
  }

  /// Receive intended cosigns to produce for this ValidatorSet.
  ///
  /// All cosigns intended, up to and including the next notable cosign, are returned.
  ///
  /// This will drain the internal channel and not re-yield these intentions again.
  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
    let mut res: Vec<CosignIntent> = vec![];
    // While we have yet to find a notable cosign...
    while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
      let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break };
      res.push(intent);
    }
    res
  }
}
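The fault threshold in `intake_cosign` is the complement of the cosigning supermajority: once at least 17% of the total stake has signed blocks we don't recognize, an 83% supermajority on our chain is no longer reachable, so the session is marked faulted. The arithmetic in isolation (illustrative values, not repo code):

fn faulted(faulty_weight: u64, total_stake: u64) -> bool {
  faulty_weight >= (total_stake * 17) / 100
}

fn main() {
  assert!(!faulted(16, 100)); // 84 honest weight remains possible
  assert!(faulted(17, 100)); // at most 83 honest weight remains, below the 84 needed
}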

#[cfg(test)]
mod tests {
  use super::*;

  struct RNC;
  impl RequestNotableCosigns for RNC {
    /// The error type which may be encountered when requesting notable cosigns.
    type Error = ();

    /// Request the notable cosigns for this global session.
    fn request_notable_cosigns(
      &self,
      _global_session: [u8; 32],
    ) -> impl Send + Future<Output = Result<(), Self::Error>> {
      async move { Ok(()) }
    }
  }

  #[tokio::test]
  async fn test() {
    let db: serai_db::MemDb = serai_db::MemDb::new();
    // TODO: Construct an actual `Arc<Serai>`; transmuting an integer is unsound and must never
    // be dereferenced
    let serai = unsafe { core::mem::transmute(0u64) };
    let request = RNC;
    let tasks = vec![];
    let _ = Cosigning::spawn(db, serai, request, tasks);
    core::future::pending().await
  }
}
coordinator/p2p/Cargo.toml (new file, 33 lines)
@@ -0,0 +1,33 @@
[package]
name = "serai-coordinator-p2p"
version = "0.1.0"
description = "Serai coordinator's P2P abstraction"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-db = { path = "../../common/db", version = "0.1" }

serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../cosign" }
tributary-sdk = { path = "../tributary-sdk" }

futures-lite = { version = "2", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../common/task", version = "0.1" }
@@ -1,6 +1,6 @@
 AGPL-3.0-only license

-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker

 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
coordinator/p2p/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# Serai Coordinator P2P

The P2P abstraction used by Serai's coordinator, and tasks over it.
coordinator/p2p/libp2p/Cargo.toml (new file, 42 lines)
@@ -0,0 +1,42 @@
[package]
name = "serai-coordinator-libp2p-p2p"
version = "0.1.0"
description = "Serai coordinator's libp2p-based P2P backend"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p/libp2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.81"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }

rand_core = { version = "0.6", default-features = false, features = ["std"] }

zeroize = { version = "^1.5", default-features = false, features = ["std"] }
blake2 = { version = "0.10", default-features = false, features = ["std"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }
serai-coordinator-p2p = { path = "../" }
coordinator/p2p/libp2p/LICENSE (new file, 15 lines)
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
coordinator/p2p/libp2p/README.md (new file, 14 lines)
@@ -0,0 +1,14 @@
# Serai Coordinator libp2p P2P

A libp2p-backed P2P instantiation for Serai's coordinator.

The libp2p swarm is limited to validators from the Serai network. The swarm
does not maintain any of its own peer finding/routing infrastructure, instead
relying on the Serai network's connection information to dial peers. This
limits the listening peers to those immediately reachable via the same IP
address as their Serai node (despite the two being distinct services) and not
hidden behind a NAT, yet it is also quite simple and gives us full control
over whom we connect to.

Peers are decided via the internal `DialTask`, which aims to maintain a target
number of peers for each external network. This ensures cosigns are able to
propagate across the external networks which sign them.
coordinator/p2p/libp2p/src/authenticate.rs (new file, 176 lines)
@@ -0,0 +1,176 @@
use core::{pin::Pin, future::Future};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};

use serai_client::primitives::PublicKey as Public;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
  core::UpgradeInfo,
  InboundUpgrade, OutboundUpgrade,
  identity::{self, PeerId},
  noise,
};

use crate::peer_id_from_public;

const PROTOCOL: &str = "/serai/coordinator/validators";

#[derive(Clone)]
pub(crate) struct OnlyValidators {
  pub(crate) serai_key: Zeroizing<Keypair>,
  pub(crate) noise_keypair: identity::Keypair,
}

impl OnlyValidators {
  /// The ephemeral challenge protocol for authentication.
  ///
  /// We use ephemeral challenges to prevent replaying signatures from historic sessions.
  ///
  /// We don't immediately send the challenge. We only send a commitment to it. This prevents our
  /// remote peer from choosing their challenge in response to our challenge, in case there was any
  /// benefit to doing so.
  async fn challenges<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    socket: &mut noise::Output<S>,
  ) -> io::Result<([u8; 32], [u8; 32])> {
    let mut our_challenge = [0; 32];
    OsRng.fill_bytes(&mut our_challenge);

    // Write the hash of our challenge
    socket.write_all(&Blake2s256::digest(our_challenge)).await?;

    // Read the hash of their challenge
    let mut their_challenge_commitment = [0; 32];
    socket.read_exact(&mut their_challenge_commitment).await?;

    // Reveal our challenge
    socket.write_all(&our_challenge).await?;

    // Read their challenge
    let mut their_challenge = [0; 32];
    socket.read_exact(&mut their_challenge).await?;

    // Verify their challenge
    if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment {
      Err(io::Error::other("challenge didn't match challenge commitment"))?;
    }

    Ok((our_challenge, their_challenge))
  }

  // We sign the two noise peer IDs and the ephemeral challenges.
  //
  // Signing the noise peer IDs ensures we're authenticating this noise connection. The only
  // expectations placed on noise are for it to prevent a MITM from impersonating the other end or
  // modifying any messages sent.
  //
  // Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as
  // noise MAY prevent replays across sessions (even when the same key is used), and noise IDs
  // shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs),
  // it doesn't hurt.
  async fn authenticate<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    &self,
    socket: &mut noise::Output<S>,
    dialer_peer_id: PeerId,
    dialer_challenge: [u8; 32],
    listener_peer_id: PeerId,
    listener_challenge: [u8; 32],
  ) -> io::Result<PeerId> {
    // Write our public key
    socket.write_all(&self.serai_key.public.to_bytes()).await?;

    let msg = borsh::to_vec(&(
      dialer_peer_id.to_bytes(),
      dialer_challenge,
      listener_peer_id.to_bytes(),
      listener_challenge,
    ))
    .unwrap();
    let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg);
    socket.write_all(&signature.to_bytes()).await?;

    let mut public_key_and_sig = [0; 96];
    socket.read_exact(&mut public_key_and_sig).await?;
    let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32])
      .map_err(|_| io::Error::other("invalid public key"))?;
    let sig = Signature::from_bytes(&public_key_and_sig[32 ..])
      .map_err(|_| io::Error::other("invalid signature serialization"))?;

    public_key
      .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
      .map_err(|_| io::Error::other("invalid signature"))?;

    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
  }
}

impl UpgradeInfo for OnlyValidators {
  type Info = <noise::Config as UpgradeInfo>::Info;
  type InfoIter = <noise::Config as UpgradeInfo>::InfoIter;
  fn protocol_info(&self) -> Self::InfoIter {
    // A keypair only causes an error if its sign operation fails, which is only possible with RSA,
    // which isn't used within this codebase
    noise::Config::new(&self.noise_keypair).unwrap().protocol_info()
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
    Box::pin(async move {
      let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_inbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let dialer_serai_validator = self
        .authenticate(
          &mut socket,
          dialer_noise_peer_id,
          dialer_challenge,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
        )
        .await?;
      Ok((dialer_serai_validator, socket))
    })
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
    Box::pin(async move {
      let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_outbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let listener_serai_validator = self
        .authenticate(
          &mut socket,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
          listener_noise_peer_id,
          listener_challenge,
        )
        .await?;
      Ok((listener_serai_validator, socket))
    })
  }
}
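The challenge exchange commits to each side's challenge (as a Blake2s hash) before either is revealed, so neither peer can pick its challenge as a function of the other's. The commitment check in isolation (a sketch, assuming the `blake2` crate):

use blake2::{Digest, Blake2s256};

fn commit(challenge: &[u8; 32]) -> [u8; 32] {
  Blake2s256::digest(challenge).into()
}

fn main() {
  let challenge = [42u8; 32];
  // Sent first, binding us to `challenge` before the peer reveals theirs
  let commitment = commit(&challenge);
  // Upon reveal, the peer verifies the challenge against the commitment
  assert_eq!(commit(&challenge), commitment);
}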
coordinator/p2p/libp2p/src/dial.rs (new file, 127 lines)
@@ -0,0 +1,127 @@
use core::future::Future;
use std::{sync::Arc, collections::HashSet};

use rand_core::{RngCore, OsRng};

use tokio::sync::mpsc;

use serai_client::{SeraiError, Serai};

use libp2p::{
  core::multiaddr::{Protocol, Multiaddr},
  swarm::dial_opts::DialOpts,
};

use serai_task::ContinuallyRan;

use crate::{PORT, Peers, validators::Validators};

const TARGET_PEERS_PER_NETWORK: usize = 5;
/*
  If we only tracked the target amount of peers per network, we'd risk being eclipsed by an
  adversary who immediately connects to us with their array of validators upon our boot. Their
  array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary
  to be the only entity we peered with.

  We solve this by additionally requiring an explicit number of peers which we ourselves dialed,
  as those are peers we randomly chose to connect to.
*/
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;

pub(crate) struct DialTask {
  serai: Arc<Serai>,
  validators: Validators,
  peers: Peers,
  to_dial: mpsc::UnboundedSender<DialOpts>,
}

impl DialTask {
  pub(crate) fn new(
    serai: Arc<Serai>,
    peers: Peers,
    to_dial: mpsc::UnboundedSender<DialOpts>,
  ) -> Self {
    DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
  }
}

impl ContinuallyRan for DialTask {
  // Only run every five minutes, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      self.validators.update().await?;

      // If any network is lacking peers, try to connect to more
      let mut dialed = false;
      let peer_counts = self
        .peers
        .peers
        .read()
        .await
        .iter()
        .map(|(network, peers)| (*network, peers.len()))
        .collect::<Vec<_>>();
      for (network, peer_count) in peer_counts {
        /*
          If we don't have the target amount of peers, and we don't have all the validators in the
          set but one, attempt to connect to more validators within this set.

          The latter clause is so if there's a set with only 3 validators, we don't infinitely try
          to connect to the target amount of peers for this network as we never will. Instead, we
          only try to connect to most of the validators actually present.
        */
        if (peer_count < TARGET_PEERS_PER_NETWORK) &&
          (peer_count <
            self
              .validators
              .by_network()
              .get(&network)
              .map(HashSet::len)
              .unwrap_or(0)
              .saturating_sub(1))
        {
          let mut potential_peers = self.serai.p2p_validators(network).await?;
          for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
            if potential_peers.is_empty() {
              break;
            }
            let index_to_dial =
              usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
                .unwrap();
            let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);

            log::info!("found peer from substrate: {randomly_selected_peer}");

            // Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer
            let mapped_peer = randomly_selected_peer
              .into_iter()
              .filter_map(|protocol| match protocol {
                // Drop PeerIds from the Substrate P2P network
                Protocol::P2p(_) => None,
                // Use our own TCP port
                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
                // Pass-through any other specifications (IPv4, IPv6, etc)
                other => Some(other),
              })
              .collect::<Multiaddr>();

            log::debug!("mapped found peer: {mapped_peer}");

            self
              .to_dial
              .send(DialOpts::unknown_peer_id().address(mapped_peer).build())
              .expect("dial receiver closed?");
            dialed = true;
          }
        }
      }

      Ok(dialed)
    }
  }
}
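The multiaddr mapping above rewrites a Substrate P2P address into a coordinator address: the Substrate peer ID is dropped and the TCP port is replaced with the coordinator's. The same transformation in isolation (a sketch, assuming the `libp2p` crate and an example address):

use libp2p::core::multiaddr::{Multiaddr, Protocol};

const PORT: u16 = 30563;

fn main() {
  let substrate_peer: Multiaddr = "/ip4/192.0.2.1/tcp/30333".parse().unwrap();
  let mapped = substrate_peer
    .into_iter()
    .filter_map(|protocol| match protocol {
      Protocol::P2p(_) => None,
      Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
      other => Some(other),
    })
    .collect::<Multiaddr>();
  assert_eq!(mapped.to_string(), "/ip4/192.0.2.1/tcp/30563");
}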
coordinator/p2p/libp2p/src/gossip.rs (new file, 75 lines)
@@ -0,0 +1,75 @@
use core::time::Duration;

use blake2::{Digest, Blake2s256};

use borsh::{BorshSerialize, BorshDeserialize};

use libp2p::gossipsub::{
  IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform,
  AllowAllSubscriptionFilter, Behaviour,
};
pub use libp2p::gossipsub::Event;

use serai_cosign::SignedCosign;

// Block size limit + 16 KB of space for signatures/metadata
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;

const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
const BASE_TOPIC: &str = "/";

fn topic_for_tributary(tributary: [u8; 32]) -> IdentTopic {
  IdentTopic::new(format!("/tributary/{}", hex::encode(tributary)))
}

#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Message {
  Tributary { tributary: [u8; 32], message: Vec<u8> },
  Cosign(SignedCosign),
}

impl Message {
  pub(crate) fn topic(&self) -> IdentTopic {
    match self {
      Message::Tributary { tributary, .. } => topic_for_tributary(*tributary),
      Message::Cosign(_) => IdentTopic::new(BASE_TOPIC),
    }
  }
}

pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilter>;

pub(crate) fn new_behavior() -> Behavior {
  // The latency used by the Tendermint protocol, used here as the gossip epoch duration
  // libp2p-rs defaults to 1 second, whereas ours will be ~2
  let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
  // The amount of heartbeats which will occur within a single Tributary block
  let heartbeats_per_block =
    tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
  // libp2p-rs defaults to 5, whereas ours will be ~8
  let heartbeats_to_keep = 2 * heartbeats_per_block;
  // libp2p-rs defaults to 3, whereas ours will be ~4
  let heartbeats_to_gossip = heartbeats_per_block;

  let config = ConfigBuilder::default()
    .protocol_id_prefix(LIBP2P_PROTOCOL)
    .history_length(usize::try_from(heartbeats_to_keep).unwrap())
    .history_gossip(usize::try_from(heartbeats_to_gossip).unwrap())
    .heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
    .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
    .duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into()))
    .validation_mode(ValidationMode::Anonymous)
    // Uses a content-based message ID to avoid duplicates as much as possible
    .message_id_fn(|msg| {
      MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))
    })
    .build();

  let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap();

  // Subscribe to the base topic
  let topic = IdentTopic::new(BASE_TOPIC);
  let _ = gossip.subscribe(&topic);

  gossip
}
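`new_behavior` derives its gossipsub parameters from Tendermint timing: message history spans two Tributary blocks' worth of heartbeats and each heartbeat gossips one block's worth. The derivation with stand-in values (hypothetical constants; the real ones live in `tributary-sdk`):

fn main() {
  let latency_time: u32 = 2000; // ms, stand-in for LATENCY_TIME
  let target_block_time: u32 = 8000; // ms, stand-in for TARGET_BLOCK_TIME
  let heartbeats_per_block = target_block_time.div_ceil(latency_time);
  let heartbeats_to_keep = 2 * heartbeats_per_block; // history_length
  let heartbeats_to_gossip = heartbeats_per_block; // history_gossip
  assert_eq!((heartbeats_to_keep, heartbeats_to_gossip), (8, 4));
}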
coordinator/p2p/libp2p/src/lib.rs (new file, 433 lines)
@@ -0,0 +1,433 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{future::Future, time::Duration};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use rand_core::{RngCore, OsRng};

use zeroize::Zeroizing;
use schnorrkel::Keypair;

use serai_client::{
  primitives::{NetworkId, PublicKey},
  validator_sets::primitives::ValidatorSet,
  Serai,
};

use tokio::sync::{mpsc, oneshot, Mutex, RwLock};

use serai_task::{Task, ContinuallyRan};

use serai_cosign::SignedCosign;

use libp2p::{
  multihash::Multihash,
  identity::{self, PeerId},
  tcp::Config as TcpConfig,
  yamux, allow_block_list,
  connection_limits::{self, ConnectionLimits},
  swarm::NetworkBehaviour,
  SwarmBuilder,
};

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// A task to sync the validators from the Serai node in order to keep track of them.
mod validators;
use validators::UpdateValidatorsTask;

/// The authentication protocol upgrade to limit the P2P network to active validators.
mod authenticate;
use authenticate::OnlyValidators;

/// The ping behavior, used to ensure connection latency is below the limit
mod ping;

/// The request-response messages and behavior
mod reqres;
use reqres::{RequestId, Request, Response};

/// The gossip messages and behavior
mod gossip;
use gossip::Message;

/// The swarm task, running it and dispatching to/from it
mod swarm;
use swarm::SwarmTask;

/// The dial task, to find new peers to connect to
mod dial;
use dial::DialTask;

const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

// usize::max, manually implemented, as max isn't a const fn
const MAX_LIBP2P_MESSAGE_SIZE: usize =
  if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
    gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
  } else {
    reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
  };

fn peer_id_from_public(public: PublicKey) -> PeerId {
  // 0 represents the identity Multihash, meaning no hash was performed
  // It's an internal constant within libp2p we can't refer to
  PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
}

/// The representation of a peer.
pub struct Peer<'a> {
  outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
  id: PeerId,
}
impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>> {
    async move {
      const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::Heartbeat(heartbeat);
      let (sender, receiver) = oneshot::channel();
      self
        .outbound_requests
        .send((self.id, request, sender))
        .expect("outbound requests recv channel was dropped?");
      if let Ok(Ok(Response::Blocks(blocks))) =
        tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await
      {
        Some(blocks)
      } else {
        None
      }
    }
  }
}

#[derive(Clone)]
struct Peers {
  peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
}

// Consider adding identify/kad/autonat/rendezvous/(relay + dcutr). While we currently use the
// Serai network for peers, we could use it solely for bootstrapping/as a fallback.
#[derive(NetworkBehaviour)]
struct Behavior {
  // Used to only allow Serai validators as peers
  allow_list: allow_block_list::Behaviour<allow_block_list::AllowedPeers>,
  // Used to limit each peer to a single connection
  connection_limits: connection_limits::Behaviour,
  // Used to ensure connection latency is within tolerances
  ping: ping::Behavior,
  // Used to request data from specific peers
  reqres: reqres::Behavior,
  // Used to broadcast messages to all other peers subscribed to a topic
  gossip: gossip::Behavior,
}

#[allow(clippy::type_complexity)]
struct Libp2pInner {
  peers: Peers,

  gossip: mpsc::UnboundedSender<Message>,
  outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,

  tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,

  signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
  signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,

  heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
  inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
}

/// The libp2p-backed P2P implementation.
///
/// The P2p trait implementation does not support backpressure and is expected to be fully
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
#[derive(Clone)]
pub struct Libp2p(Arc<Libp2pInner>);

impl Libp2p {
  /// Create a new libp2p-backed P2P instance.
  ///
  /// This will spawn all of the internal tasks necessary for functioning.
  pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
    // Define the object we track peers with
    let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };

    // Define the dial task
    let (dial_task_def, dial_task) = Task::new();
    let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel();
    tokio::spawn(
      DialTask::new(serai.clone(), peers.clone(), to_dial_send)
        .continually_run(dial_task_def, vec![]),
    );

    let swarm = {
      let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> {
        Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
      };

      let new_yamux = || {
        let mut config = yamux::Config::default();
        // 1 MiB default + max message size
        config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
        // 256 KiB default + max message size
        config
          .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
        config
      };

      let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
        .with_tokio()
        .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
        .unwrap()
        .with_behaviour(|_| Behavior {
          allow_list: allow_block_list::Behaviour::default(),
          // Limit each peer to a single connection
          connection_limits: connection_limits::Behaviour::new(
            ConnectionLimits::default().with_max_established_per_peer(Some(1)),
          ),
          ping: ping::new_behavior(),
          reqres: reqres::new_behavior(),
          gossip: gossip::new_behavior(),
        })
        .unwrap()
        .with_swarm_config(|config| {
          config
            .with_idle_connection_timeout(ping::INTERVAL + ping::TIMEOUT + Duration::from_secs(5))
        })
        .build();
      swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm
    };

    let (swarm_validators, validator_changes) = UpdateValidatorsTask::spawn(serai);

    let (gossip_send, gossip_recv) = mpsc::unbounded_channel();
    let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel();
    let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel();

    let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel();

    let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel();
    let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel();
    let (inbound_request_responses_send, inbound_request_responses_recv) =
      mpsc::unbounded_channel();

    // Create the swarm task
    SwarmTask::spawn(
      dial_task,
      to_dial_recv,
      swarm_validators,
      validator_changes,
      peers.clone(),
      swarm,
      gossip_recv,
      signed_cosigns_send.clone(),
      tributary_gossip_send,
      outbound_requests_recv,
      heartbeat_requests_send,
      notable_cosign_requests_send,
      inbound_request_responses_recv,
    );

    Libp2p(Arc::new(Libp2pInner {
      peers,

      gossip: gossip_send,
      outbound_requests: outbound_requests_send,

      tributary_gossip: Mutex::new(tributary_gossip_recv),

      signed_cosigns: Mutex::new(signed_cosigns_recv),
      signed_cosigns_send,

      heartbeat_requests: Mutex::new(heartbeat_requests_recv),
      notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
      inbound_request_responses: inbound_request_responses_send,
    }))
  }
}

impl tributary_sdk::P2p for Libp2p {
  fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move {
      self
        .0
        .gossip
        .send(Message::Tributary { tributary, message })
        .expect("gossip recv channel was dropped?");
    }
  }
}

impl serai_cosign::RequestNotableCosigns for Libp2p {
  type Error = ();

  fn request_notable_cosigns(
    &self,
    global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>> {
    async move {
      const AMOUNT_OF_PEERS_TO_REQUEST_FROM: usize = 3;
      const NOTABLE_COSIGNS_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::NotableCosigns { global_session };

      let peers = self.0.peers.peers.read().await.clone();
      // HashSet of all peers
      let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
      // Vec of all peers
      let mut peers = peers.into_iter().collect::<Vec<_>>();

      let mut channels = Vec::with_capacity(AMOUNT_OF_PEERS_TO_REQUEST_FROM);
      for _ in 0 .. AMOUNT_OF_PEERS_TO_REQUEST_FROM {
        if peers.is_empty() {
          break;
        }
        let i = usize::try_from(OsRng.next_u64() % u64::try_from(peers.len()).unwrap()).unwrap();
        let peer = peers.swap_remove(i);

        let (sender, receiver) = oneshot::channel();
        self
          .0
          .outbound_requests
          .send((peer, request, sender))
          .expect("outbound requests recv channel was dropped?");
        channels.push(receiver);
      }

      // We could reduce our latency by using FuturesUnordered here but the latency isn't a concern
      for channel in channels {
        if let Ok(Ok(Response::NotableCosigns(cosigns))) =
          tokio::time::timeout(NOTABLE_COSIGNS_TIMEOUT, channel).await
        {
          for cosign in cosigns {
            self
              .0
              .signed_cosigns_send
              .send(cosign)
              .expect("signed_cosigns recv in this object was dropped?");
          }
        }
      }

      Ok(())
    }
  }
}

impl serai_coordinator_p2p::P2p for Libp2p {
  type Peer<'a> = Peer<'a>;

  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
    async move {
      let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
        return vec![];
      };
      let mut res = vec![];
      for id in peer_ids {
        res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
      }
      res
    }
  }

  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
    async move {
      self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
    }
  }

  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
    async move {
      let (request_id, set, latest_block_hash) = self
        .0
        .heartbeat_requests
        .lock()
        .await
        .recv()
        .await
        .expect("heartbeat_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          // The swarm task expects us to respond to every request. If the caller drops this
          // channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
          // without requiring the caller send a value down this channel
          let response = if let Ok(blocks) = receiver.await {
            Response::Blocks(blocks)
          } else {
            Response::Blocks(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (Heartbeat { set, latest_block_hash }, sender)
    }
  }

  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
    async move {
      let (request_id, global_session) = self
        .0
        .notable_cosign_requests
        .lock()
        .await
        .recv()
        .await
        .expect("notable_cosign_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          let response = if let Ok(notable_cosigns) = receiver.await {
            Response::NotableCosigns(notable_cosigns)
          } else {
            Response::NotableCosigns(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (global_session, sender)
    }
  }

  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
    async move {
      self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
    }
  }

  fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
    async move {
      self
        .0
        .signed_cosigns
        .lock()
        .await
        .recv()
        .await
        .expect("signed_cosigns couldn't recv despite send in same object?")
    }
  }
}
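The heartbeat and notable-cosign handlers above share a pattern worth isolating: a spawned task awaits a oneshot and substitutes a default on `Err`, so the swarm's "every request gets a response" invariant holds even if the caller drops its sender. A self-contained sketch of that pattern, with illustrative names rather than this crate's API:

```rust
use tokio::sync::{mpsc, oneshot};

/// Hands the caller a oneshot sender while guaranteeing the request is answered,
/// defaulting to an empty response if the caller drops the sender.
fn guaranteed_response(
  request_id: u64,
  responses: mpsc::UnboundedSender<(u64, Vec<u8>)>,
) -> oneshot::Sender<Vec<u8>> {
  let (sender, receiver) = oneshot::channel();
  tokio::spawn(async move {
    // `Err` means the caller dropped the sender without responding
    let response = receiver.await.unwrap_or_else(|_| vec![]);
    responses.send((request_id, response)).expect("response receiver was dropped?");
  });
  sender
}

#[tokio::main]
async fn main() {
  let (responses_send, mut responses_recv) = mpsc::unbounded_channel();

  // The caller drops the sender without ever responding...
  drop(guaranteed_response(1, responses_send));
  // ...yet the request is still answered, with the default
  let (id, response) = responses_recv.recv().await.unwrap();
  assert_eq!((id, response), (1, vec![]));
}
```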
17  coordinator/p2p/libp2p/src/ping.rs  Normal file
@@ -0,0 +1,17 @@
use core::time::Duration;

use tributary_sdk::tendermint::LATENCY_TIME;

use libp2p::ping::{self, Config, Behaviour};
pub use ping::Event;

pub(crate) const INTERVAL: Duration = Duration::from_secs(30);
// LATENCY_TIME represents the maximum latency for message delivery. Sending the ping, and
// receiving the pong, each have to occur within this time bound to validate the connection. We
// enforce that, as best we can, by requiring the round-trip be within twice the allowed latency.
pub(crate) const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64);

pub(crate) type Behavior = Behaviour;
pub(crate) fn new_behavior() -> Behavior {
  Behavior::new(Config::default().with_interval(INTERVAL).with_timeout(TIMEOUT))
}
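To make the arithmetic concrete: if each one-way delivery is bounded by `LATENCY_TIME` milliseconds, a compliant round trip takes at most twice that, which is exactly what the timeout encodes, and the swarm's idle timeout (set in lib.rs) allows a full ping cycle plus grace before reaping a connection. A minimal sketch, using an assumed placeholder value for `LATENCY_TIME` (the real constant lives in `tributary_sdk::tendermint`):

```rust
use core::time::Duration;

// Assumed placeholder: the maximum one-way message latency, in milliseconds
const LATENCY_TIME: u32 = 1_700;

const INTERVAL: Duration = Duration::from_secs(30);
// A compliant round trip (ping out, pong back) takes at most twice the one-way bound
const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64);

fn main() {
  // Mirrors the `INTERVAL + TIMEOUT + 5s` idle timeout configured on the swarm:
  // every connection gets one full ping cycle, plus grace, before it may be reaped
  let idle = INTERVAL + TIMEOUT + Duration::from_secs(5);
  println!("ping timeout: {TIMEOUT:?}, idle timeout: {idle:?}");
}
```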
135  coordinator/p2p/libp2p/src/reqres.rs  Normal file
@@ -0,0 +1,135 @@
use core::{fmt, time::Duration};
use std::io;

use async_trait::async_trait;

use borsh::{BorshSerialize, BorshDeserialize};

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

use libp2p::request_response::{
  self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
};
pub use request_response::{RequestId, Message};

use serai_cosign::SignedCosign;

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// The maximum message size for the request-response protocol
// This is derived from the heartbeat message size as it's our largest message
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
  1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;

const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";

/// Requests which can be made via the request-response protocol.
#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)]
pub(crate) enum Request {
  /// A heartbeat informing our peers of our latest block, for the specified blockchain, on regular
  /// intervals.
  ///
  /// If our peers have more blocks than us, they're expected to respond with those blocks.
  Heartbeat(Heartbeat),
  /// A request for the notable cosigns for a global session.
  NotableCosigns { global_session: [u8; 32] },
}

/// Responses which can be received via the request-response protocol.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Response {
  None,
  Blocks(Vec<TributaryBlockWithCommit>),
  NotableCosigns(Vec<SignedCosign>),
}
impl fmt::Debug for Response {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Response::None => fmt.debug_struct("Response::None").finish(),
      Response::Blocks(_) => fmt.debug_struct("Response::Blocks").finish_non_exhaustive(),
      Response::NotableCosigns(_) => {
        fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive()
      }
    }
  }
}

/// The codec used for the request-response protocol.
///
/// We don't use CBOR or JSON, but use borsh to create `Vec<u8>`s we then length-prefix. While
/// we'd ideally use borsh directly with the `io` traits used here, they're async and there isn't
/// an amenable API within borsh for incremental deserialization.
#[derive(Default, Clone, Copy, Debug)]
pub(crate) struct Codec;
impl Codec {
  async fn read<M: BorshDeserialize>(io: &mut (impl Unpin + AsyncRead)) -> io::Result<M> {
    let mut len = [0; 4];
    io.read_exact(&mut len).await?;
    let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
    if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
      Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
    }
    // This may be a non-trivial allocation a peer can easily cause
    // While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
    // the max message size should be sufficiently sane
    let mut buf = vec![0; len];
    io.read_exact(&mut buf).await?;
    let mut buf = buf.as_slice();
    let res = M::deserialize(&mut buf)?;
    if !buf.is_empty() {
      Err(io::Error::other("p2p message had extra data appended to it"))?;
    }
    Ok(res)
  }
  async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> {
    let msg = borsh::to_vec(msg).unwrap();
    io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
    io.write_all(&msg).await
  }
}
#[async_trait]
impl CodecTrait for Codec {
  type Protocol = &'static str;
  type Request = Request;
  type Response = Response;

  async fn read_request<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Request> {
    Self::read(io).await
  }
  async fn read_response<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Response> {
    Self::read(io).await
  }
  async fn write_request<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    req: Request,
  ) -> io::Result<()> {
    Self::write(io, &req).await
  }
  async fn write_response<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    res: Response,
  ) -> io::Result<()> {
    Self::write(io, &res).await
  }
}

pub(crate) type Event = GenericEvent<Request, Response>;

pub(crate) type Behavior = Behaviour<Codec>;
pub(crate) fn new_behavior() -> Behavior {
  let mut config = Config::default();
  config.set_request_timeout(Duration::from_secs(5));
  Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
}
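A minimal, synchronous sketch of the wire format the codec describes, assuming only `borsh` and a stand-in size limit; the helper names are illustrative, not the crate's API. Each message is its borsh serialization prefixed with a little-endian `u32` length, rejected if it exceeds the cap or if trailing bytes remain after deserialization:

```rust
use borsh::{BorshSerialize, BorshDeserialize};

const MAX_MESSAGE_SIZE: usize = 1024; // stand-in for the real limit

fn frame(msg: &impl BorshSerialize) -> Vec<u8> {
  let body = borsh::to_vec(msg).unwrap();
  let mut out = u32::try_from(body.len()).unwrap().to_le_bytes().to_vec();
  out.extend(body);
  out
}

fn unframe<M: BorshDeserialize>(wire: &[u8]) -> Result<M, &'static str> {
  let (len, rest) = wire.split_at(4);
  let len = usize::try_from(u32::from_le_bytes(len.try_into().unwrap())).unwrap();
  if len > MAX_MESSAGE_SIZE {
    return Err("message length exceeded the limit");
  }
  let mut body = &rest[.. len];
  let msg = M::deserialize(&mut body).map_err(|_| "malformed message")?;
  // Mirror the codec's check that no extra data was appended
  if !body.is_empty() {
    return Err("message had extra data appended to it");
  }
  Ok(msg)
}

fn main() {
  let framed = frame(&(42u64, [0u8; 32]));
  let (n, hash): (u64, [u8; 32]) = unframe(&framed).unwrap();
  assert_eq!((n, hash), (42, [0u8; 32]));
}
```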
356  coordinator/p2p/libp2p/src/swarm.rs  Normal file
@@ -0,0 +1,356 @@
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
  time::{Duration, Instant},
};

use borsh::BorshDeserialize;

use serai_client::validator_sets::primitives::ValidatorSet;

use tokio::sync::{mpsc, oneshot, RwLock};

use serai_task::TaskHandle;

use serai_cosign::SignedCosign;

use futures_util::StreamExt;
use libp2p::{
  identity::PeerId,
  request_response::{RequestId, ResponseChannel},
  swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
};

use serai_coordinator_p2p::Heartbeat;

use crate::{
  Peers, BehaviorEvent, Behavior,
  validators::{self, Validators},
  ping,
  reqres::{self, Request, Response},
  gossip,
};

const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);

/*
  `SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the
  contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of
  purposes, making this a rather large task.

  Responsibilities include:
  - Actually dialing new peers (the selection process occurs in another task)
  - Maintaining the peers structure (as we need the Swarm object to see who our peers are)
  - Gossiping messages
  - Dispatching gossiped messages
  - Sending requests
  - Dispatching responses to requests
  - Dispatching received requests
  - Sending responses
*/
pub(crate) struct SwarmTask {
  dial_task: TaskHandle,
  to_dial: mpsc::UnboundedReceiver<DialOpts>,
  last_dial_task_run: Instant,

  validators: Arc<RwLock<Validators>>,
  validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
  peers: Peers,
  rebuild_peers_at: Instant,

  swarm: Swarm<Behavior>,

  gossip: mpsc::UnboundedReceiver<gossip::Message>,
  signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
  tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

  outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
  outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,

  inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
  heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
  notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
  inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
}

impl SwarmTask {
  fn handle_gossip(&mut self, event: gossip::Event) {
    match event {
      gossip::Event::Message { message, .. } => {
        let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else {
          // TODO: Penalize the PeerId which created this message, which requires authenticating
          // each message OR moving to explicit acknowledgement before re-gossiping
          return;
        };
        match message {
          gossip::Message::Tributary { tributary, message } => {
            let _: Result<_, _> = self.tributary_gossip.send((tributary, message));
          }
          gossip::Message::Cosign(signed_cosign) => {
            let _: Result<_, _> = self.signed_cosigns.send(signed_cosign);
          }
        }
      }
      gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
      gossip::Event::GossipsubNotSupported { peer_id } => {
        let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
      }
    }
  }

  fn handle_reqres(&mut self, event: reqres::Event) {
    match event {
      reqres::Event::Message { message, .. } => match message {
        reqres::Message::Request { request_id, request, channel } => match request {
          reqres::Request::Heartbeat(Heartbeat { set, latest_block_hash }) => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> =
              self.heartbeat_requests.send((request_id, set, latest_block_hash));
          }
          reqres::Request::NotableCosigns { global_session } => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session));
          }
        },
        reqres::Message::Response { request_id, response } => {
          if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
            let _: Result<_, _> = channel.send(response);
          }
        }
      },
      reqres::Event::OutboundFailure { request_id, .. } => {
        // Send None as the response for the request
        if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
          let _: Result<_, _> = channel.send(Response::None);
        }
      }
      reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {}
    }
  }

  async fn run(mut self) {
    loop {
      let time_till_rebuild_peers = self.rebuild_peers_at.saturating_duration_since(Instant::now());

      tokio::select! {
        // If the validators have changed, update the allow list
        validator_changes = self.validator_changes.recv() => {
          let validator_changes = validator_changes.expect("validators update task shut down?");
          let behavior = &mut self.swarm.behaviour_mut().allow_list;
          for removed in validator_changes.removed {
            behavior.disallow_peer(removed);
          }
          for added in validator_changes.added {
            behavior.allow_peer(added);
          }
        }

        // Dial peers we're instructed to
        dial_opts = self.to_dial.recv() => {
          let dial_opts = dial_opts.expect("DialTask was closed?");
          let _: Result<_, _> = self.swarm.dial(dial_opts);
        }

        /*
          Rebuild the peers every 10 minutes.

          This protects against any race conditions/edge cases we have in our logic to track peers,
          along with unrepresented behavior such as when a peer changes the networks they're active
          in. This lets the peer tracking logic simply be 'good enough' to not become horribly
          corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`.

          We also use this to disconnect all peers who are no longer active in any network.
        */
        () = tokio::time::sleep(time_till_rebuild_peers) => {
          let validators_by_network = self.validators.read().await.by_network().clone();
          let connected_peers = self.swarm.connected_peers().copied().collect::<HashSet<_>>();

          // Build the new peers object
          let mut peers = HashMap::new();
          for (network, validators) in validators_by_network {
            peers.insert(network, validators.intersection(&connected_peers).copied().collect());
          }

          // Write the new peers object
          *self.peers.peers.write().await = peers;
          self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
        }

        // Handle swarm events
        event = self.swarm.next() => {
          // `Swarm::next` will never return `Poll::Ready(None)`
          // https://docs.rs/
          //   libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
          let event = event.unwrap();
          match event {
            // New connection, so update peers
            SwarmEvent::ConnectionEstablished { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).insert(peer_id);
              }
            }

            // Connection closed, so update peers
            SwarmEvent::ConnectionClosed { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id);
              }

              /*
                We want to re-run the dial task, since we lost a peer, in case we should find new
                peers. This opens a DoS where a validator repeatedly opens/closes connections to
                force iterations of the dial task. We prevent this by setting a minimum distance
                since the last explicit iteration.

                This is suboptimal. If we have several disconnects in immediate proximity, we'll
                trigger the dial task upon the first (where we may still have enough peers we
                shouldn't dial more) but not the last (where we may have so few peers left we
                should dial more). This is accepted as the dial task will eventually run on its
                natural timer.
              */
              const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60);
              let now = Instant::now();
              if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now {
                self.dial_task.run_now();
                self.last_dial_task_run = now;
              }
            }

            SwarmEvent::Behaviour(
              BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
            ) => {
              // This *is* an exhaustive match as these events are empty enums
              match event {}
            }
            SwarmEvent::Behaviour(
              BehaviorEvent::Ping(ping::Event { peer: _, connection, result })
            ) => {
              if result.is_err() {
                self.swarm.close_connection(connection);
              }
            }
            SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
              self.handle_reqres(event)
            }
            SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
              self.handle_gossip(event)
            }

            // We don't handle any of these
            SwarmEvent::IncomingConnection { .. } |
            SwarmEvent::IncomingConnectionError { .. } |
            SwarmEvent::OutgoingConnectionError { .. } |
            SwarmEvent::NewListenAddr { .. } |
            SwarmEvent::ExpiredListenAddr { .. } |
            SwarmEvent::ListenerClosed { .. } |
            SwarmEvent::ListenerError { .. } |
            SwarmEvent::Dialing { .. } => {}
          }
        }

        message = self.gossip.recv() => {
          let message = message.expect("channel for messages to gossip was closed?");
          let topic = message.topic();
          let message = borsh::to_vec(&message).unwrap();

          /*
            If we're sending a message for this topic, it's because this topic is relevant to us.
            Subscribe to it.

            We create topics roughly weekly, one per validator set/session. Once present in a
            topic, we're interested in all messages for it until the validator set/session retires.
            Then there should no longer be any messages for the topic as we should drop the
            Tributary which creates the messages.

            We use this as an argument to not bother implementing unsubscribing from topics.
            They're incredibly infrequently created and old topics shouldn't still have messages
            published to them. Having the coordinator reboot being our method of unsubscribing is
            fine.

            Alternatively, we could route an API to determine when a topic is retired, or retire
            any topics we haven't sent messages on in the past hour.
          */
          let behavior = self.swarm.behaviour_mut();
          let _: Result<_, _> = behavior.gossip.subscribe(&topic);
          /*
            This may be an error of `InsufficientPeers`. If so, we could ask DialTask to dial more
            peers for this network. We don't, as we assume DialTask will detect the lack of peers
            for this network and will already successfully handle this.
          */
          let _: Result<_, _> = behavior.gossip.publish(topic.hash(), message);
        }

        request = self.outbound_requests.recv() => {
          let (peer, request, response_channel) =
            request.expect("channel for requests was closed?");
          let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request);
          self.outbound_request_responses.insert(request_id, response_channel);
        }

        response = self.inbound_request_responses.recv() => {
          let (request_id, response) =
            response.expect("channel for inbound request responses was closed?");
          if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) {
            let _: Result<_, _> =
              self.swarm.behaviour_mut().reqres.send_response(channel, response);
          }
        }
      }
    }
  }

  #[allow(clippy::too_many_arguments)]
  pub(crate) fn spawn(
    dial_task: TaskHandle,
    to_dial: mpsc::UnboundedReceiver<DialOpts>,

    validators: Arc<RwLock<Validators>>,
    validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
    peers: Peers,

    swarm: Swarm<Behavior>,

    gossip: mpsc::UnboundedReceiver<gossip::Message>,
    signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
    tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

    outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,

    heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
    notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
    inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
  ) {
    tokio::spawn(
      SwarmTask {
        dial_task,
        to_dial,
        last_dial_task_run: Instant::now(),

        validators,
        validator_changes,
        peers,
        rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS,

        swarm,

        gossip,
        signed_cosigns,
        tributary_gossip,

        outbound_requests,
        outbound_request_responses: HashMap::new(),

        inbound_request_response_channels: HashMap::new(),
        heartbeat_requests,
        notable_cosign_requests,
        inbound_request_responses,
      }
      .run(),
    );
  }
}
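The disconnect handler above encodes a simple rate-limiting idiom: an on-demand trigger that's suppressed unless a minimum interval has elapsed since the last explicit run. A generic sketch of that idiom, assuming nothing beyond `std` (the names are illustrative, not from this codebase):

```rust
use std::time::{Duration, Instant};

/// Suppresses on-demand triggers which arrive too soon after the previous one,
/// bounding how often an attacker-controlled event can force work to run.
struct Debounce {
  minimum_interval: Duration,
  last_run: Instant,
}

impl Debounce {
  fn new(minimum_interval: Duration) -> Self {
    Debounce { minimum_interval, last_run: Instant::now() }
  }

  /// Returns true (and records the run) only if enough time has passed.
  fn try_run(&mut self) -> bool {
    let now = Instant::now();
    if (self.last_run + self.minimum_interval) < now {
      self.last_run = now;
      return true;
    }
    false
  }
}

fn main() {
  let mut dial_debounce = Debounce::new(Duration::from_secs(60));
  // A burst of disconnects triggers the expensive path at most once
  let runs = (0 .. 5).filter(|_| dial_debounce.try_run()).count();
  assert!(runs <= 1);
}
```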
214  coordinator/p2p/libp2p/src/validators.rs  Normal file
@@ -0,0 +1,214 @@
use core::{borrow::Borrow, future::Future};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};

use serai_task::{Task, ContinuallyRan};

use libp2p::PeerId;

use futures_util::stream::{StreamExt, FuturesUnordered};
use tokio::sync::{mpsc, RwLock};

use crate::peer_id_from_public;

pub(crate) struct Changes {
  pub(crate) removed: HashSet<PeerId>,
  pub(crate) added: HashSet<PeerId>,
}

pub(crate) struct Validators {
  serai: Arc<Serai>,

  // A cache for which session we're populated with the validators of
  sessions: HashMap<NetworkId, Session>,
  // The validators by network
  by_network: HashMap<NetworkId, HashSet<PeerId>>,
  // The validators and their networks
  validators: HashMap<PeerId, HashSet<NetworkId>>,

  // The channel to send the changes down
  changes: mpsc::UnboundedSender<Changes>,
}

impl Validators {
  pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
    let (send, recv) = mpsc::unbounded_channel();
    let validators = Validators {
      serai,
      sessions: HashMap::new(),
      by_network: HashMap::new(),
      validators: HashMap::new(),
      changes: send,
    };
    (validators, recv)
  }

  async fn session_changes(
    serai: impl Borrow<Serai>,
    sessions: impl Borrow<HashMap<NetworkId, Session>>,
  ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
    let temporal_serai = temporal_serai.validator_sets();

    let mut session_changes = vec![];
    {
      // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
      // we poll it till it yields all futures with the most minimal processing possible
      let mut futures = FuturesUnordered::new();
      for network in serai_client::primitives::NETWORKS {
        if network == NetworkId::Serai {
          continue;
        }
        let sessions = sessions.borrow();
        futures.push(async move {
          let session = match temporal_serai.session(network).await {
            Ok(Some(session)) => session,
            Ok(None) => return Ok(None),
            Err(e) => return Err(e),
          };

          if sessions.get(&network) == Some(&session) {
            Ok(None)
          } else {
            match temporal_serai.active_network_validators(network).await {
              Ok(validators) => Ok(Some((
                network,
                session,
                validators.into_iter().map(peer_id_from_public).collect(),
              ))),
              Err(e) => Err(e),
            }
          }
        });
      }
      while let Some(session_change) = futures.next().await {
        if let Some(session_change) = session_change? {
          session_changes.push(session_change);
        }
      }
    }

    Ok(session_changes)
  }

  fn incorporate_session_changes(
    &mut self,
    session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
  ) {
    let mut removed = HashSet::new();
    let mut added = HashSet::new();

    for (network, session, validators) in session_changes {
      // Remove the existing validators
      for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
        // Get all networks this validator is in
        let mut networks = self.validators.remove(&validator).unwrap();
        // Remove this one
        networks.remove(&network);
        if !networks.is_empty() {
          // Insert the networks back if the validator was present in other networks
          self.validators.insert(validator, networks);
        } else {
          // Because this validator is no longer present in any network, mark them as removed
          /*
            This isn't accurate. The validator isn't present in the latest session for this
            network. The validator was present in the prior session which has yet to retire. Our
            lack of explicit inclusion for both the prior session and the current session causes
            only the validators mutually present in both sessions to be responsible for all actions
            still ongoing as the prior validator set retires.

            TODO: Fix this
          */
          removed.insert(validator);
        }
      }

      // Add the new validators
      for validator in validators.iter().copied() {
        self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
        added.insert(validator);
      }
      self.by_network.insert(network, validators);

      // Update the session we have populated
      self.sessions.insert(network, session);
    }

    // Only flag validators for removal if they weren't simultaneously added by these changes
    removed.retain(|validator| !added.contains(validator));
    // Send the changes, dropping the error
    // This lets the caller opt out of change notifications by dropping the receiver
    let _: Result<_, _> = self.changes.send(Changes { removed, added });
  }

  /// Update the view of the validators.
  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
    let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
    self.incorporate_session_changes(session_changes);
    Ok(())
  }

  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
    &self.by_network
  }

  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
    self.validators.get(peer_id)
  }
}

/// A task which updates a set of validators.
///
/// The validators managed by this task will have their exclusive lock held for a minimal amount of
/// time while the update occurs, minimizing the disruption to the services relying on it.
pub(crate) struct UpdateValidatorsTask {
  validators: Arc<RwLock<Validators>>,
}

impl UpdateValidatorsTask {
  /// Spawn a new instance of the UpdateValidatorsTask.
  ///
  /// This returns a reference to the Validators it updates after spawning itself.
  pub(crate) fn spawn(
    serai: Arc<Serai>,
  ) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
    // The validators which will be updated
    let (validators, changes) = Validators::new(serai);
    let validators = Arc::new(RwLock::new(validators));

    // Define the task
    let (update_validators_task, update_validators_task_handle) = Task::new();
    // Forget the handle, as dropping the handle would stop the task
    core::mem::forget(update_validators_task_handle);
    // Spawn the task
    tokio::spawn(
      (Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
    );

    // Return the validators
    (validators, changes)
  }
}

impl ContinuallyRan for UpdateValidatorsTask {
  // Only run every minute, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let session_changes = {
        let validators = self.validators.read().await;
        Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
      };
      self.validators.write().await.incorporate_session_changes(session_changes);
      Ok(true)
    }
  }
}
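The removed/added bookkeeping above reduces to a set difference with one subtlety: a validator rotated out of one network but into another in the same update must not be flagged as removed. A self-contained sketch of that rule, with illustrative types (a `u8` standing in for `PeerId`):

```rust
use std::collections::HashSet;

/// Computes which peers to disallow and allow after a batch of session changes.
/// A peer both removed from one network and added to another nets out to "kept".
fn net_changes(
  removed_candidates: HashSet<u8>,
  added: HashSet<u8>,
) -> (HashSet<u8>, HashSet<u8>) {
  let mut removed = removed_candidates;
  // Only flag peers for removal if they weren't simultaneously added
  removed.retain(|peer| !added.contains(peer));
  (removed, added)
}

fn main() {
  let removed_candidates = HashSet::from([1, 2, 3]);
  let added = HashSet::from([3, 4]);
  let (removed, added) = net_changes(removed_candidates, added);
  // Peer 3 moved between networks, so it is not disallowed
  assert_eq!(removed, HashSet::from([1, 2]));
  assert_eq!(added, HashSet::from([3, 4]));
}
```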
151  coordinator/p2p/src/heartbeat.rs  Normal file
@@ -0,0 +1,151 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};

use futures_lite::FutureExt;

use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::{Heartbeat, Peer, P2p};

// Amount of blocks in a minute
const BLOCKS_PER_MINUTE: usize =
  (60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;

/// The minimum amount of blocks to include within a batch, assuming there are blocks to include
/// in the batch.
///
/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum amount
/// of blocks we'll send). The actual amount of blocks sent will be the amount which fits within
/// the size limit.
pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;

/// The size limit for a batch of blocks sent in response to a Heartbeat.
///
/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing, a
/// commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of validators,
/// and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));

/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
/// tip.
///
/// If the other validator has more blocks than we do, they're expected to inform us. This forms
/// the sync protocol for our Tributaries.
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
  pub(crate) set: ValidatorSet,
  pub(crate) tributary: Tributary<TD, Tx, P>,
  pub(crate) reader: TributaryReader<TD, Tx>,
  pub(crate) p2p: P,
}

impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
      const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);

      let mut tip = self.reader.tip();
      let time_since = {
        let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) {
          SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block)
        } else {
          // If we couldn't fetch this block's time, assume it's old
          // We don't want to declare its unix time as 0 and claim it's 50+ years old though
          log::warn!(
            "heartbeat task couldn't fetch the time of a block, flagging it as a minute old"
          );
          SystemTime::now() - TIME_TO_TRIGGER_SYNCING
        };
        SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO)
      };
      let mut tip_is_stale = false;

      let mut synced_block = false;
      if TIME_TO_TRIGGER_SYNCING <= time_since {
        log::warn!(
          "last known tributary block for {:?} was {} seconds ago",
          self.set,
          time_since.as_secs()
        );

        // This requests all peers for this network, without differentiating by session
        // This should be fine as most validators should overlap across sessions
        'peer: for peer in self.p2p.peers(self.set.network).await {
          loop {
            // Create the request for blocks
            if tip_is_stale {
              tip = self.reader.tip();
              tip_is_stale = false;
            }
            // Necessary due to https://github.com/rust-lang/rust/issues/100013
            let Some(blocks) = peer
              .send_heartbeat(Heartbeat { set: self.set, latest_block_hash: tip })
              .boxed()
              .await
            else {
              continue 'peer;
            };

            // This is the final batch if it has less than the maximum amount of blocks
            // (signifying there weren't more blocks after this to fill the batch with)
            let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;

            // Sync each block
            for block_with_commit in blocks {
              let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else {
                // TODO: Disconnect/slash this peer
                log::warn!("received invalid Block inside response to heartbeat");
                continue 'peer;
              };

              // Attempt to sync the block
              if !self.tributary.sync_block(block, block_with_commit.commit).await {
                // The block may be invalid or stale if we added a block elsewhere
                if (!tip_is_stale) && (tip != self.reader.tip()) {
                  // Since the Tributary's tip advanced on its own, return
                  return Ok(false);
                }

                // Since this block was invalid or stale in a way non-trivial to detect, try to
                // sync with the next peer
                continue 'peer;
              }

              // Because we synced a block, flag the tip as stale
              tip_is_stale = true;
              // And note that we did sync a block
              synced_block = true;
            }

            // If this was the final batch, move on from this peer
            // We could assume they were honest and we are done syncing the chain, but this is a
            // bit more robust
            if final_batch {
              continue 'peer;
            }
          }
        }

        // This will cause the task to be run less and less often, ensuring we aren't spamming the
        // network if we legitimately aren't making progress
        if !synced_block {
          Err(format!(
            "tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
            self.set,
            time_since.as_secs(),
          ))?;
        }
      }

      Ok(synced_block)
    }
  }
}
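To make the constants concrete, here's a worked sketch of the batch sizing, using placeholder values for `TARGET_BLOCK_TIME`, `BLOCK_SIZE_LIMIT`, and `MAX_KEY_SHARES_PER_SET` (the real values live in `tributary_sdk` and `serai_client`; these are assumptions purely for illustration):

```rust
// Placeholder values, assumed purely for illustration
const TARGET_BLOCK_TIME: u64 = 6_000; // ms per block
const BLOCK_SIZE_LIMIT: usize = 350_000; // bytes
const MAX_KEY_SHARES_PER_SET: usize = 600;

const BLOCKS_PER_MINUTE: usize = (60 / (TARGET_BLOCK_TIME / 1000)) as usize; // 10
// One more than a minute's worth of blocks, so a batch covers more than the chain
// produces between heartbeats
const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1; // 11
// Each block may be accompanied by a commit over-estimated at 32 + (shares * 128) bytes
const BATCH_SIZE_LIMIT: usize =
  MIN_BLOCKS_PER_BATCH * (BLOCK_SIZE_LIMIT + 32 + (MAX_KEY_SHARES_PER_SET * 128));

fn main() {
  // With these placeholders, a batch may carry up to roughly 4.7 MB
  println!("batch size limit: {BATCH_SIZE_LIMIT} bytes");
}
```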
204  coordinator/p2p/src/lib.rs  Normal file
@@ -0,0 +1,204 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::future::Future;
use std::collections::HashMap;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

use serai_db::Db;
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
use serai_cosign::{SignedCosign, Cosigning};

use tokio::sync::{mpsc, oneshot};

use serai_task::{Task, ContinuallyRan};

/// The heartbeat task, effecting sync of Tributaries
pub mod heartbeat;
use crate::heartbeat::HeartbeatTask;

/// A heartbeat for a Tributary.
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
pub struct Heartbeat {
  /// The Tributary this is the heartbeat of.
  pub set: ValidatorSet,
  /// The hash of the latest block added to the Tributary.
  pub latest_block_hash: [u8; 32],
}

/// A tributary block and its commit.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub struct TributaryBlockWithCommit {
  /// The serialized block.
  pub block: Vec<u8>,
  /// The serialized commit.
  pub commit: Vec<u8>,
}

/// A representation of a peer.
pub trait Peer<'a>: Send {
  /// Send a heartbeat to this peer.
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
}

/// The representation of the P2P network.
pub trait P2p:
  Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
{
  /// The representation of a peer.
  type Peer<'a>: Peer<'a>;

  /// Fetch the peers for this network.
  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;

  /// Broadcast a cosign.
  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;

  /// A cancel-safe future for the next heartbeat received over the P2P network.
  ///
  /// Yields the validator set it's for, the latest block hash observed, and a channel to return
  /// the descending blocks. This channel MUST NOT and will not have its receiver dropped before a
  /// message is sent.
  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;

  /// A cancel-safe future for the next request for the notable cosigns of a global session.
  ///
  /// Yields the global session the request is for and a channel to return the notable cosigns.
  /// This channel MUST NOT and will not have its receiver dropped before a message is sent.
  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;

  /// A cancel-safe future for the next message regarding a Tributary.
  ///
  /// Yields the message's Tributary's genesis block hash and the message.
  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)>;

  /// A cancel-safe future for the next cosign received.
  fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
}

fn handle_notable_cosigns_request<D: Db>(
  db: &D,
  global_session: [u8; 32],
  channel: oneshot::Sender<Vec<SignedCosign>>,
) {
  let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
  channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
}

fn handle_heartbeat<D: Db, T: TransactionTrait>(
  reader: &TributaryReader<D, T>,
  mut latest_block_hash: [u8; 32],
  channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
) {
  let mut res_size = 8;
  let mut res = vec![];
  // The former condition should be covered by the latter condition
  while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
    let Some(block_after) = reader.block_after(&latest_block_hash) else { break };

    // These `break` conditions should only occur under edge cases, such as if we're actively
    // deleting this Tributary due to being done with it
    let Some(block) = reader.block(&block_after) else { break };
    let block = block.serialize();
    let Some(commit) = reader.commit(&block_after) else { break };
    res_size += 8 + block.len() + 8 + commit.len();
    res.push(TributaryBlockWithCommit { block, commit });

    latest_block_hash = block_after;
  }
  channel
    .send(res)
    .map_err(|_| ())
    .expect("channel listening for heartbeat oneshot response was dropped?");
}

/// Run the P2P instance.
///
/// `add_tributary`'s and `retire_tributary`'s senders, along with `send_cosigns`'s receiver, must
/// never be dropped. `retire_tributary` is not required to only be instructed with added
/// Tributaries.
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
  db: impl Db,
  p2p: P,
  mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
  mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
  send_cosigns: mpsc::UnboundedSender<SignedCosign>,
) {
  let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
  let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
  let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();

  loop {
    tokio::select! {
      tributary = add_tributary.recv() => {
        let (set, tributary) = tributary.expect("add_tributary send was dropped");
        let reader = tributary.reader();
        readers.insert(set, reader.clone());

        let (heartbeat_task_def, heartbeat_task) = Task::new();
        tokio::spawn(
          (HeartbeatTask {
            set,
            tributary: tributary.clone(),
            reader: reader.clone(),
            p2p: p2p.clone(),
          }).continually_run(heartbeat_task_def, vec![])
        );
        heartbeat_tasks.insert(set, heartbeat_task);

        let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
        tributaries.insert(tributary.genesis(), tributary_message_send);
        // For as long as this sender exists, handle the messages from it on a dedicated task
        tokio::spawn(async move {
          while let Some(message) = tributary_message_recv.recv().await {
            tributary.handle_message(&message).await;
          }
        });
      }
      set = retire_tributary.recv() => {
        let set = set.expect("retire_tributary send was dropped");
        let Some(reader) = readers.remove(&set) else { continue };
        tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
        heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
      }

      (heartbeat, channel) = p2p.heartbeat() => {
        if let Some(reader) = readers.get(&heartbeat.set) {
          let reader = reader.clone(); // This is a cheap clone
          // We spawn this on a task due to the DB reads needed
          tokio::spawn(async move {
            handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
          });
        }
      }
      (global_session, channel) = p2p.notable_cosigns_request() => {
        tokio::spawn({
          let db = db.clone();
          async move { handle_notable_cosigns_request(&db, global_session, channel) }
        });
      }
      (tributary, message) = p2p.tributary_message() => {
        if let Some(tributary) = tributaries.get(&tributary) {
          tributary.send(message).expect("tributary message recv was dropped?");
        }
      }
      cosign = p2p.cosign() => {
        // We don't call `Cosigning::intake_cosign` here as that can only be called from a single
        // location. We also need to intake the cosigns we produce, which means we need to merge
        // these streams (signing, network) somehow. That's done with this mpsc channel
        send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
      }
    }
  }
}
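A minimal sketch of wiring `run` up, under the documented contract that the `add_tributary`/`retire_tributary` senders and the `send_cosigns` receiver must outlive the loop. The function and variable names here are illustrative, and the cosign intake is stubbed out:

```rust
use serai_client::validator_sets::primitives::ValidatorSet;
use serai_cosign::SignedCosign;
use tributary_sdk::{TransactionTrait, Tributary};
use tokio::sync::mpsc;

// Hypothetical wiring; `p2p` is any `P2p` implementation, `db` any `Db`, and
// `TD`/`Tx` the Tributary's database and transaction types.
async fn wire_up<TD, Tx, P>(db: TD, p2p: P)
where
  TD: serai_db::Db,
  Tx: TransactionTrait,
  P: serai_coordinator_p2p::P2p,
{
  let (add_tributary_send, add_tributary_recv) =
    mpsc::unbounded_channel::<(ValidatorSet, Tributary<TD, Tx, P>)>();
  let (retire_tributary_send, retire_tributary_recv) = mpsc::unbounded_channel::<ValidatorSet>();
  let (send_cosigns, mut recv_cosigns) = mpsc::unbounded_channel::<SignedCosign>();

  // Per the documented contract, these ends must never be dropped while `run` is live
  core::mem::forget((add_tributary_send, retire_tributary_send));

  tokio::spawn(async move {
    // Intake cosigns received from the network; a real caller merges these with the
    // cosigns it produces before intaking them
    while let Some(_cosign) = recv_cosigns.recv().await {}
  });

  serai_coordinator_p2p::run(db, p2p, add_tributary_recv, retire_tributary_recv, send_cosigns)
    .await;
}
```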
@@ -1,336 +0,0 @@
use core::time::Duration;
|
||||
use std::{
|
||||
sync::Arc,
|
||||
collections::{HashSet, HashMap},
|
||||
};
|
||||
|
||||
use tokio::{
|
||||
sync::{mpsc, Mutex, RwLock},
|
||||
time::sleep,
|
||||
};
|
||||
|
||||
use borsh::BorshSerialize;
|
||||
use sp_application_crypto::RuntimePublic;
|
||||
use serai_client::{
|
||||
primitives::{NETWORKS, NetworkId, Signature},
|
||||
validator_sets::primitives::{Session, ValidatorSet},
|
||||
SeraiError, TemporalSerai, Serai,
|
||||
};
|
||||
|
||||
use serai_db::{Get, DbTxn, Db, create_db};
|
||||
|
||||
use processor_messages::coordinator::cosign_block_msg;
|
||||
|
||||
use crate::{
|
||||
p2p::{CosignedBlock, GossipMessageKind, P2p},
|
||||
substrate::LatestCosignedBlock,
|
||||
};
|
||||
|
||||
create_db! {
|
||||
CosignDb {
|
||||
ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock,
|
||||
LatestCosign: (network: NetworkId) -> CosignedBlock,
|
||||
DistinctChain: (set: ValidatorSet) -> (),
|
||||
}
|
||||
}
|
||||
|
||||
pub struct CosignEvaluator<D: Db> {
|
||||
db: Mutex<D>,
|
||||
serai: Arc<Serai>,
|
||||
stakes: RwLock<Option<HashMap<NetworkId, u64>>>,
|
||||
latest_cosigns: RwLock<HashMap<NetworkId, CosignedBlock>>,
|
||||
}
|
||||
|
||||
impl<D: Db> CosignEvaluator<D> {
|
||||
async fn update_latest_cosign(&self) {
|
||||
let stakes_lock = self.stakes.read().await;
|
||||
// If we haven't gotten the stake data yet, return
|
||||
let Some(stakes) = stakes_lock.as_ref() else { return };
|
||||
|
||||
let total_stake = stakes.values().copied().sum::<u64>();
|
||||
|
||||
let latest_cosigns = self.latest_cosigns.read().await;
|
||||
let mut highest_block = 0;
|
||||
for cosign in latest_cosigns.values() {
|
||||
let mut networks = HashSet::new();
|
||||
for (network, sub_cosign) in &*latest_cosigns {
|
||||
if sub_cosign.block_number >= cosign.block_number {
|
||||
networks.insert(network);
|
||||
}
|
||||
}
|
||||
let sum_stake =
|
||||
networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
|
||||
let needed_stake = ((total_stake * 2) / 3) + 1;
|
||||
if (total_stake == 0) || (sum_stake > needed_stake) {
|
||||
highest_block = highest_block.max(cosign.block_number);
|
||||
}
|
||||
}
|
||||
|
||||
let mut db_lock = self.db.lock().await;
|
||||
let mut txn = db_lock.txn();
|
||||
if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
|
||||
log::info!("setting latest cosigned block to {}", highest_block);
|
||||
LatestCosignedBlock::set(&mut txn, &highest_block);
|
||||
}
|
||||
txn.commit();
|
||||
}
|
||||
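A worked example of the supermajority arithmetic in `update_latest_cosign` above, with hypothetical stake values (not taken from the source):

fn main() {
  // Hypothetical stakes for three networks.
  let stakes = [40u64, 35, 25];
  let total_stake = stakes.iter().sum::<u64>(); // 100
  let needed_stake = ((total_stake * 2) / 3) + 1; // ((100 * 2) / 3) + 1 = 67
  // Two networks holding 40 + 35 = 75 stake clear the strict threshold...
  assert!((40u64 + 35) > needed_stake);
  // ... while two holding 40 + 25 = 65 do not, so their cosign wouldn't
  // advance highest_block.
  assert!((40u64 + 25) <= needed_stake);
}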
  async fn update_stakes(&self) -> Result<(), SeraiError> {
    let serai = self.serai.as_of_latest_finalized_block().await?;

    let mut stakes = HashMap::new();
    for network in NETWORKS {
      // Use whether this network has published a Batch as a short-circuit for whether they've
      // ever set a key
      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
      if set_key {
        stakes.insert(
          network,
          serai
            .validator_sets()
            .total_allocated_stake(network)
            .await?
            .expect("network which published a batch didn't have a stake set")
            .0,
        );
      }
    }

    // Since we've successfully built stakes, set it
    *self.stakes.write().await = Some(stakes);

    self.update_latest_cosign().await;

    Ok(())
  }

  // Uses Err to signify a message should be retried
  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
    // If we already have this cosign or a newer cosign, return
    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
      if latest.block_number >= cosign.block_number {
        return Ok(());
      }
    }

    // If this is an old cosign (older than a day), drop it
    let latest_block = self.serai.latest_finalized_block().await?;
    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
      log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
      return Ok(());
    }

    let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
      log::warn!("received cosign with a block number which doesn't map to a block");
      return Ok(());
    };

    async fn set_with_keys_fn(
      serai: &TemporalSerai<'_>,
      network: NetworkId,
    ) -> Result<Option<ValidatorSet>, SeraiError> {
      let Some(latest_session) = serai.validator_sets().session(network).await? else {
        log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
        return Ok(None);
      };
      let prior_session = Session(latest_session.0.saturating_sub(1));
      Ok(Some(
        if serai
          .validator_sets()
          .keys(ValidatorSet { network, session: prior_session })
          .await?
          .is_some()
        {
          ValidatorSet { network, session: prior_session }
        } else {
          ValidatorSet { network, session: latest_session }
        },
      ))
    }

    // Get the key for this network as of the prior block
    // If we have two chains, this value may be different across chains depending on if one chain
    // included the set_keys and one didn't
    // Because set_keys will force a cosign, it will force detection of distinct blocks
    // re: set_keys using keys prior to set_keys (assumed amenable to all)
    let serai = self.serai.as_of(block.header.parent_hash.into());

    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
      return Ok(());
    };
    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
      log::warn!("received cosign for a block we didn't have keys for");
      return Ok(());
    };

    if !keys
      .0
      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &Signature(cosign.signature))
    {
      log::warn!("received cosigned block with an invalid signature");
      return Ok(());
    }

    log::info!(
      "received cosign for block {} ({}) by {:?}",
      block.number(),
      hex::encode(cosign.block),
      cosign.network
    );

    // Save this cosign to the DB
    {
      let mut db = self.db.lock().await;
      let mut txn = db.txn();
      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
      LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
      txn.commit();
    }

    if cosign.block != block.hash() {
      log::error!(
        "received cosign for a distinct block at {}. we have {}. cosign had {}",
        cosign.block_number,
        hex::encode(block.hash()),
        hex::encode(cosign.block)
      );

      let serai = self.serai.as_of(latest_block.hash());

      let mut db = self.db.lock().await;
      // Save this set as being on a different chain
      let mut txn = db.txn();
      DistinctChain::set(&mut txn, set_with_keys, &());
      txn.commit();

      let mut total_stake = 0;
      let mut total_on_distinct_chain = 0;
      for network in NETWORKS {
        if network == NetworkId::Serai {
          continue;
        }

        // Get the current set for this network
        let set_with_keys = {
          let mut res;
          while {
            res = set_with_keys_fn(&serai, cosign.network).await;
            res.is_err()
          } {
            log::error!(
              "couldn't get the set with keys when checking for a distinct chain: {:?}",
              res
            );
            tokio::time::sleep(core::time::Duration::from_secs(3)).await;
          }
          res.unwrap()
        };

        // Get its stake
        // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
        if let Some(set_with_keys) = set_with_keys {
          let stake = {
            let mut res;
            while {
              res = serai.validator_sets().total_allocated_stake(set_with_keys.network).await;
              res.is_err()
            } {
              log::error!(
                "couldn't get total allocated stake when checking for a distinct chain: {:?}",
                res
              );
              tokio::time::sleep(core::time::Duration::from_secs(3)).await;
            }
            res.unwrap()
          };

          if let Some(stake) = stake {
            total_stake += stake.0;

            if DistinctChain::get(&*db, set_with_keys).is_some() {
              total_on_distinct_chain += stake.0;
            }
          }
        }
      }

      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
      if (total_stake * 17 / 100) <= total_on_distinct_chain {
        panic!("17% of validator sets (by stake) have co-signed a distinct chain");
      }
    } else {
      {
        let mut latest_cosigns = self.latest_cosigns.write().await;
        latest_cosigns.insert(cosign.network, cosign);
      }
      self.update_latest_cosign().await;
    }

    Ok(())
  }
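To make the 17% trip-wire above concrete, a small check with hypothetical numbers (the 17% figure itself comes from issue 339, linked in the code):

fn main() {
  // Hypothetical total stake across all external networks.
  let total_stake = 1_000u64;
  // Hypothetical stake observed cosigning a conflicting chain.
  let total_on_distinct_chain = 170u64;
  // (1_000 * 17) / 100 = 170, and 170 <= 170, so the node would panic here
  // rather than keep following a chain which 17% of stake has contradicted.
  assert!((total_stake * 17 / 100) <= total_on_distinct_chain);
}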
  #[allow(clippy::new_ret_no_self)]
  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
    let mut latest_cosigns = HashMap::new();
    for network in NETWORKS {
      if let Some(cosign) = LatestCosign::get(&db, network) {
        latest_cosigns.insert(network, cosign);
      }
    }

    let evaluator = Arc::new(Self {
      db: Mutex::new(db),
      serai,
      stakes: RwLock::new(None),
      latest_cosigns: RwLock::new(latest_cosigns),
    });

    // Spawn a task to update stakes regularly
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        loop {
          // Run this until it passes
          while evaluator.update_stakes().await.is_err() {
            log::warn!("couldn't update stakes in the cosign evaluator");
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
          // Run it every 10 minutes as we don't need the exact stake data for this to be valid
          sleep(Duration::from_secs(10 * 60)).await;
        }
      }
    });

    // Spawn a task to receive cosigns and handle them
    let (send, mut recv) = mpsc::unbounded_channel();
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        while let Some(msg) = recv.recv().await {
          while evaluator.handle_new_cosign(msg).await.is_err() {
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
        }
      }
    });

    // Spawn a task to rebroadcast the most recent cosigns
    tokio::spawn({
      async move {
        loop {
          let cosigns =
            evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
          for cosign in cosigns {
            let mut buf = vec![];
            cosign.serialize(&mut buf).unwrap();
            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
          }
          sleep(Duration::from_secs(60)).await;
        }
      }
    });

    // Return the channel to send cosigns
    send
  }
}
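Despite its name, `new` hands back only the sender half of a channel; the evaluator itself lives on inside the spawned tasks. A usage sketch, assuming `db`, `p2p`, `serai`, and a received `cosigned_block` already exist in the caller's scope (all names illustrative, not the repository's actual call-site):

// Inside the coordinator's startup, with db: D, p2p: P, serai: Arc<Serai>:
let cosign_send = CosignEvaluator::new(db, p2p, serai);
// Anything which learns of a cosign forwards it into the evaluator; its
// background tasks verify the signature, track per-network stake, and advance
// the latest cosigned block once a supermajority of stake agrees.
cosign_send.send(cosigned_block).expect("the cosign evaluator's receiver was dropped");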
@@ -1,134 +1,113 @@
-use blake2::{
-  digest::{consts::U32, Digest},
-  Blake2b,
-};
+use std::{path::Path, fs};

+pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
+use serai_db::{create_db, db_channel};

-use scale::Encode;
-use borsh::{BorshSerialize, BorshDeserialize};
 use serai_client::{
   primitives::NetworkId,
   validator_sets::primitives::{Session, ValidatorSet},
-  in_instructions::primitives::{Batch, SignedBatch},
 };

-pub use serai_db::*;
+use serai_cosign::SignedCosign;
+use serai_coordinator_substrate::NewSetInformation;
+use serai_coordinator_tributary::Transaction;

-use ::tributary::ReadWrite;
-use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
+#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
+pub(crate) type Db = serai_db::ParityDb;
+#[cfg(feature = "rocksdb")]
+pub(crate) type Db = serai_db::RocksDB;

-create_db!(
-  MainDb {
-    HandledMessageDb: (network: NetworkId) -> u64,
-    ActiveTributaryDb: () -> Vec<u8>,
-    RetiredTributaryDb: (set: ValidatorSet) -> (),
-    FirstPreprocessDb: (
-      network: NetworkId,
-      id_type: RecognizedIdType,
-      id: &[u8]
-    ) -> Vec<Vec<u8>>,
-    LastReceivedBatchDb: (network: NetworkId) -> u32,
-    ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
-    BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
-    LastVerifiedBatchDb: (network: NetworkId) -> u32,
-    HandoverBatchDb: (set: ValidatorSet) -> u32,
-    LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
-    QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
-  }
-);

-impl ActiveTributaryDb {
-  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
-    let bytes = Self::get(getter).unwrap_or_default();
-    let mut bytes_ref: &[u8] = bytes.as_ref();

-    let mut tributaries = vec![];
-    while !bytes_ref.is_empty() {
-      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
-    }

-    (bytes, tributaries)
+#[allow(unused_variables, unreachable_code)]
+fn db(path: &str) -> Db {
+  {
+    let path: &Path = path.as_ref();
+    // This may error if this path already exists, which we shouldn't propagate/panic on. If this
+    // is a problem (such as we don't have the necessary permissions to write to this path), we
+    // expect the following DB opening to error.
+    let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
+  }

-  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
-    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
-    for tributary in &existing {
-      if tributary == spec {
-        return;
-      }
-    }
+  #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
+  panic!("built with parity-db and rocksdb");
+  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
+  let db = serai_db::new_parity_db(path);
+  #[cfg(feature = "rocksdb")]
+  let db = serai_db::new_rocksdb(path);
+  db
+}

-    spec.serialize(&mut existing_bytes).unwrap();
-    ActiveTributaryDb::set(txn, &existing_bytes);
-  }
+pub(crate) fn coordinator_db() -> Db {
+  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
+  db(&format!("{root_path}/coordinator/db"))
+}

-  pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
-    let mut active = Self::active_tributaries(txn).1;
-    for i in 0 .. active.len() {
-      if active[i].set() == set {
-        active.remove(i);
-        break;
-      }
-    }
+fn tributary_db_folder(set: ValidatorSet) -> String {
+  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
+  let network = match set.network {
+    NetworkId::Serai => panic!("creating Tributary for the Serai network"),
+    NetworkId::Bitcoin => "Bitcoin",
+    NetworkId::Ethereum => "Ethereum",
+    NetworkId::Monero => "Monero",
+  };
+  format!("{root_path}/tributary-{network}-{}", set.session.0)
+}

-    let mut bytes = vec![];
-    for active in active {
-      active.serialize(&mut bytes).unwrap();
-    }
-    Self::set(txn, &bytes);
-    RetiredTributaryDb::set(txn, set, &());
+pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
+  db(&format!("{}/db", tributary_db_folder(set)))
 }

+pub(crate) fn prune_tributary_db(set: ValidatorSet) {
+  log::info!("pruning data directory for tributary {set:?}");
+  let db = tributary_db_folder(set);
+  if fs::exists(&db).expect("couldn't check if tributary DB exists") {
+    fs::remove_dir_all(db).unwrap();
+  }
+}

-impl FirstPreprocessDb {
-  pub fn save_first_preprocess(
-    txn: &mut impl DbTxn,
-    network: NetworkId,
-    id_type: RecognizedIdType,
-    id: &[u8],
-    preprocess: &Vec<Vec<u8>>,
-  ) {
-    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
-      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
-      return;
+create_db! {
+  Coordinator {
+    // The currently active Tributaries
+    ActiveTributaries: () -> Vec<NewSetInformation>,
+    // The latest Tributary to have been retired for a network
+    // Since Tributaries are retired sequentially, this is informative as to whether any
+    // Tributary has been retired
+    RetiredTributary: (network: NetworkId) -> Session,
+    // The last handled message from a Processor
+    LastProcessorMessage: (network: NetworkId) -> u64,
+    // Cosigns we produced and tried to intake yet incurred an error while doing so
+    ErroneousCosigns: () -> Vec<SignedCosign>,
+  }
 }

+db_channel! {
+  Coordinator {
+    // Cosigns we produced
+    SignedCosigns: () -> SignedCosign,
+    // Tributaries to clean up upon reboot
+    TributaryCleanup: () -> ValidatorSet,
+  }
+}

+mod _internal_db {
+  use super::*;

+  db_channel! {
+    Coordinator {
+      // Tributary transactions to publish
+      TributaryTransactions: (set: ValidatorSet) -> Transaction,
+    }
-    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
   }
 }

-impl ExpectedBatchDb {
-  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
-    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
-    Self::set(
-      txn,
-      batch.network,
-      batch.id,
-      &Blake2b::<U32>::digest(batch.instructions.encode()).into(),
-    );
-  }
-}

-impl HandoverBatchDb {
-  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
-    Self::set(txn, set, &batch);
-    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
-  }
-}
-impl QueuedBatchesDb {
-  pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
-    let mut batches = Self::get(txn, set).unwrap_or_default();
-    batch.write(&mut batches).unwrap();
-    Self::set(txn, set, &batches);
-  }

-  pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
-    let batches_vec = Self::get(txn, set).unwrap_or_default();
-    txn.del(Self::key(set));

-    let mut batches: &[u8] = &batches_vec;
-    let mut res = vec![];
-    while !batches.is_empty() {
-      res.push(Transaction::read(&mut batches).unwrap());
+pub(crate) struct TributaryTransactions;
+impl TributaryTransactions {
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
+    // If this set has yet to be retired, send this transaction
+    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
+      _internal_db::TributaryTransactions::send(txn, set, tx);
+    }
-    res
   }
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
+    _internal_db::TributaryTransactions::try_recv(txn, set)
+  }
 }
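A usage sketch of the `TributaryTransactions` wrapper added above, assuming `txn`, `set`, and `tx` are already in scope (illustrative only). The point of the wrapper is that a send for an already-retired set is silently dropped rather than queued forever:

// Queue a transaction; this is a no-op if `set` was already retired.
TributaryTransactions::send(&mut txn, set, &tx);
// Elsewhere, drain the channel for this set and publish each transaction.
while let Some(tx) = TributaryTransactions::try_recv(&mut txn, set) {
  // publish `tx` to the Tributary for `set`
}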
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff