Mirror of https://github.com/serai-dex/serai.git, synced 2025-12-08 12:19:24 +00:00

Compare commits: develop...d219b77bd0 (570 commits)
(Commit list omitted: 570 commits, newest d219b77bd0 through oldest e4e4245ee3. The author, message, and date columns did not survive extraction.)
.github/LICENSE (vendored) | 2

@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
.github/actions/bitcoin/action.yml (vendored) | 4

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "27.0"
+    default: "30.0"
 
 runs:
   using: "composite"
@@ -37,4 +37,4 @@ runs:
 
     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -txindex -daemon
(file header lost in extraction; this hunk likely belongs to .github/actions/build-dependencies/action.yml)

@@ -52,9 +52,9 @@ runs:
     - name: Install solc
       shell: bash
       run: |
-        cargo +1.89 install svm-rs --version =0.5.18
-        svm install 0.8.26
-        svm use 0.8.26
+        cargo +1.91 install svm-rs --version =0.5.19
+        svm install 0.8.29
+        svm use 0.8.29
 
     - name: Remove preinstalled Docker
       shell: bash
.github/actions/monero-wallet-rpc/action.yml (vendored) | 2

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3
 
 runs:
   using: "composite"
.github/actions/monero/action.yml (vendored) | 2

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3
 
 runs:
   using: "composite"
.github/actions/test-dependencies/action.yml (vendored) | 4

@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.3.4
+    default: v0.18.4.3
 
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "27.1"
+    default: "30.0"
 
 runs:
   using: "composite"
.github/nightly-version (vendored) | 2

@@ -1 +1 @@
-nightly-2025-11-01
+nightly-2025-11-11
.github/workflows/common-tests.yml (vendored) | 1

@@ -30,4 +30,5 @@ jobs:
             -p patchable-async-sleep \
             -p serai-db \
             -p serai-env \
+            -p serai-task \
             -p simple-request
.github/workflows/crypto-tests.yml (vendored) | 8

@@ -35,12 +35,14 @@ jobs:
             -p ciphersuite-kp256 \
             -p multiexp \
             -p schnorr-signatures \
-            -p dleq \
+            -p prime-field \
+            -p short-weierstrass \
+            -p secq256k1 \
+            -p embedwards25519 \
             -p dkg \
             -p dkg-recovery \
             -p dkg-dealer \
-            -p dkg-promote \
             -p dkg-musig \
-            -p dkg-pedpop \
+            -p dkg-evrf \
             -p modular-frost \
             -p frost-schnorrkel
.github/workflows/daily-deny.yml (vendored) | 2

@@ -18,7 +18,7 @@ jobs:
           key: rust-advisory-db
 
       - name: Install cargo deny
-        run: cargo +1.89 install cargo-deny --version =0.18.3
+        run: cargo +1.91 install cargo-deny --version =0.18.5
 
       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
.github/workflows/lint.yml (vendored) | 134

@@ -26,7 +26,7 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy
 
       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -52,7 +52,7 @@ jobs:
           key: rust-advisory-db
 
       - name: Install cargo deny
-        run: cargo +1.89 install cargo-deny --version =0.18.4
+        run: cargo +1.91 install cargo-deny --version =0.18.5
 
       - name: Run cargo deny
         run: cargo deny -L error --all-features check --hide-inclusion-graph
@@ -73,11 +73,137 @@ jobs:
       - name: Run rustfmt
         run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check
 
+      - name: Install foundry
+        uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
+        with:
+          version: nightly-41d4e5437107f6f42c7711123890147bc736a609
+          cache: false
+
+      - name: Run forge fmt
+        run: FOUNDRY_FMT_SORT_INPUTS=false FOUNDRY_FMT_LINE_LENGTH=100 FOUNDRY_FMT_TAB_WIDTH=2 FOUNDRY_FMT_BRACKET_SPACING=true FOUNDRY_FMT_INT_TYPES=preserve forge fmt --check $(find . -iname "*.sol")
+
   machete:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
       - name: Verify all dependencies are in use
         run: |
-          cargo +1.89 install cargo-machete --version =0.8.0
-          cargo +1.89 machete
+          cargo +1.91 install cargo-machete --version =0.9.1
+          cargo +1.91 machete
+
+  msrv:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+      - name: Verify claimed `rust-version`
+        shell: bash
+        run: |
+          cargo +1.91 install cargo-msrv --version =0.18.4
+
+          function check_msrv {
+            # We `cd` into the directory passed as the first argument, but will return to the
+            # directory called from.
+            return_to=$(pwd)
+            echo "Checking $1"
+            cd $1
+
+            # We then find the existing `rust-version` using `grep` (for the right line) and then a
+            # regex (to strip to just the major and minor version).
+            existing=$(cat ./Cargo.toml | grep "rust-version" | grep -Eo "[0-9]+\.[0-9]+")
+
+            # We then backup the `Cargo.toml`, allowing us to restore it after, saving time on future
+            # MSRV checks (as they'll benefit from immediately exiting if the queried version is less
+            # than the declared MSRV).
+            mv ./Cargo.toml ./Cargo.toml.bak
+
+            # We then use an inverted (`-v`) grep to remove the existing `rust-version` from the
+            # `Cargo.toml`, as required because else earlier versions of Rust won't even attempt to
+            # compile this crate.
+            cat ./Cargo.toml.bak | grep -v "rust-version" > Cargo.toml
+
+            # We then find the actual `rust-version` using `cargo-msrv` (again stripping to just the
+            # major and minor version).
+            actual=$(cargo msrv find --output-format minimal | grep -Eo "^[0-9]+\.[0-9]+")
+
+            # Finally, we compare the two.
+            echo "Declared rust-version: $existing"
+            echo "Actual rust-version: $actual"
+            [ $existing == $actual ]
+            result=$?
+
+            # Restore the original `Cargo.toml`.
+            rm Cargo.toml
+            mv ./Cargo.toml.bak ./Cargo.toml
+
+            # Return to the directory called from and return the result.
+            cd $return_to
+            return $result
+          }
+
+          # Check each member of the workspace
+          function check_workspace {
+            # Get the members array from the workspace's `Cargo.toml`
+            cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
+            # Keep all lines after the start of the array, then keep all lines before the next "]"
+            members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
+
+            # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
+            # We accomplish the latter by pruning all characters after the entry's ","
+            members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
+            # Replace the first line, which was "members = [" and is now "members = [,", with "["
+            members=$(echo "$members" | sed "1s/.*/\[/")
+            # Correct the last line, which was malleated to "],"
+            members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
+
+            # Don't check the following
+            # Most of these are binaries, with the exception of the Substrate runtime which has a
+            # bespoke build pipeline
+            members=$(echo "$members" | grep -v "networks/ethereum/relayer\"")
+            members=$(echo "$members" | grep -v "message-queue\"")
+            members=$(echo "$members" | grep -v "processor/bin\"")
+            members=$(echo "$members" | grep -v "processor/bitcoin\"")
+            members=$(echo "$members" | grep -v "processor/ethereum\"")
+            members=$(echo "$members" | grep -v "processor/monero\"")
+            members=$(echo "$members" | grep -v "coordinator\"")
+            members=$(echo "$members" | grep -v "substrate/runtime\"")
+            members=$(echo "$members" | grep -v "substrate/node\"")
+            members=$(echo "$members" | grep -v "orchestration\"")
+
+            # Don't check the tests
+            members=$(echo "$members" | grep -v "mini\"")
+            members=$(echo "$members" | grep -v "tests/")
+
+            # Remove the trailing comma by replacing the last line's "," with ""
+            members=$(echo "$members" | sed "$(($(echo "$members" | wc -l) - 1))s/\,//")
+
+            echo $members | jq -r ".[]" | while read -r member; do
+              check_msrv $member
+              correct=$?
+              if [ $correct -ne 0 ]; then
+                return $correct
+              fi
+            done
+          }
+          check_workspace
+
+  slither:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Build Dependencies
+        uses: ./.github/actions/build-dependencies
+
+      - name: Slither
+        run: |
+          python3 -m pip install slither-analyzer
+
+          slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
+          slither --include-paths ./networks/ethereum/schnorr/contracts ./networks/ethereum/schnorr/contracts/tests/Schnorr.sol
+          slither processor/ethereum/deployer/contracts/Deployer.sol
+          slither processor/ethereum/erc20/contracts/IERC20.sol
+
+          cp networks/ethereum/schnorr/contracts/Schnorr.sol processor/ethereum/router/contracts/
+          cp processor/ethereum/erc20/contracts/IERC20.sol processor/ethereum/router/contracts/
+          cd processor/ethereum/router/contracts
+          slither Router.sol
.github/workflows/networks-tests.yml (vendored) | 3

@@ -30,6 +30,7 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p bitcoin-serai \
+            -p build-solidity-contracts \
+            -p ethereum-schnorr-contract \
             -p alloy-simple-request-transport \
-            -p ethereum-serai \
             -p serai-ethereum-relayer \
.github/workflows/no-std.yml (vendored) | 14

@@ -28,8 +28,18 @@ jobs:
       - name: Install Build Dependencies
         uses: ./.github/actions/build-dependencies
 
+      - name: Get nightly version to use
+        id: nightly
+        shell: bash
+        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
+
       - name: Install RISC-V Toolchain
-        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
+        run: |
+          sudo apt update
+          sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib
+          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal --component rust-src --target riscv32imac-unknown-none-elf
 
       - name: Verify no-std builds
-        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
+        run: |
+          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core -p serai-no-std-tests
+          CFLAGS=-I/usr/include cargo +${{ steps.nightly.outputs.version }} build --target riscv32imac-unknown-none-elf -Z build-std=core,alloc -p serai-no-std-tests --features "alloc"
.github/workflows/tests.yml (vendored) | 60

@@ -39,9 +39,34 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-message-queue \
             -p serai-processor-messages \
-            -p serai-processor \
+            -p serai-processor-key-gen \
+            -p serai-processor-view-keys \
+            -p serai-processor-frost-attempt-manager \
+            -p serai-processor-primitives \
+            -p serai-processor-scanner \
+            -p serai-processor-scheduler-primitives \
+            -p serai-processor-utxo-scheduler-primitives \
+            -p serai-processor-utxo-scheduler \
+            -p serai-processor-transaction-chaining-scheduler \
+            -p serai-processor-smart-contract-scheduler \
+            -p serai-processor-signers \
+            -p serai-processor-bin \
+            -p serai-bitcoin-processor \
+            -p serai-processor-ethereum-primitives \
+            -p serai-processor-ethereum-test-primitives \
+            -p serai-processor-ethereum-deployer \
+            -p serai-processor-ethereum-router \
+            -p serai-processor-ethereum-erc20 \
+            -p serai-ethereum-processor \
+            -p serai-monero-processor \
             -p tendermint-machine \
-            -p tributary-chain \
+            -p tributary-sdk \
+            -p serai-cosign-types \
+            -p serai-cosign \
+            -p serai-coordinator-substrate \
+            -p serai-coordinator-tributary \
+            -p serai-coordinator-p2p \
+            -p serai-coordinator-libp2p-p2p \
             -p serai-coordinator \
             -p serai-orchestrator \
             -p serai-docker-tests
@@ -58,23 +83,19 @@ jobs:
         run: |
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
             -p serai-primitives \
-            -p serai-coins-primitives \
-            -p serai-coins-pallet \
-            -p serai-dex-pallet \
-            -p serai-validator-sets-primitives \
-            -p serai-validator-sets-pallet \
-            -p serai-genesis-liquidity-primitives \
-            -p serai-genesis-liquidity-pallet \
-            -p serai-emissions-primitives \
-            -p serai-emissions-pallet \
-            -p serai-economic-security-pallet \
-            -p serai-in-instructions-primitives \
-            -p serai-in-instructions-pallet \
-            -p serai-signals-primitives \
-            -p serai-signals-pallet \
             -p serai-abi \
+            -p serai-core-pallet \
+            -p serai-coins-pallet \
+            -p serai-validator-sets-pallet \
+            -p serai-signals-pallet \
+            -p serai-dex-pallet \
+            -p serai-genesis-liquidity-pallet \
+            -p serai-economic-security-pallet \
+            -p serai-emissions-pallet \
+            -p serai-in-instructions-pallet \
             -p serai-runtime \
             -p serai-node
+            -p serai-substrate-tests
 
   test-serai-client:
     runs-on: ubuntu-latest
@@ -85,4 +106,9 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
       - name: Run Tests
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-bitcoin
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-ethereum
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-monero
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client-serai
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
.gitignore (vendored) | 3

@@ -2,11 +2,10 @@ target
 
 # Don't commit any `Cargo.lock` which aren't the workspace's
 Cargo.lock
-!./Cargo.lock
+!/Cargo.lock
 
 # Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
-Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile
 
 .test-logs
Cargo.lock (generated) | 4009

File diff suppressed because it is too large.
Cargo.toml | 199

@@ -1,18 +1,12 @@
 [workspace]
 resolver = "2"
 members = [
-  # std patches
-  "patches/matches",
-
-  # Rewrites/redirects
-  "patches/option-ext",
-  "patches/directories-next",
-
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
   "common/db",
   "common/env",
+  "common/task",
   "common/request",
 
   "crypto/transcript",
@@ -24,62 +18,85 @@ members = [
   "crypto/ciphersuite/kp256",
 
   "crypto/multiexp",
 
   "crypto/schnorr",
-  "crypto/dleq",
+  "crypto/prime-field",
+  "crypto/short-weierstrass",
+  "crypto/secq256k1",
+  "crypto/embedwards25519",
 
   "crypto/dkg",
   "crypto/dkg/recovery",
   "crypto/dkg/dealer",
-  "crypto/dkg/promote",
   "crypto/dkg/musig",
-  "crypto/dkg/pedpop",
+  "crypto/dkg/evrf",
   "crypto/frost",
   "crypto/schnorrkel",
 
   "networks/bitcoin",
 
+  "networks/ethereum/build-contracts",
+  "networks/ethereum/schnorr",
   "networks/ethereum/alloy-simple-request-transport",
-  "networks/ethereum",
   "networks/ethereum/relayer",
 
   "message-queue",
 
   "processor/messages",
-  "processor",
 
-  "coordinator/tributary/tendermint",
+  "processor/key-gen",
+  "processor/view-keys",
+  "processor/frost-attempt-manager",
+
+  "processor/primitives",
+  "processor/scanner",
+  "processor/scheduler/primitives",
+  "processor/scheduler/utxo/primitives",
+  "processor/scheduler/utxo/standard",
+  "processor/scheduler/utxo/transaction-chaining",
+  "processor/scheduler/smart-contract",
+  "processor/signers",
+
+  "processor/bin",
+  "processor/bitcoin",
+  "processor/ethereum/primitives",
+  "processor/ethereum/test-primitives",
+  "processor/ethereum/deployer",
+  "processor/ethereum/erc20",
+  "processor/ethereum/router",
+  "processor/ethereum",
+  "processor/monero",
+
+  "coordinator/tributary-sdk/tendermint",
+  "coordinator/tributary-sdk",
+  "coordinator/cosign/types",
+  "coordinator/cosign",
+  "coordinator/substrate",
   "coordinator/tributary",
+  "coordinator/p2p",
+  "coordinator/p2p/libp2p",
   "coordinator",
 
   "substrate/primitives",
 
-  "substrate/coins/primitives",
-  "substrate/coins/pallet",
-
-  "substrate/dex/pallet",
-
-  "substrate/validator-sets/primitives",
-  "substrate/validator-sets/pallet",
-
-  "substrate/genesis-liquidity/primitives",
-  "substrate/genesis-liquidity/pallet",
-
-  "substrate/emissions/primitives",
-  "substrate/emissions/pallet",
-
-  "substrate/economic-security/pallet",
-
-  "substrate/in-instructions/primitives",
-  "substrate/in-instructions/pallet",
-
-  "substrate/signals/primitives",
-  "substrate/signals/pallet",
-
   "substrate/abi",
 
+  "substrate/core",
+  "substrate/coins",
+  "substrate/validator-sets",
+  "substrate/signals",
+  "substrate/dex",
+  "substrate/genesis-liquidity",
+  "substrate/economic-security",
+  "substrate/emissions",
+  "substrate/in-instructions",
+
   "substrate/runtime",
   "substrate/node",
 
+  "substrate/client/bitcoin",
+  "substrate/client/ethereum",
+  "substrate/client/monero",
+  "substrate/client/serai",
   "substrate/client",
 
   "orchestration",
@@ -90,48 +107,92 @@ members = [
 
   "tests/docker",
   "tests/message-queue",
-  "tests/processor",
-  "tests/coordinator",
-  "tests/full-stack",
+  # TODO "tests/processor",
+  # TODO "tests/coordinator",
+  "tests/substrate",
+  # TODO "tests/full-stack",
   "tests/reproducible-runtime",
 ]
 
+[profile.dev.package]
 # Always compile Monero (and a variety of dependencies) with optimizations due
 # to the extensive operations required for Bulletproofs
-[profile.dev.package]
 subtle = { opt-level = 3 }
-curve25519-dalek = { opt-level = 3 }
+sha3 = { opt-level = 3 }
+blake2 = { opt-level = 3 }
+
 ff = { opt-level = 3 }
 group = { opt-level = 3 }
+
 crypto-bigint = { opt-level = 3 }
+curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
-minimal-ed448 = { opt-level = 3 }
+
 multiexp = { opt-level = 3 }
+
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }
 monero-oxide = { opt-level = 3 }
+
+# Always compile the eVRF DKG tree with optimizations as well
+secp256k1 = { opt-level = 3 }
+secq256k1 = { opt-level = 3 }
+embedwards25519 = { opt-level = 3 }
+generalized-bulletproofs = { opt-level = 3 }
+generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+
+# revm also effectively requires being built with optimizations
+revm = { opt-level = 3 }
+revm-bytecode = { opt-level = 3 }
+revm-context = { opt-level = 3 }
+revm-context-interface = { opt-level = 3 }
+revm-database = { opt-level = 3 }
+revm-database-interface = { opt-level = 3 }
+revm-handler = { opt-level = 3 }
+revm-inspector = { opt-level = 3 }
+revm-interpreter = { opt-level = 3 }
+revm-precompile = { opt-level = 3 }
+revm-primitives = { opt-level = 3 }
+revm-state = { opt-level = 3 }
+
 [profile.release]
 panic = "unwind"
 overflow-checks = true
 
 [patch.crates-io]
+# Point to empty crates for unused crates in our tree
+ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
+ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
+c-kzg = { path = "patches/ethereum/c-kzg" }
+secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
+
 # Dependencies from monero-oxide which originate from within our own tree
-std-shims = { path = "common/std-shims" }
-simple-request = { path = "common/request" }
-dalek-ff-group = { path = "crypto/dalek-ff-group" }
+std-shims = { path = "patches/std-shims" }
+simple-request = { path = "patches/simple-request" }
+multiexp = { path = "crypto/multiexp" }
 flexible-transcript = { path = "crypto/transcript" }
+ciphersuite = { path = "patches/ciphersuite" }
+dalek-ff-group = { path = "crypto/dalek-ff-group" }
+minimal-ed448 = { path = "crypto/ed448" }
 modular-frost = { path = "crypto/frost" }
 
+# Patch due to `std` now including the required functionality
+is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
+# This has a non-deprecated `std` alternative since Rust's 2024 edition
+home = { path = "patches/home" }
+
+# Updates to the latest version
+darling = { path = "patches/darling" }
+thiserror = { path = "patches/thiserror" }
+
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
 
-# These have `std` alternatives
-matches = { path = "patches/matches" }
-home = { path = "patches/home" }
-
 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
@@ -140,11 +201,22 @@ home = { path = "patches/home" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
 
+# Patch from a fork back to upstream
+parity-bip39 = { path = "patches/parity-bip39" }
+
+# Patch to include `FromUniformBytes<64>` over `Scalar`
+k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
+p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
+
+# `jemalloc` conflicts with `mimalloc`, so patch to a `rocksdb` which never uses `jemalloc`
+librocksdb-sys = { path = "patches/librocksdb-sys" }
+
 [workspace.lints.clippy]
-uninlined_format_args = "allow" # TODO
-unwrap_or_default = "allow"
-manual_is_multiple_of = "allow"
 incompatible_msrv = "allow" # Manually verified with a GitHub workflow
+manual_is_multiple_of = "allow"
+unwrap_or_default = "allow"
+map_unwrap_or = "allow"
+needless_continue = "allow"
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"
@@ -169,14 +241,12 @@ large_stack_arrays = "deny"
 linkedlist = "deny"
 macro_use_imports = "deny"
 manual_instant_elapsed = "deny"
-# TODO manual_let_else = "deny"
+manual_let_else = "deny"
 manual_ok_or = "deny"
 manual_string_new = "deny"
-map_unwrap_or = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
-# TODO needless_continue = "deny"
 needless_pass_by_value = "deny"
 ptr_cast_constness = "deny"
 range_minus_one = "deny"
@@ -184,7 +254,9 @@ range_plus_one = "deny"
 redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
+string_slice = "deny"
 unchecked_time_subtraction = "deny"
+uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
 unnecessary_wraps = "deny"
@@ -193,20 +265,5 @@ unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
 
-# TODO: These were incurred when updating Rust as necessary for compilation, yet aren't being fixed
-# at this time due to the impacts it'd have throughout the repository (when this isn't actively the
-# primary branch, `next` is)
-needless_continue = "allow"
-needless_lifetimes = "allow"
-useless_conversion = "allow"
-empty_line_after_doc_comments = "allow"
-manual_div_ceil = "allow"
-manual_let_else = "allow"
-unnecessary_map_or = "allow"
-result_large_err = "allow"
-unneeded_struct_pattern = "allow"
 [workspace.lints.rust]
 unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
-mismatched_lifetime_syntaxes = "allow"
-unused_attributes = "allow"
-unused_parens = "allow"
audits/Trail of Bits ethereum contracts April 2025/README.md (new file) | 14

@@ -0,0 +1,14 @@
+# Trail of Bits Ethereum Contracts Audit, June 2025
+
+This audit included:
+- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
+- Our Ethereum primitives library (/processor/ethereum/primitives)
+- Our Deployer contract and associated library (/processor/ethereum/deployer)
+- Our ERC20 library (/processor/ethereum/erc20)
+- Our Router contract and associated library (/processor/ethereum/router)
+
+It encompasses the repository up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
+
+Please see
+https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
+for the actual report.
audits/crypto/dkg/evrf/README.md (new file) | 50

@@ -0,0 +1,50 @@
+# eVRF DKG
+
+In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
+the IACR preprint server. Within it was a one-round unbiased DKG and a
+one-round unbiased threshold DKG. Unfortunately, both simply describe
+communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
+in practice, the need for an additional round of communication to occur where
+all participants confirm they received their secret shares.
+
+Within Serai, it was posited to use the same premises as the DDH eVRF itself to
+achieve a verifiable encryption scheme. This allows the secret shares to be
+posted to any 'bulletin board' (such as a blockchain) and for all observers to
+confirm:
+
+- A participant participated
+- The secret shares sent can be received by the intended recipient so long as
+  they can access the bulletin board
+
+Additionally, Serai desired a robust scheme (albeit with a biased key as the
+output, which is fine for our purposes). Accordingly, our implementation
+instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
+for verifiable encryption, with the caller allowed to decide the set of
+participants. They may:
+
+- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
+  paper
+- Select a pre-determined set, collapsing to the threshold unbiased DKG from
+  the eVRF paper
+- Select a post-determined set (with any solution for the Common Subset
+  problem), yielding a robust threshold biased DKG
+
+Note that the eVRF paper proposes using the eVRF to sample coefficients, yet
+this is unnecessary when the resulting key will be biased. Any proof of
+knowledge for the coefficients, as necessary for their extraction within the
+security proofs, would be sufficient.
+
+MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
+provide proofs for its security. This resulted in
+[this paper](<./Security Proofs.pdf>).
+
+Our implementation itself is then built on top of the audited
+[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
+and
+[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
+
+Note we do not use the originally premised DDH eVRF, but rather the one
+premised on elliptic curve divisors, the methodology of which is commented on
+[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
+
+Our implementation itself is unaudited at this time, however.
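To make the three participant-selection modes above concrete, here is a purely illustrative sketch in Rust; it is not the actual API of the `dkg` crates, and every name in it is hypothetical.

```rust
/// Hypothetical illustration only; not the actual API of the `dkg` crates.
/// The three ways a caller may fix the participant set, per the README above.
pub enum ParticipantSet {
  /// Everyone participates: collapses to the eVRF paper's one-round,
  /// non-threshold, unbiased DKG.
  Everyone,
  /// A pre-determined subset participates: collapses to the paper's
  /// threshold, unbiased DKG.
  PreDetermined(Vec<u16>),
  /// A post-determined subset, chosen by any solution to the Common Subset
  /// problem: yields a robust threshold DKG whose resulting key is biased.
  PostDetermined,
}
```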
audits/crypto/dkg/evrf/Security Proofs.pdf (new file, binary)

Binary file not shown.
common/db/Cargo.toml (file header lost in extraction; path inferred from the `repository` field)

@@ -1,6 +1,6 @@
 [package]
 name = "serai-db"
-version = "0.1.0"
+version = "0.1.1"
 description = "A simple database trait and backends for it"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-parity-db = { version = "0.4", default-features = false, optional = true }
+parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true }
 rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
 
 [features]
(file header lost in extraction; given the surrounding common/db files, likely common/db/LICENSE)

@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
common/db/README.md (new file) | 8

@@ -0,0 +1,8 @@
+# Serai DB
+
+An inefficient, minimal abstraction around databases.
+
+The abstraction offers `get`, `put`, and `del` with helper functions and macros
+built on top. Database iteration is not offered, forcing the caller to manually
+implement indexing schemes. This ensures wide compatibility across abstracted
+databases.
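A minimal usage sketch of the surface this README describes, assuming the crate exposes `Get`/`DbTxn`/`Db` traits and a `MemDb` in-memory backend; exact method signatures may differ.

```rust
// A minimal sketch, assuming serai-db exposes `Get`/`DbTxn`/`Db` traits and a
// `MemDb` in-memory backend; exact method signatures may differ.
use serai_db::{Get, DbTxn, Db, MemDb};

fn example() {
  let mut db = MemDb::new();

  // Writes are staged in a transaction and land atomically on commit.
  let mut txn = db.txn();
  txn.put(b"counter", 1u64.to_le_bytes());
  txn.commit();

  // Anything implementing `Get` can read.
  let raw = db.get(b"counter").unwrap();
  assert_eq!(u64::from_le_bytes(raw.try_into().unwrap()), 1);

  // Deletion also goes through a transaction.
  let mut txn = db.txn();
  txn.del(b"counter");
  txn.commit();
  assert!(db.get(b"counter").is_none());
}
```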
(file header lost in extraction; likely common/db/src/create_db.rs, inferred from content)

@@ -15,7 +15,7 @@ pub fn serai_db_key(
 ///
 /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
 /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
-/// they must be `scale` encodable. The return type is used to auto encode and decode the database
+/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database
 /// value bytes using `borsh`.
 ///
 /// # Arguments
@@ -38,32 +38,65 @@ pub fn serai_db_key(
 #[macro_export]
 macro_rules! create_db {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $(
+      $field_name: ident:
+        $(<$($generic_name: tt: $generic_type: tt),+>)?(
+          $($arg: ident: $arg_type: ty),*
+        ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       #[derive(Clone, Debug)]
-      pub(crate) struct $field_name;
-      impl $field_name {
+      pub(crate) struct $field_name$(
+        <$($generic_name: $generic_type),+>
+      )?$(
+        (core::marker::PhantomData<($($generic_name),+)>)
+      )?;
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
         pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
-          use scale::Encode;
           $crate::serai_db_key(
             stringify!($db_name).as_bytes(),
             stringify!($field_name).as_bytes(),
-            ($($arg),*).encode()
+            &borsh::to_vec(&($($arg),*)).unwrap(),
           )
         }
-        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
-          let key = $field_name::key($($arg),*);
+        pub(crate) fn set(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*,
+          data: &$field_type
+        ) {
+          let key = Self::key($($arg),*);
           txn.put(&key, borsh::to_vec(data).unwrap());
         }
-        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
-          getter.get($field_name::key($($arg),*)).map(|data| {
+        pub(crate) fn get(
+          getter: &impl Get,
+          $($arg: $arg_type),*
+        ) -> Option<$field_type> {
+          getter.get(Self::key($($arg),*)).map(|data| {
             borsh::from_slice(data.as_ref()).unwrap()
           })
         }
+        // Returns a PhantomData of all generic types so if the generic was only used in the value,
+        // not the keys, this doesn't have unused generic types
         #[allow(dead_code)]
-        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
-          txn.del(&$field_name::key($($arg),*))
+        pub(crate) fn del(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> core::marker::PhantomData<($($($generic_name),+)?)> {
+          txn.del(&Self::key($($arg),*));
+          core::marker::PhantomData
+        }
+
+        pub(crate) fn take(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let key = Self::key($($arg),*);
+          let res = txn.get(&key).map(|data| borsh::from_slice(data.as_ref()).unwrap());
+          if res.is_some() {
+            txn.del(key);
+          }
+          res
         }
       }
     )*
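For reference, a hypothetical invocation under the new grammar (the `CoordinatorDb`, `ActiveSession`, and `KeyShares` names and the `Borshable` bound are illustrative): parameters are `borsh`-serialized into the key, values are `borsh`-(de)serialized, and the new `take` composes `get` with `del`:

create_db!(
  CoordinatorDb {
    ActiveSession: (network: u32) -> u64,
    // A generic only used in the value; the generated struct carries a PhantomData
    KeyShares: <D: Borshable>(session: u64) -> D,
  }
);

fn example(txn: &mut impl DbTxn) {
  ActiveSession::set(txn, 0, &5);
  assert_eq!(ActiveSession::get(txn, 0), Some(5));
  // `take` removes the entry while returning it
  assert_eq!(ActiveSession::take(txn, 0), Some(5));
  assert_eq!(ActiveSession::get(txn, 0), None);
}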
@@ -73,19 +106,30 @@ macro_rules! create_db {
 #[macro_export]
 macro_rules! db_channel {
   ($db_name: ident {
-    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
+    $($field_name: ident:
+      $(<$($generic_name: tt: $generic_type: tt),+>)?(
+        $($arg: ident: $arg_type: ty),*
+      ) -> $field_type: ty$(,)?
+    )*
   }) => {
     $(
       create_db! {
         $db_name {
-          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
+          $field_name: $(<$($generic_name: $generic_type),+>)?(
+            $($arg: $arg_type,)*
+            index: u32
+          ) -> $field_type
         }
       }
 
-      impl $field_name {
-        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
+      impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
+        pub(crate) fn send(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+          , value: &$field_type
+        ) {
           // Use index 0 to store the amount of messages
-          let messages_sent_key = $field_name::key($($arg),*, 0);
+          let messages_sent_key = Self::key($($arg,)* 0);
           let messages_sent = txn.get(&messages_sent_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);
@@ -96,19 +140,35 @@ macro_rules! db_channel {
           // at the same time
           let index_to_use = messages_sent + 2;
 
-          $field_name::set(txn, $($arg),*, index_to_use, value);
+          Self::set(txn, $($arg,)* index_to_use, value);
         }
-        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
-          let messages_recvd_key = $field_name::key($($arg),*, 1);
+        pub(crate) fn peek(
+          getter: &impl Get
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let messages_recvd_key = Self::key($($arg,)* 1);
+          let messages_recvd = getter.get(&messages_recvd_key).map(|counter| {
+            u32::from_le_bytes(counter.try_into().unwrap())
+          }).unwrap_or(0);
+
+          let index_to_read = messages_recvd + 2;
+
+          Self::get(getter, $($arg,)* index_to_read)
+        }
+        pub(crate) fn try_recv(
+          txn: &mut impl DbTxn
+          $(, $arg: $arg_type)*
+        ) -> Option<$field_type> {
+          let messages_recvd_key = Self::key($($arg,)* 1);
           let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
             u32::from_le_bytes(counter.try_into().unwrap())
           }).unwrap_or(0);
 
           let index_to_read = messages_recvd + 2;
 
-          let res = $field_name::get(txn, $($arg),*, index_to_read);
+          let res = Self::get(txn, $($arg,)* index_to_read);
           if res.is_some() {
-            $field_name::del(txn, $($arg),*, index_to_read);
+            Self::del(txn, $($arg,)* index_to_read);
             txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
           }
           res
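The resulting channel semantics, sketched below (the `PendingCosigns` name is hypothetical): `send` appends under an incrementing index, the new `peek` reads the next message without consuming it, and `try_recv` consumes it:

db_channel!(
  CoordinatorDb {
    PendingCosigns: (network: u32) -> Vec<u8>,
  }
);

fn example(txn: &mut impl DbTxn) {
  PendingCosigns::send(txn, 0, &vec![1, 2, 3]);
  assert_eq!(PendingCosigns::peek(txn, 0), Some(vec![1, 2, 3]));
  assert_eq!(PendingCosigns::try_recv(txn, 0), Some(vec![1, 2, 3]));
  assert_eq!(PendingCosigns::try_recv(txn, 0), None);
}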
@@ -14,26 +14,43 @@ mod parity_db;
 #[cfg(feature = "parity-db")]
 pub use parity_db::{ParityDb, new_parity_db};
 
-/// An object implementing get.
+/// An object implementing `get`.
 pub trait Get {
+  /// Get a value from the database.
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
 }
 
-/// An atomic database operation.
+/// An atomic database transaction.
+///
+/// A transaction is only required to atomically commit. It is not required that two `Get` calls
+/// made with the same transaction return the same result, if another transaction wrote to that
+/// key.
+///
+/// If two transactions are created, and both write (including deletions) to the same key, behavior
+/// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
+/// randomly, or any other action, at time of write or at time of commit.
 #[must_use]
 pub trait DbTxn: Send + Get {
+  /// Write a value to this key.
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
+  /// Delete the value from this key.
   fn del(&mut self, key: impl AsRef<[u8]>);
+  /// Commit this transaction.
   fn commit(self);
 }
 
-/// A database supporting atomic operations.
+/// A database supporting atomic transactions.
 pub trait Db: 'static + Send + Sync + Clone + Get {
+  /// The type representing a database transaction.
   type Transaction<'a>: DbTxn;
+  /// Calculate a key for a database entry.
+  ///
+  /// Keys are separated by the database, the item within the database, and the item's key itself.
   fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
     let db_len = u8::try_from(db_dst.len()).unwrap();
     let dst_len = u8::try_from(item_dst.len()).unwrap();
     [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
   }
+  /// Open a new transaction.
   fn txn(&mut self) -> Self::Transaction<'_>;
 }
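A worked example of the default `Db::key` layout, which length-prefixes both destinations before appending the item's own key (assuming `MemDb`'s `Db` implementation, per the next diff):

fn key_layout() {
  // [len(db_dst), db_dst, len(item_dst), item_dst, key]
  let key = <MemDb as Db>::key(b"Db", b"Item", [0xff]);
  assert_eq!(key, vec![2, b'D', b'b', 4, b'I', b't', b'e', b'm', 0xff]);
}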
@@ -11,7 +11,7 @@ use crate::*;
 #[derive(PartialEq, Eq, Debug)]
 pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);
 
-impl<'a> Get for MemDbTxn<'a> {
+impl Get for MemDbTxn<'_> {
   fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
     if self.2.contains(key.as_ref()) {
       return None;
@@ -23,7 +23,7 @@ impl<'a> Get for MemDbTxn<'a> {
       .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
   }
 }
-impl<'a> DbTxn for MemDbTxn<'a> {
+impl DbTxn for MemDbTxn<'_> {
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
     self.2.remove(key.as_ref());
     self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
common/env/Cargo.toml
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.60"
+rust-version = "1.64"
 
 [package.metadata.docs.rs]
 all-features = true
common/env/LICENSE
@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
@@ -7,6 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/patchable-a
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["async", "sleep", "tokio", "smol", "async-std"]
 edition = "2021"
+rust-version = "1.70"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2024 Luke Parker
+Copyright (c) 2024-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,13 +1,13 @@
 [package]
 name = "simple-request"
-version = "0.1.0"
+version = "0.3.0"
 description = "A simple HTTP(S) request library"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["http", "https", "async", "request", "ssl"]
 edition = "2021"
-rust-version = "1.70"
+rust-version = "1.71"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -19,9 +19,10 @@ workspace = true
 [dependencies]
 tower-service = { version = "0.3", default-features = false }
 hyper = { version = "1", default-features = false, features = ["http1", "client"] }
-hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
+hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
 http-body-util = { version = "0.1", default-features = false }
-tokio = { version = "1", default-features = false }
+futures-util = { version = "0.3", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["sync"] }
 
 hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
 
@@ -29,6 +30,8 @@ zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }
 
 [features]
-tls = ["hyper-rustls"]
+tokio = ["hyper-util/tokio"]
+tls = ["tokio", "hyper-rustls"]
+webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
 basic-auth = ["zeroize", "base64ct"]
 default = ["tls"]
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,19 +1,20 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![doc = include_str!("../README.md")]
 
+use core::{pin::Pin, future::Future};
 use std::sync::Arc;
 
-use tokio::sync::Mutex;
+use futures_util::FutureExt;
+use ::tokio::sync::Mutex;
 
 use tower_service::Service as TowerService;
 
+use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
+pub use hyper;
+
+use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
+
 #[cfg(feature = "tls")]
 use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
-use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
-use hyper_util::{
-  rt::tokio::TokioExecutor,
-  client::legacy::{Client as HyperClient, connect::HttpConnector},
-};
-pub use hyper;
 
 mod request;
 pub use request::*;
@@ -37,52 +38,86 @@ type Connector = HttpConnector;
 type Connector = HttpsConnector<HttpConnector>;
 
 #[derive(Clone, Debug)]
-enum Connection {
+enum Connection<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
   ConnectionPool(HyperClient<Connector, Full<Bytes>>),
   Connection {
+    executor: E,
     connector: Connector,
     host: Uri,
     connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
   },
 }
 
+/// An HTTP client.
+///
+/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
+/// the `tls` feature is active without using the `tokio` executor will cause errors.
 #[derive(Clone, Debug)]
-pub struct Client {
-  connection: Connection,
+pub struct Client<
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+  connection: Connection<E>,
 }
 
-impl Client {
-  fn connector() -> Connector {
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+  Client<E>
+{
+  #[allow(clippy::unnecessary_wraps)]
+  fn connector() -> Result<Connector, Error> {
     let mut res = HttpConnector::new();
     res.set_keepalive(Some(core::time::Duration::from_secs(60)));
     res.set_nodelay(true);
     res.set_reuse_address(true);
 
+    #[cfg(feature = "tls")]
+    if core::any::TypeId::of::<E>() !=
+      core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
+    {
+      Err(Error::ConnectionError(
+        "`tls` feature enabled but not using the `tokio` executor".into(),
+      ))?;
+    }
+
     #[cfg(feature = "tls")]
     res.enforce_http(false);
     #[cfg(feature = "tls")]
-    let res = HttpsConnectorBuilder::new()
-      .with_native_roots()
-      .expect("couldn't fetch system's SSL roots")
-      .https_or_http()
-      .enable_http1()
-      .wrap_connector(res);
-    res
+    let https = HttpsConnectorBuilder::new().with_native_roots();
+    #[cfg(all(feature = "tls", not(feature = "webpki-roots")))]
+    let https = https.map_err(|e| {
+      Error::ConnectionError(
+        format!("couldn't load system's SSL root certificates and webpki-roots unavailable: {e:?}")
+          .into(),
      )
+    })?;
+    // Fallback to `webpki-roots` if present
+    #[cfg(all(feature = "tls", feature = "webpki-roots"))]
+    let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
+    #[cfg(feature = "tls")]
+    let res = https.https_or_http().enable_http1().wrap_connector(res);
+
+    Ok(res)
   }
 
-  pub fn with_connection_pool() -> Client {
-    Client {
+  pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
+    Ok(Client {
      connection: Connection::ConnectionPool(
-        HyperClient::builder(TokioExecutor::new())
+        HyperClient::builder(executor)
          .pool_idle_timeout(core::time::Duration::from_secs(60))
-          .build(Self::connector()),
+          .build(Self::connector()?),
      ),
-    }
+    })
   }
 
-  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
+  pub fn with_executor_and_without_connection_pool(
+    executor: E,
+    host: &str,
+  ) -> Result<Client<E>, Error> {
    Ok(Client {
      connection: Connection::Connection {
-        connector: Self::connector(),
+        executor,
+        connector: Self::connector()?,
        host: {
          let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
          if uri.host().is_none() {
@@ -95,9 +130,9 @@ impl Client {
       })
     }
 
-  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
+  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
     let request: Request = request.into();
-    let mut request = request.0;
+    let Request { mut request, response_size_limit } = request;
     if let Some(header_host) = request.headers().get(hyper::header::HOST) {
       match &self.connection {
         Connection::ConnectionPool(_) => {}
@@ -131,7 +166,7 @@ impl Client {
       Connection::ConnectionPool(client) => {
         client.request(request).await.map_err(Error::HyperUtil)?
       }
-      Connection::Connection { connector, host, connection } => {
+      Connection::Connection { executor, connector, host, connection } => {
         let mut connection_lock = connection.lock().await;
 
         // If there's not a connection...
@@ -143,28 +178,46 @@ impl Client {
         let call_res = call_res.map_err(Error::ConnectionError);
         let (requester, connection) =
           hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
-        // This will die when we drop the requester, so we don't need to track an AbortHandle
-        // for it
-        tokio::spawn(connection);
+        // This task will die when we drop the requester
+        executor.execute(Box::pin(connection.map(|_| ())));
         *connection_lock = Some(requester);
       }
 
-      let connection = connection_lock.as_mut().unwrap();
+      let connection = connection_lock.as_mut().expect("lock over the connection was poisoned");
       let mut err = connection.ready().await.err();
       if err.is_none() {
         // Send the request
-        let res = connection.send_request(request).await;
-        if let Ok(res) = res {
-          return Ok(Response(res, self));
+        let response = connection.send_request(request).await;
+        if let Ok(response) = response {
+          return Ok(Response { response, size_limit: response_size_limit, client: self });
         }
-        err = res.err();
+        err = response.err();
       }
       // Since this connection has been put into an error state, drop it
      *connection_lock = None;
-      Err(Error::Hyper(err.unwrap()))?
+      Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))?
     }
   };
 
-    Ok(Response(response, self))
+    Ok(Response { response, size_limit: response_size_limit, client: self })
   }
 }
+
+#[cfg(feature = "tokio")]
+mod tokio {
+  use hyper_util::rt::tokio::TokioExecutor;
+  use super::*;
+
+  pub type TokioClient = Client<TokioExecutor>;
+  impl Client<TokioExecutor> {
+    pub fn with_connection_pool() -> Result<Self, Error> {
+      Self::with_executor_and_connection_pool(TokioExecutor::new())
+    }
+
+    pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
+      Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
+    }
+  }
+}
+#[cfg(feature = "tokio")]
+pub use tokio::TokioClient;
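A hedged usage sketch of the reworked API with the `tokio` feature enabled (the URL is illustrative; the size-limit setter is introduced in the `request.rs` diff below):

async fn fetch() -> Result<Vec<u8>, simple_request::Error> {
  use std::io::Read;

  let client = simple_request::TokioClient::with_connection_pool()?;
  let request = hyper::Request::get("https://example.com")
    .body(http_body_util::Full::new(hyper::body::Bytes::new()))
    .unwrap();
  let mut request = simple_request::Request::from(request);
  // Cap the buffered response at 1 MiB
  request.set_response_size_limit(Some(1 << 20));

  let response = client.request(request).await?;
  let mut body = vec![];
  response.body().await?.read_to_end(&mut body).unwrap();
  Ok(body)
}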
@@ -7,11 +7,15 @@ pub use http_body_util::Full;
 use crate::Error;
 
 #[derive(Debug)]
-pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
+pub struct Request {
+  pub(crate) request: hyper::Request<Full<Bytes>>,
+  pub(crate) response_size_limit: Option<usize>,
+}
 
 impl Request {
   #[cfg(feature = "basic-auth")]
   fn username_password_from_uri(&self) -> Result<(String, String), Error> {
-    if let Some(authority) = self.0.uri().authority() {
+    if let Some(authority) = self.request.uri().authority() {
       let authority = authority.as_str();
       if authority.contains('@') {
         // Decode the username and password from the URI
@@ -36,9 +40,10 @@ impl Request {
     let mut formatted = format!("{username}:{password}");
     let mut encoded = Base64::encode_string(formatted.as_bytes());
     formatted.zeroize();
-    self.0.headers_mut().insert(
+    self.request.headers_mut().insert(
       hyper::header::AUTHORIZATION,
-      HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
+      HeaderValue::from_str(&format!("Basic {encoded}"))
+        .expect("couldn't form header from base64-encoded string"),
     );
     encoded.zeroize();
   }
@@ -59,9 +64,17 @@ impl Request {
   pub fn with_basic_auth(&mut self) {
     let _ = self.basic_auth_from_uri();
   }
-}
 
-impl From<hyper::Request<Full<Bytes>>> for Request {
-  fn from(request: hyper::Request<Full<Bytes>>) -> Request {
-    Request(request)
+  /// Set a size limit for the response.
+  ///
+  /// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
+  pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
+    self.response_size_limit = response_size_limit;
+  }
+}
+
+impl From<hyper::Request<Full<Bytes>>> for Request {
+  fn from(request: hyper::Request<Full<Bytes>>) -> Request {
+    Request { request, response_size_limit: None }
   }
 }
@@ -1,24 +1,54 @@
+use core::{pin::Pin, future::Future};
+use std::io;
+
 use hyper::{
   StatusCode,
   header::{HeaderValue, HeaderMap},
-  body::{Buf, Incoming},
+  body::Incoming,
+  rt::Executor,
 };
 use http_body_util::BodyExt;
 
+use futures_util::{Stream, StreamExt};
+
 use crate::{Client, Error};
 
 // Borrows the client so its async task lives as long as this response exists.
 #[allow(dead_code)]
 #[derive(Debug)]
-pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
-impl<'a> Response<'a> {
+pub struct Response<
+  'a,
+  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
+> {
+  pub(crate) response: hyper::Response<Incoming>,
+  pub(crate) size_limit: Option<usize>,
+  pub(crate) client: &'a Client<E>,
+}
+
+impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
+  Response<'_, E>
+{
   pub fn status(&self) -> StatusCode {
-    self.0.status()
+    self.response.status()
   }
   pub fn headers(&self) -> &HeaderMap<HeaderValue> {
-    self.0.headers()
+    self.response.headers()
   }
   pub async fn body(self) -> Result<impl std::io::Read, Error> {
-    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
+    let mut body = self.response.into_body().into_data_stream();
+    let mut res: Vec<u8> = vec![];
+    loop {
+      if let Some(size_limit) = self.size_limit {
+        let (lower, upper) = body.size_hint();
+        if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
+          Err(Error::ConnectionError("response exceeded size limit".into()))?;
+        }
+      }
+
+      let Some(part) = body.next().await else { break };
+      let part = part.map_err(Error::Hyper)?;
+      res.extend(part.as_ref());
+    }
+    Ok(io::Cursor::new(res))
   }
 }
@@ -1,13 +1,13 @@
 [package]
 name = "std-shims"
-version = "0.1.4"
+version = "0.1.5"
 description = "A series of std shims to make alloc more feasible"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["nostd", "no_std", "alloc", "io"]
 edition = "2021"
-rust-version = "1.64"
+rust-version = "1.65"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -18,9 +18,10 @@ workspace = true
 
 [dependencies]
 rustversion = { version = "1", default-features = false }
-spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
-hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
+spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] }
+hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }
 
 [features]
-std = []
+alloc = ["hashbrown"]
+std = ["alloc", "spin/std"]
 default = ["std"]
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -1,11 +1,28 @@
-# std shims
+# `std` shims
 
-A crate which passes through to std when the default `std` feature is enabled,
-yet provides a series of shims when it isn't.
+`std-shims` is a Rust crate with two purposes:
+- Expand the functionality of `core` and `alloc`
+- Polyfill functionality only available on newer versions of Rust
 
-No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
-average case.
+The goal is to make supporting no-`std` environments, and older versions of
+Rust, as simple as possible. For most use cases, replacing `std::` with
+`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
+advantage of `std-shims`.
 
-`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
-`spin` (avoiding a requirement on `critical-section`).
-types are not guaranteed to be
+# API Surface
+
+`std-shims` only aims to have items _mutually available_ between `alloc` (with
+extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
+no shims available, will not be exported by `std-shims`.
+
+# Dependencies
+
+`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
+primitives are provided via `spin` (avoiding a requirement on
+`critical-section`). Sections of `std::io` are independently matched as
+possible. `rustversion` is used to detect when to provide polyfills.
+
+# Disclaimer
+
+No guarantee of one-to-one parity is provided. The shims provided aim to be
+sufficient for the average case. Pull requests are _welcome_.
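A sketch of the documented usage pattern from a consuming no-`std` crate, assuming the `alloc` feature and that the shimmed `Read` offers `read_exact` in parity with `std::io` (an assumption; only `read` appears in the diff below):

#![cfg_attr(not(feature = "std"), no_std)]

use std_shims::prelude::*;
use std_shims::io::{self, Read};

// `String`/`ToString` come from the shimmed prelude
fn greeting() -> String {
  "hello".to_string()
}

fn read_u32(reader: &mut impl Read) -> io::Result<u32> {
  let mut buf = [0; 4];
  reader.read_exact(&mut buf)?;
  Ok(u32::from_le_bytes(buf))
}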
@@ -1,7 +1,7 @@
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::collections::*;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use hashbrown::{HashSet, HashMap};
+
 #[cfg(feature = "std")]
 pub use std::collections::*;
-
-#[cfg(not(feature = "std"))]
-pub use alloc::collections::*;
-#[cfg(not(feature = "std"))]
-pub use hashbrown::{HashSet, HashMap};
@@ -1,42 +1,74 @@
-#[cfg(feature = "std")]
-pub use std::io::*;
-
 #[cfg(not(feature = "std"))]
 mod shims {
-  use core::fmt::{Debug, Formatter};
-  use alloc::{boxed::Box, vec::Vec};
+  use core::fmt::{self, Debug, Display, Formatter};
+  #[cfg(feature = "alloc")]
+  use extern_alloc::{boxed::Box, vec::Vec};
+  use crate::error::Error as CoreError;
 
+  /// The kind of error.
   #[derive(Clone, Copy, PartialEq, Eq, Debug)]
   pub enum ErrorKind {
     UnexpectedEof,
     Other,
   }
 
+  /// An error.
+  #[derive(Debug)]
   pub struct Error {
     kind: ErrorKind,
-    error: Box<dyn Send + Sync>,
+    #[cfg(feature = "alloc")]
+    error: Box<dyn Send + Sync + CoreError>,
   }
 
-  impl Debug for Error {
-    fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
-      fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
+  impl Display for Error {
+    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+      <Self as Debug>::fmt(self, f)
     }
   }
+  impl CoreError for Error {}
+
+  #[cfg(not(feature = "alloc"))]
+  pub trait IntoBoxSendSyncError {}
+  #[cfg(not(feature = "alloc"))]
+  impl<I> IntoBoxSendSyncError for I {}
+  #[cfg(feature = "alloc")]
+  pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
+  #[cfg(feature = "alloc")]
+  impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
 
   impl Error {
-    pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
-      Error { kind, error: Box::new(error) }
+    /// Create a new error.
+    ///
+    /// The error object itself is silently dropped when `alloc` is not enabled.
+    #[allow(unused)]
+    pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
+      #[cfg(not(feature = "alloc"))]
+      let res = Error { kind };
+      #[cfg(feature = "alloc")]
+      let res = Error { kind, error: error.into() };
+      res
     }
 
-    pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
-      Error { kind: ErrorKind::Other, error: Box::new(error) }
+    /// Create a new error with `io::ErrorKind::Other` as its kind.
+    ///
+    /// The error object itself is silently dropped when `alloc` is not enabled.
+    #[allow(unused)]
+    pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
+      #[cfg(not(feature = "alloc"))]
+      let res = Error { kind: ErrorKind::Other };
+      #[cfg(feature = "alloc")]
+      let res = Error { kind: ErrorKind::Other, error: error.into() };
+      res
    }
 
+    /// The kind of error.
    pub fn kind(&self) -> ErrorKind {
      self.kind
    }
 
-    pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
+    /// Retrieve the inner error.
+    #[cfg(feature = "alloc")]
+    pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
      Some(self.error)
    }
  }
@@ -64,6 +96,12 @@ mod shims {
     }
   }
 
+  impl<R: Read> Read for &mut R {
+    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
+      R::read(*self, buf)
+    }
+  }
+
   pub trait BufRead: Read {
     fn fill_buf(&mut self) -> Result<&[u8]>;
     fn consume(&mut self, amt: usize);
@@ -88,6 +126,7 @@ mod shims {
     }
   }
 
+  #[cfg(feature = "alloc")]
   impl Write for Vec<u8> {
     fn write(&mut self, buf: &[u8]) -> Result<usize> {
       self.extend(buf);
@@ -95,6 +134,8 @@ mod shims {
   }
 }
 
 #[cfg(not(feature = "std"))]
 pub use shims::*;
+
+#[cfg(feature = "std")]
+pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};
@@ -2,17 +2,44 @@
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-pub extern crate alloc;
+#[cfg(not(feature = "alloc"))]
+pub use core::*;
+#[cfg(not(feature = "alloc"))]
+pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
+
+#[cfg(not(feature = "std"))]
+#[rustversion::before(1.81)]
+pub mod error {
+  use core::fmt::{Debug, Display};
+  pub trait Error: Debug + Display {}
+}
+#[cfg(not(feature = "std"))]
+#[rustversion::since(1.81)]
+pub use core::error;
+
+#[cfg(feature = "alloc")]
+extern crate alloc as extern_alloc;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
+#[cfg(feature = "std")]
+pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
 
-pub mod sync;
 pub mod collections;
 pub mod io;
+pub mod sync;
-pub use alloc::vec;
-pub use alloc::str;
-pub use alloc::string;
 
 pub mod prelude {
+  // Shim the `std` prelude
+  #[cfg(feature = "alloc")]
+  pub use extern_alloc::{
+    format, vec,
+    borrow::ToOwned,
+    boxed::Box,
+    vec::Vec,
+    string::{String, ToString},
+  };
+
+  // Shim `div_ceil`
   #[rustversion::before(1.73)]
   #[doc(hidden)]
   pub trait StdShimsDivCeil {
@@ -53,6 +80,7 @@ pub mod prelude {
     }
   }
 
+  // Shim `io::Error::other`
   #[cfg(feature = "std")]
   #[rustversion::before(1.74)]
   #[doc(hidden)]
@@ -1,19 +1,28 @@
-pub use core::sync::*;
-pub use alloc::sync::*;
+pub use core::sync::atomic;
+#[cfg(all(feature = "alloc", not(feature = "std")))]
+pub use extern_alloc::sync::{Arc, Weak};
+#[cfg(feature = "std")]
+pub use std::sync::{Arc, Weak};
 
 mod mutex_shim {
-  #[cfg(feature = "std")]
-  pub use std::sync::*;
   #[cfg(not(feature = "std"))]
-  pub use spin::*;
+  pub use spin::{Mutex, MutexGuard};
+  #[cfg(feature = "std")]
+  pub use std::sync::{Mutex, MutexGuard};
 
+  /// A shimmed `Mutex` with an API mutual to `spin` and `std`.
   #[derive(Default, Debug)]
   pub struct ShimMutex<T>(Mutex<T>);
   impl<T> ShimMutex<T> {
+    /// Construct a new `Mutex`.
     pub const fn new(value: T) -> Self {
       Self(Mutex::new(value))
     }
 
+    /// Acquire a lock on the contents of the `Mutex`.
+    ///
+    /// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
+    /// this may panic if the `Mutex` was poisoned.
     pub fn lock(&self) -> MutexGuard<'_, T> {
       #[cfg(feature = "std")]
       let res = self.0.lock().unwrap();
@@ -25,10 +34,11 @@ mod mutex_shim {
 }
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
 
-#[cfg(not(feature = "std"))]
-pub use spin::Lazy as LazyLock;
 #[rustversion::before(1.80)]
-#[cfg(feature = "std")]
+pub use spin::Lazy as LazyLock;
+
+#[rustversion::since(1.80)]
+#[cfg(not(feature = "std"))]
 pub use spin::Lazy as LazyLock;
 #[rustversion::since(1.80)]
 #[cfg(feature = "std")]
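A small sketch of the shimmed primitives, which expose one `Mutex`/`LazyLock` API across `spin` and `std`:

use std_shims::sync::{LazyLock, Mutex};

static COUNTER: LazyLock<Mutex<u64>> = LazyLock::new(|| Mutex::new(0));

fn increment() -> u64 {
  // Spins on no-`std`, may panic on poison with `std`, per the docs above
  let mut counter = COUNTER.lock();
  *counter += 1;
  *counter
}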
common/task/Cargo.toml (new file)
@@ -0,0 +1,22 @@
+[package]
+name = "serai-task"
+version = "0.1.0"
+description = "A task schema for Serai services"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/task"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = []
+edition = "2021"
+publish = false
+rust-version = "1.75"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
+[dependencies]
+log = { version = "0.4", default-features = false, features = ["std"] }
+tokio = { version = "1", default-features = false, features = ["macros", "sync", "time"] }
@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2024 Luke Parker
+Copyright (c) 2022-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
common/task/README.md (new file)
@@ -0,0 +1,3 @@
+# Task
+
+A schema to define tasks to be run ad infinitum.
common/task/src/lib.rs (new file)
@@ -0,0 +1,161 @@
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![doc = include_str!("../README.md")]
+#![deny(missing_docs)]
+
+use core::{
+  fmt::{self, Debug},
+  future::Future,
+  time::Duration,
+};
+
+use tokio::sync::mpsc;
+
+mod type_name;
+
+/// A handle for a task.
+///
+/// The task will only stop running once all handles for it are dropped.
+//
+// `run_now` isn't infallible if the task may have been closed. `run_now` on a closed task would
+// either need to panic (historic behavior), silently drop the fact the task can't be run, or
+// return an error. Instead of having a potential panic, and instead of modeling the error
+// behavior, this task can't be closed unless all handles are dropped, ensuring calls to `run_now`
+// are infallible.
+#[derive(Clone)]
+pub struct TaskHandle {
+  run_now: mpsc::Sender<()>,
+  #[allow(dead_code)] // This is used to track if all handles have been dropped
+  close: mpsc::Sender<()>,
+}
+
+/// A task's internal structures.
+pub struct Task {
+  run_now: mpsc::Receiver<()>,
+  close: mpsc::Receiver<()>,
+}
+
+impl Task {
+  /// Create a new task definition.
+  pub fn new() -> (Self, TaskHandle) {
+    // Uses a capacity of 1 as any call to run as soon as possible satisfies all calls to run as
+    // soon as possible
+    let (run_now_send, run_now_recv) = mpsc::channel(1);
+    // And any call to close satisfies all calls to close
+    let (close_send, close_recv) = mpsc::channel(1);
+    (
+      Self { run_now: run_now_recv, close: close_recv },
+      TaskHandle { run_now: run_now_send, close: close_send },
+    )
+  }
+}
+
+impl TaskHandle {
+  /// Tell the task to run now (and not whenever its next iteration on a timer is).
+  pub fn run_now(&self) {
+    #[allow(clippy::match_same_arms)]
+    match self.run_now.try_send(()) {
+      Ok(()) => {}
+      // NOP on full, as this task will already be ran as soon as possible
+      Err(mpsc::error::TrySendError::Full(())) => {}
+      Err(mpsc::error::TrySendError::Closed(())) => {
+        // The task should only be closed if all handles are dropped, and this one hasn't been
+        panic!("task was unexpectedly closed when calling run_now")
+      }
+    }
+  }
+}
+
+/// An enum which can't be constructed, representing that the task does not error.
+pub enum DoesNotError {}
+impl Debug for DoesNotError {
+  fn fmt(&self, _: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> {
+    // This type can't be constructed so we'll never have a `&self` to call this fn with
+    unreachable!()
+  }
+}
+
+/// A task to be continually ran.
+pub trait ContinuallyRan: Sized + Send {
+  /// The amount of seconds before this task should be polled again.
+  const DELAY_BETWEEN_ITERATIONS: u64 = 5;
+  /// The maximum amount of seconds before this task should be run again.
+  ///
+  /// Upon error, the amount of time waited will be linearly increased until this limit.
+  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 120;
+
+  /// The error potentially yielded upon running an iteration of this task.
+  type Error: Debug;
+
+  /// Run an iteration of the task.
+  ///
+  /// If this returns `true`, all dependents of the task will immediately have a new iteration ran
+  /// (without waiting for whatever timer they were already on).
+  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>>;
+
+  /// Continually run the task.
+  fn continually_run(
+    mut self,
+    mut task: Task,
+    dependents: Vec<TaskHandle>,
+  ) -> impl Send + Future<Output = ()> {
+    async move {
+      // The default number of seconds to sleep before running the task again
+      let default_sleep_before_next_task = Self::DELAY_BETWEEN_ITERATIONS;
+      // The current number of seconds to sleep before running the task again
+      // We increment this upon errors in order to not flood the logs with errors
+      let mut current_sleep_before_next_task = default_sleep_before_next_task;
+      let increase_sleep_before_next_task = |current_sleep_before_next_task: &mut u64| {
+        let new_sleep = *current_sleep_before_next_task + default_sleep_before_next_task;
+        // Set a limit of sleeping for two minutes
+        *current_sleep_before_next_task = new_sleep.min(Self::MAX_DELAY_BETWEEN_ITERATIONS);
+      };
+
+      loop {
+        // If we were told to close/all handles were dropped, drop it
+        {
+          let should_close = task.close.try_recv();
+          match should_close {
+            Ok(()) | Err(mpsc::error::TryRecvError::Disconnected) => break,
+            Err(mpsc::error::TryRecvError::Empty) => {}
+          }
+        }
+
+        match self.run_iteration().await {
+          Ok(run_dependents) => {
+            // Upon a successful (error-free) loop iteration, reset the amount of time we sleep
+            current_sleep_before_next_task = default_sleep_before_next_task;
+
+            if run_dependents {
+              for dependent in &dependents {
+                dependent.run_now();
+              }
+            }
+          }
+          Err(e) => {
+            // Get the type name
+            let type_name = type_name::strip_type_name(core::any::type_name::<Self>());
+            // Print the error as a warning, prefixed by the task's type
+            log::warn!("{type_name}: {e:?}");
+            increase_sleep_before_next_task(&mut current_sleep_before_next_task);
+          }
+        }
+
+        // Don't run the task again for another few seconds UNLESS told to run now
+        /*
+          We could replace tokio::mpsc with async_channel, tokio::time::sleep with
+          patchable_async_sleep::sleep, and tokio::select with futures_lite::future::or
+
+          It isn't worth the effort when patchable_async_sleep::sleep will still resolve to tokio
+        */
+        tokio::select! {
+          () = tokio::time::sleep(Duration::from_secs(current_sleep_before_next_task)) => {},
+          msg = task.run_now.recv() => {
+            // Check if this is firing because the handle was dropped
+            if msg.is_none() {
+              break;
+            }
+          },
+        }
+      }
+    }
+  }
+}
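A hedged sketch of defining and spawning a task under this schema (the `HeartbeatTask` name and its body are illustrative; `async fn` in the impl is assumed to satisfy the trait's `impl Send + Future` return type, which holds when the body carries no non-`Send` state):

use serai_task::{Task, ContinuallyRan, DoesNotError};

struct HeartbeatTask;
impl ContinuallyRan for HeartbeatTask {
  type Error = DoesNotError;
  async fn run_iteration(&mut self) -> Result<bool, Self::Error> {
    log::info!("heartbeat");
    // Returning true would immediately run all dependent tasks
    Ok(false)
  }
}

async fn spawn() {
  let (task, handle) = Task::new();
  tokio::spawn(HeartbeatTask.continually_run(task, vec![]));
  // Trigger an iteration now instead of waiting for the timer
  handle.run_now();
  // Dropping every handle closes the task
  drop(handle);
}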
common/task/src/type_name.rs (new file)
@@ -0,0 +1,31 @@
+/// Strip the modules from a type name.
+// This may be of the form `a::b::C`, in which case we only want `C`
+pub(crate) fn strip_type_name(full_type_name: &'static str) -> String {
+  // It also may be `a::b::C<d::e::F>`, in which case, we only attempt to strip `a::b`
+  let mut by_generics = full_type_name.split('<');
+
+  // Strip to just `C`
+  let full_outer_object_name = by_generics.next().unwrap();
+  let mut outer_object_name_parts = full_outer_object_name.split("::");
+  let mut last_part_in_outer_object_name = outer_object_name_parts.next().unwrap();
+  for part in outer_object_name_parts {
+    last_part_in_outer_object_name = part;
+  }
+
+  // Push back on the generic terms
+  let mut type_name = last_part_in_outer_object_name.to_string();
+  for generic in by_generics {
+    type_name.push('<');
+    type_name.push_str(generic);
+  }
+  type_name
+}
+
+#[test]
+fn test_strip_type_name() {
+  assert_eq!(strip_type_name("core::option::Option"), "Option");
+  assert_eq!(
+    strip_type_name("core::option::Option<alloc::string::String>"),
+    "Option<alloc::string::String>"
+  );
+}
@@ -7,7 +7,9 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.77"
+# This must be specified with the patch version, else Rust believes `1.77` < `1.77.0` and will
+# refuse to compile due to relying on versions introduced with `1.77.0`
+rust-version = "1.77.0"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -1,6 +1,6 @@
 MIT License
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2022-2025 Luke Parker
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -17,50 +17,45 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-async-trait = { version = "0.1", default-features = false }
-
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
+bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
-blake2 = { version = "0.10", default-features = false, features = ["std"] }
+blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
+schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 
-transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
 dalek-ff-group = { path = "../crypto/dalek-ff-group", default-features = false, features = ["std"] }
 ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
-schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std", "aggregate"] }
-dkg-musig = { path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
+dkg = { package = "dkg-musig", path = "../crypto/dkg/musig", default-features = false, features = ["std"] }
 frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
-
-zalloc = { path = "../common/zalloc" }
-serai-db = { path = "../common/db" }
-serai-env = { path = "../common/env" }
-
-processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
-message-queue = { package = "serai-message-queue", path = "../message-queue" }
-tributary = { package = "tributary-chain", path = "./tributary" }
-
-sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
-serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
-
 hex = { version = "0.4", default-features = false, features = ["std"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
+zalloc = { path = "../common/zalloc" }
+serai-db = { path = "../common/db" }
+serai-env = { path = "../common/env" }
+serai-task = { path = "../common/task", version = "0.1" }
+
+messages = { package = "serai-processor-messages", path = "../processor/messages" }
+message-queue = { package = "serai-message-queue", path = "../message-queue" }
+tributary-sdk = { path = "./tributary-sdk" }
+
+serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
+
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
 
-futures-util = { version = "0.3", default-features = false, features = ["std"] }
-tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
-libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }
+tokio = { version = "1", default-features = false, features = ["time", "sync", "macros", "rt-multi-thread"] }
 
-[dev-dependencies]
-tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
-sp-application-crypto = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
-sp-runtime = { git = "https://github.com/serai-dex/patch-polkadot-sdk", rev = "da19e1f8ca7a9e2cbf39fbfa493918eeeb45e10b", default-features = false, features = ["std"] }
+serai-cosign = { path = "./cosign" }
+serai-coordinator-substrate = { path = "./substrate" }
+serai-coordinator-tributary = { path = "./tributary" }
+serai-coordinator-p2p = { path = "./p2p" }
+serai-coordinator-libp2p-p2p = { path = "./p2p/libp2p" }
 
 [features]
-longer-reattempts = []
+longer-reattempts = ["serai-coordinator-tributary/longer-reattempts"]
 parity-db = ["serai-db/parity-db"]
 rocksdb = ["serai-db/rocksdb"]
@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
@@ -1,7 +1,29 @@
 # Coordinator
 
-The Serai coordinator communicates with other coordinators to prepare batches
-for Serai and sign transactions.
-
-In order to achieve consensus over gossip, and order certain events, a
-micro-blockchain is instantiated.
+- [`tendermint`](/tributary/tendermint) is an implementation of the Tendermint
+  BFT algorithm.
+
+- [`tributary-sdk`](./tributary-sdk) is a micro-blockchain framework. Instead
+  of producing a blockchain daemon, as the Polkadot SDK or Cosmos SDK intend
+  to, `tributary` is solely intended to be an embedded asynchronous task within
+  an application.
+
+  The Serai coordinator spawns a tributary for each validator set it's
+  coordinating. This allows the participating validators to communicate in a
+  byzantine-fault-tolerant manner (relying on Tendermint for consensus).
+
+- [`cosign`](./cosign) contains a library to decide which Substrate blocks
+  should be cosigned and to evaluate cosigns.
+
+- [`substrate`](./substrate) contains a library to index the Substrate
+  blockchain and handle its events.
+
+- [`tributary`](./tributary) is our instantiation of the Tributary SDK for the
+  Serai processor. It includes the `Transaction` definition and deferred
+  execution logic.
+
+- [`p2p`](./p2p) is our abstract P2P API to service the Coordinator.
+
+- [`libp2p`](./p2p/libp2p) is our libp2p-backed implementation of the P2P API.
+
+- [`src`](./src) contains the source code for the Coordinator binary itself.
coordinator/cosign/Cargo.toml (new file)
@@ -0,0 +1,34 @@
[package]
name = "serai-cosign"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }

borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
serai-abi = { path = "../../substrate/abi", default-features = false, features = ["std"] }
serai-client-serai = { path = "../../substrate/client/serai", default-features = false }

log = { version = "0.4", default-features = false, features = ["std"] }

tokio = { version = "1", default-features = false }

serai-db = { path = "../../common/db", version = "0.1.1" }
serai-task = { path = "../../common/task", version = "0.1" }

serai-cosign-types = { path = "./types" }
@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as
coordinator/cosign/README.md (new file)
@@ -0,0 +1,121 @@
# Serai Cosign

The Serai blockchain is controlled by a set of validators referred to as the
Serai validators. These validators could attempt to double-spend, even if every
node on the network is a full node, via equivocating.

Posit:
- The Serai validators control X SRI
- The Serai validators produce block A swapping X SRI to Y XYZ
- The Serai validators produce block B swapping X SRI to Z ABC
- The Serai validators finalize block A and send to the validators for XYZ
- The Serai validators finalize block B and send to the validators for ABC

This is solved via the cosigning protocol. The validators for XYZ and the
validators for ABC each sign their view of the Serai blockchain, communicating
amongst each other to ensure consistency.

The security of the cosigning protocol is not formally proven, and there are no
claims it achieves Byzantine Fault Tolerance. This protocol is meant to be
practical and make such attacks infeasible, when they could already be argued
difficult to perform.

### Definitions

- Cosign: A signature from a non-Serai validator set for a Serai block
- Cosign Commit: A collection of cosigns which achieve the necessary weight
### Methodology

Finalized blocks from the Serai network are intended to be cosigned if they
contain burn events. Only once cosigned should non-Serai validators process
them.

Cosigning is performed by the non-Serai validator sets, using their threshold
keys declared on the Serai blockchain. Once 83% of non-Serai validator sets,
by weight, cosign a block, a cosign commit is formed. A cosign commit for a
block is considered to also cosign for all blocks preceding it.
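The 83%-by-weight check is implemented with integer arithmetic over raw stake
amounts in `evaluator.rs` and `lib.rs`, both later in this diff. The free
function below is merely an illustrative extraction of that check, not part of
the crate's API:

```rust
/// Whether cosigns amounting to `weight_cosigned` stake, out of `total_stake`,
/// form a cosign commit: strictly more than 83% of the total stake by weight.
fn forms_cosign_commit(weight_cosigned: u64, total_stake: u64) -> bool {
  // Mirrors `weight_cosigned < (((total_stake * 83) / 100) + 1)` from the
  // evaluator, with the comparison inverted
  weight_cosigned >= ((total_stake * 83) / 100) + 1
}
```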
### Bounds Under Asynchrony

Assuming an asynchronous environment fully controlled by the adversary, 34% of
a validator set may cause an equivocation. Control of 67% of non-Serai
validator sets, by weight, is sufficient to produce two distinct cosign commits
at the same position. This is due to the honest stake, 33%, being split across
the two candidates (67% + 16.5% = 83.5%, just over the threshold). This means
the cosigning protocol may produce multiple cosign commits if 34% of 67%, just
22.78%, of the non-Serai validator sets' stake is malicious. This would be in
conjunction with 34% of the Serai validator set (assumed 20% of total stake),
for a total stake requirement of 34% of 20% + 22.78% of 80% (25.024%). This is
an increase from the 6.8% required without the cosigning protocol.
### Bounds Under Synchrony

Assuming the honest stake within the non-Serai validator sets detects the
malicious stake within their set prior to assisting in producing a cosign for
their set, for which there is a multi-second window, 67% of 67% of non-Serai
validator sets is required to produce cosigns for those sets. This raises the
total stake requirement to 42.712% (past the usual 34% threshold).
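The stake arithmetic in the two sections above can be checked mechanically.
The test below is illustrative only (it is not part of the crate) and assumes,
as the text does, that the Serai validator set holds 20% of total stake:

```rust
#[test]
fn cosigning_bound_arithmetic() {
  // Asynchrony: 34% of 67% of the non-Serai validator sets' stake
  let non_serai = 0.34 * 0.67;
  assert!((non_serai - 0.2278).abs() < 1e-9);
  // Combined with 34% of the Serai set, assumed to be 20% of total stake
  let total = (0.34 * 0.20) + (non_serai * 0.80);
  assert!((total - 0.25024).abs() < 1e-9);

  // Synchrony: 67% of 67% of the non-Serai validator sets' stake
  let non_serai = 0.67 * 0.67;
  let total = (0.34 * 0.20) + (non_serai * 0.80);
  assert!((total - 0.42712).abs() < 1e-9);
}
```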
### Behavior Reliant on Synchrony

If the Serai blockchain node detects an equivocation, it will stop responding
to all RPC requests and stop participating in finalizing further blocks. This
lets the node communicate the equivocating commits to other nodes (causing them
to exhibit the same behavior), yet prevents interaction with it.

If cosigns representing 17% of the non-Serai validator sets by weight are
detected for distinct blocks at the same position, the protocol halts. An
explicit latency period of seventy seconds is enacted after receiving a cosign
commit for the detection of such an equivocation. This is largely redundant
given how the Serai blockchain node will presumably have halted itself by this
time.
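The seventy-second latency period referenced above is constructed in
`src/delay.rs` (later in this diff) as one broadcast period plus an expectation
of synchrony. The constants are reproduced here for reference; the glosses on
the latter two are ours:

```rust
use core::time::Duration;

/// How often callers should broadcast the cosigns flagged for rebroadcasting.
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
/// The assumed bound on propagation time for a rebroadcast cosign.
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
/// The explicit latency period before acknowledging a cosign: seventy seconds.
const ACKNOWLEDGEMENT_DELAY: Duration =
  Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());
```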
### Equivocation-Detection Avoidance

Malicious Serai validators could avoid detection of their equivocating if they
produced two distinct blockchains, A and B, with different keys declared for
the same non-Serai validator set. While the validators following A may detect
the cosigns for distinct blocks by validators following B, the cosigns would be
assumed invalid due to their signatures being verified against distinct keys.

This is prevented by requiring cosigns on the blocks which declare new keys,
ensuring all validators have a consistent view of the keys used within the
cosigning protocol (per the bounds of the cosigning protocol). These blocks are
exempt from the general policy of cosign commits cosigning all prior blocks,
preventing the newly declared keys (which aren't yet cosigned) from being used
to cosign themselves. These cosigns are flagged as "notable", are permanently
archived, and must be synced before a validator will move forward.

Cosigning the block which declares new keys also ensures agreement on the
preceding block which declared the new set, with an exact specification of the
participants and their weight, before it impacts the cosigning protocol.
### Denial of Service Concerns

Any historical Serai validator set may trigger a chain halt by producing an
equivocation after their retirement. This requires 67% to be malicious. 34% of
the active Serai validator set may also trigger a chain halt.

17% of non-Serai validator sets equivocating causing a halt means 5.67% of
non-Serai validator sets' stake may cause a halt (in an asynchronous
environment fully controlled by the adversary). In a synchronous environment
where the honest stake cannot be split across two candidates, 11.33% of
non-Serai validator sets' stake is required.

The more practical attack is for one to obtain 5.67% of non-Serai validator
sets' stake, under any network conditions, and simply go offline. This will
take 17% of validator sets offline with it, preventing any cosign commits
from being performed. A fallback protocol where validators individually produce
cosigns, removing the network's horizontal scalability but ensuring liveness,
prevents this, restoring the additional requirements for control of an
asynchronous network or 11.33% of non-Serai validator sets' stake.
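For reference, the 5.67% and 11.33% figures above correspond to one-third and
two-thirds of the 17% halt threshold respectively, rounded to two decimal
places. An illustrative check of that rounding (not part of the crate):

```rust
#[test]
fn dos_threshold_arithmetic() {
  // 17% of non-Serai validator sets' weight equivocating halts the protocol.
  // A third of a set's stake suffices in an asynchronous environment; under
  // synchrony, two-thirds are required.
  assert_eq!(((17.0_f64 / 3.0) * 100.0).round() / 100.0, 5.67);
  assert_eq!(((17.0_f64 * 2.0 / 3.0) * 100.0).round() / 100.0, 11.33);
}
```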
### TODO

The Serai node no longer responding to RPC requests upon detecting any
equivocation, and the fallback protocol where validators individually produce
signatures, are not implemented at this time. The former means the detection of
equivocating cosigns is not redundant and the latter makes 5.67% of non-Serai
validator sets' stake the DoS threshold, even without control of an
asynchronous network.
coordinator/cosign/src/delay.rs (new file)
@@ -0,0 +1,57 @@
use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_db::*;
use serai_task::{DoesNotError, ContinuallyRan};

use crate::evaluator::CosignedBlocks;

/// How often callers should broadcast the cosigns flagged for rebroadcasting.
pub const BROADCAST_FREQUENCY: Duration = Duration::from_secs(60);
const SYNCHRONY_EXPECTATION: Duration = Duration::from_secs(10);
const ACKNOWLEDGEMENT_DELAY: Duration =
  Duration::from_secs(BROADCAST_FREQUENCY.as_secs() + SYNCHRONY_EXPECTATION.as_secs());

create_db!(
  SubstrateCosignDelay {
    // The latest cosigned block number.
    LatestCosignedBlockNumber: () -> u64,
  }
);

/// A task to delay acknowledgement of cosigns.
pub(crate) struct CosignDelayTask<D: Db> {
  pub(crate) db: D,
}

impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.db.txn();

        // Receive the next block to mark as cosigned
        let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
          break;
        };
        // Calculate when we should mark it as valid
        let time_valid =
          SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
        // Sleep until then (a deadline already in the past yields a zero-length sleep)
        tokio::time::sleep(time_valid.duration_since(SystemTime::now()).unwrap_or(Duration::ZERO))
          .await;

        // Set the cosigned block
        LatestCosignedBlockNumber::set(&mut txn, &block_number);
        txn.commit();

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
coordinator/cosign/src/evaluator.rs (new file)
@@ -0,0 +1,246 @@
use core::future::Future;
use std::time::{Duration, Instant, SystemTime};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::{
  HasEvents, GlobalSession, NetworksLatestCosignedBlock, RequestNotableCosigns,
  intend::{GlobalSessionsChannel, BlockEventData, BlockEvents},
};

create_db!(
  SubstrateCosignEvaluator {
    // The global session currently being evaluated.
    CurrentlyEvaluatedGlobalSession: () -> ([u8; 32], GlobalSession),
  }
);

db_channel!(
  SubstrateCosignEvaluatorChannels {
    // (cosigned block, time cosign was evaluated)
    CosignedBlocks: () -> (u64, u64),
  }
);

// This is a strict function which won't panic, even with a malicious Serai node, so long as:
// - It's called incrementally (with an increment of 1)
// - It's only called for block numbers we've completed indexing on within the intend task
// - It's only called for block numbers after a global session has started
// - The global sessions channel is populated as the block declaring the session is indexed
// Which all hold true within the context of this task and the intend task.
//
// This function will also ensure the currently evaluated global session is incremented once we
// finish evaluation of the prior session.
fn currently_evaluated_global_session_strict(
  txn: &mut impl DbTxn,
  block_number: u64,
) -> ([u8; 32], GlobalSession) {
  let mut res = {
    let existing = match CurrentlyEvaluatedGlobalSession::get(txn) {
      Some(existing) => existing,
      None => {
        let first = GlobalSessionsChannel::try_recv(txn)
          .expect("fetching latest global session yet none declared");
        CurrentlyEvaluatedGlobalSession::set(txn, &first);
        first
      }
    };
    assert!(
      existing.1.start_block_number <= block_number,
      "candidate's start block number exceeds our block number"
    );
    existing
  };

  if let Some(next) = GlobalSessionsChannel::peek(txn) {
    assert!(
      block_number <= next.1.start_block_number,
      "currently_evaluated_global_session_strict wasn't called incrementally"
    );
    // If it's time for this session to activate, take it from the channel and set it
    if block_number == next.1.start_block_number {
      GlobalSessionsChannel::try_recv(txn).unwrap();
      CurrentlyEvaluatedGlobalSession::set(txn, &next);
      res = next;
    }
  }

  res
}

pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u8; 32]> {
  CurrentlyEvaluatedGlobalSession::get(getter).map(|(id, _info)| id)
}

/// A task to determine if a block has been cosigned and we should handle it.
pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
  pub(crate) db: D,
  pub(crate) request: R,
  pub(crate) last_request_for_cosigns: Instant,
}

impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
      const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
      if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
        return false;
      }
      *last_request_for_cosigns = Instant::now();
      true
    };

    async move {
      let mut known_cosign = None;
      let mut made_progress = false;
      loop {
        let mut txn = self.db.txn();
        let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
        else {
          break;
        };

        // Fetch the global session information
        let (global_session, global_session_info) =
          currently_evaluated_global_session_strict(&mut txn, block_number);

        match has_events {
          // Because this had notable events, we require an explicit cosign for this block by a
          // supermajority of the prior block's validator sets
          HasEvents::Notable => {
            let mut weight_cosigned = 0;
            for set in global_session_info.sets {
              // Check if we have the cosign from this set
              if NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
                .map(|signed_cosign| signed_cosign.cosign.block_number) ==
                Some(block_number)
              {
                // Since we have this cosign, add the set's weight to the weight which has cosigned
                weight_cosigned +=
                  global_session_info.stakes.get(&set.network).ok_or_else(|| {
                    "ValidatorSet in global session yet didn't have its stake".to_string()
                  })?;
              }
            }
            // Check if the sum weight doesn't cross the required threshold
            if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
              // Request the necessary cosigns over the network
              if should_request_cosigns(&mut self.last_request_for_cosigns) {
                self
                  .request
                  .request_notable_cosigns(global_session)
                  .await
                  .map_err(|e| format!("{e:?}"))?;
              }
              // We return an error so the delay before this task is run again increases
              return Err(format!(
                "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
              ));
            }

            log::info!("marking notable block #{block_number} as cosigned");
          }
          // Since this block didn't have any notable events, we simply require a cosign for this
          // block or a greater block by the current validator sets
          HasEvents::NonNotable => {
            // Check if this was satisfied by a cached result which wasn't calculated incrementally
            let known_cosigned = if let Some(known_cosign) = known_cosign {
              known_cosign >= block_number
            } else {
              // Clear `known_cosign` which is no longer helpful
              known_cosign = None;
              false
            };

            // If it isn't already known to be cosigned, evaluate the latest cosigns
            if !known_cosigned {
              /*
                LatestCosign is populated with the latest cosigns for each network which don't
                exceed the latest global session we've evaluated the start of. This current block
                is during the latest global session we've evaluated the start of.
              */

              let mut weight_cosigned = 0;
              let mut lowest_common_block: Option<u64> = None;
              for set in global_session_info.sets {
                // Check if this set cosigned this block or not
                let Some(cosign) =
                  NetworksLatestCosignedBlock::get(&txn, global_session, set.network)
                else {
                  continue;
                };
                if cosign.cosign.block_number >= block_number {
                  weight_cosigned +=
                    global_session_info.stakes.get(&set.network).ok_or_else(|| {
                      "ValidatorSet in global session yet didn't have its stake".to_string()
                    })?;
                }

                // Update the lowest block common to all of these cosigns
                lowest_common_block = lowest_common_block
                  .map(|existing| existing.min(cosign.cosign.block_number))
                  .or(Some(cosign.cosign.block_number));
              }

              // Check if the sum weight doesn't cross the required threshold
              if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
                // Request the superseding notable cosigns over the network
                // If this session hasn't yet produced notable cosigns, then we presume we'll see
                // the desired non-notable cosigns as part of normal operations, without needing to
                // explicitly request them
                if should_request_cosigns(&mut self.last_request_for_cosigns) {
                  self
                    .request
                    .request_notable_cosigns(global_session)
                    .await
                    .map_err(|e| format!("{e:?}"))?;
                }
                // We return an error so the delay before this task is run again increases
                return Err(format!(
                  "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
                ));
              }

              // Update the cached result for the block we know is cosigned
              /*
                There may be a higher block which was cosigned, but once we get to this block,
                we'll re-evaluate and find it then. The alternative would be an optimistic
                re-evaluation now. Both are fine, so the lower-complexity option is preferred.
              */
              known_cosign = lowest_common_block;
            }

            log::debug!("marking non-notable block #{block_number} as cosigned");
          }
          // If this block has no events necessitating cosigning, we can immediately consider the
          // block cosigned (making this block a NOP)
          HasEvents::No => {}
        }

        // Since we checked we had the necessary cosigns, send it for delay before acknowledgement
        CosignedBlocks::send(
          &mut txn,
          &(
            block_number,
            SystemTime::now()
              .duration_since(SystemTime::UNIX_EPOCH)
              .unwrap_or(Duration::ZERO)
              .as_secs(),
          ),
        );
        txn.commit();

        if (block_number % 500) == 0 {
          log::info!("marking block #{block_number} as cosigned");
        }

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
coordinator/cosign/src/intend.rs (new file)
@@ -0,0 +1,196 @@
use core::future::Future;
use std::{sync::Arc, collections::HashMap};

use blake2::{Digest, Blake2b256};

use serai_abi::primitives::{
  balance::Amount, validator_sets::ExternalValidatorSet, address::SeraiAddress,
  merkle::IncrementalUnbalancedMerkleTree,
};
use serai_client_serai::Serai;

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::*;

create_db!(
  CosignIntend {
    ScanCosignFrom: () -> u64,
    BuildsUpon: () -> IncrementalUnbalancedMerkleTree,
  }
);

#[derive(Debug, BorshSerialize, BorshDeserialize)]
pub(crate) struct BlockEventData {
  pub(crate) block_number: u64,
  pub(crate) has_events: HasEvents,
}

db_channel! {
  CosignIntendChannels {
    GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
    BlockEvents: () -> BlockEventData,
    IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent,
  }
}

async fn block_has_events_justifying_a_cosign(
  serai: &Serai,
  block_number: u64,
) -> Result<(Block, HasEvents), String> {
  let block = serai
    .block_by_number(block_number)
    .await
    .map_err(|e| format!("{e:?}"))?
    .ok_or_else(|| "couldn't get block which should've been finalized".to_string())?;
  let serai = serai.as_of(block.header.hash()).await.map_err(|e| format!("{e:?}"))?;

  if !serai.validator_sets().set_keys_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::Notable));
  }

  if !serai.coins().burn_with_instruction_events().await.map_err(|e| format!("{e:?}"))?.is_empty() {
    return Ok((block, HasEvents::NonNotable));
  }

  Ok((block, HasEvents::No))
}

/// A task to determine which blocks we should intend to cosign.
pub(crate) struct CosignIntendTask<D: Db> {
  pub(crate) db: D,
  pub(crate) serai: Arc<Serai>,
}

impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let start_block_number = ScanCosignFrom::get(&self.db).unwrap_or(1);
      let latest_block_number =
        self.serai.latest_finalized_block_number().await.map_err(|e| format!("{e:?}"))?;

      for block_number in start_block_number ..= latest_block_number {
        let mut txn = self.db.txn();

        let (block, mut has_events) =
          block_has_events_justifying_a_cosign(&self.serai, block_number)
            .await
            .map_err(|e| format!("{e:?}"))?;

        let mut builds_upon =
          BuildsUpon::get(&txn).unwrap_or(IncrementalUnbalancedMerkleTree::new());

        // Check we are indexing a linear chain
        if block.header.builds_upon() !=
          builds_upon.clone().calculate(serai_abi::BLOCK_HEADER_BRANCH_TAG)
        {
          Err(format!(
            "node's block #{block_number} doesn't build upon the block #{} prior indexed",
            block_number - 1
          ))?;
        }
        let block_hash = block.header.hash();
        SubstrateBlockHash::set(&mut txn, block_number, &block_hash);
        builds_upon.append(
          serai_abi::BLOCK_HEADER_BRANCH_TAG,
          Blake2b256::new_with_prefix([serai_abi::BLOCK_HEADER_LEAF_TAG])
            .chain_update(block_hash.0)
            .finalize()
            .into(),
        );
        BuildsUpon::set(&mut txn, &builds_upon);

        let global_session_for_this_block = LatestGlobalSessionIntended::get(&txn);

        // If this is notable, it creates a new global session, which we index into the database
        // now
        if has_events == HasEvents::Notable {
          let serai = self.serai.as_of(block_hash).await.map_err(|e| format!("{e:?}"))?;
          let sets_and_keys = cosigning_sets(&serai).await?;
          let global_session =
            GlobalSession::id(sets_and_keys.iter().map(|(set, _key)| *set).collect());

          let mut sets = Vec::with_capacity(sets_and_keys.len());
          let mut keys = HashMap::with_capacity(sets_and_keys.len());
          let mut stakes = HashMap::with_capacity(sets_and_keys.len());
          let mut total_stake = 0;
          for (set, key) in &sets_and_keys {
            sets.push(*set);
            keys.insert(set.network, SeraiAddress::from(*key));
            let stake = serai
              .validator_sets()
              .current_stake(set.network.into())
              .await
              .map_err(|e| format!("{e:?}"))?
              .unwrap_or(Amount(0))
              .0;
            stakes.insert(set.network, stake);
            total_stake += stake;
          }
          if total_stake == 0 {
            Err(format!("cosigning sets for block #{block_number} had 0 stake in total"))?;
          }

          let global_session_info = GlobalSession {
            // This session starts cosigning after this block, as this block must be cosigned by
            // the existing validators
            start_block_number: block_number + 1,
            sets,
            keys,
            stakes,
            total_stake,
          };
          GlobalSessions::set(&mut txn, global_session, &global_session_info);
          if let Some(ending_global_session) = global_session_for_this_block {
            GlobalSessionsLastBlock::set(&mut txn, ending_global_session, &block_number);
          }
          LatestGlobalSessionIntended::set(&mut txn, &global_session);
          GlobalSessionsChannel::send(&mut txn, &(global_session, global_session_info));
        }

        // If there isn't anyone available to cosign this block, meaning it'll never be cosigned,
        // we flag it as not having any events requiring cosigning so we don't attempt to
        // sign/require a cosign for it
        if global_session_for_this_block.is_none() {
          has_events = HasEvents::No;
        }

        match has_events {
          HasEvents::Notable | HasEvents::NonNotable => {
            let global_session_for_this_block = global_session_for_this_block
              .expect("global session for this block was None but still attempting to cosign it");
            let global_session_info = GlobalSessions::get(&txn, global_session_for_this_block)
              .expect("last global session intended wasn't saved to the database");

            // Tell each set of their expectation to cosign this block
            for set in global_session_info.sets {
              log::debug!("{set:?} will be cosigning block #{block_number}");
              IntendedCosigns::send(
                &mut txn,
                set,
                &CosignIntent {
                  global_session: global_session_for_this_block,
                  block_number,
                  block_hash,
                  notable: has_events == HasEvents::Notable,
                },
              );
            }
          }
          HasEvents::No => {}
        }

        // Populate a singular feed with every block's status for the evaluator to work off of
        BlockEvents::send(&mut txn, &(BlockEventData { block_number, has_events }));
        // Mark this block as handled, meaning we should scan from the next block moving on
        ScanCosignFrom::set(&mut txn, &(block_number + 1));
        txn.commit();
      }

      Ok(start_block_number <= latest_block_number)
    }
  }
}
450
coordinator/cosign/src/lib.rs
Normal file
450
coordinator/cosign/src/lib.rs
Normal file
@@ -0,0 +1,450 @@
|
|||||||
|
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||||
|
#![doc = include_str!("../README.md")]
|
||||||
|
#![deny(missing_docs)]
|
||||||
|
|
||||||
|
use core::{fmt::Debug, future::Future};
|
||||||
|
use std::{sync::Arc, collections::HashMap, time::Instant};
|
||||||
|
|
||||||
|
use blake2::{Digest, Blake2s256};
|
||||||
|
|
||||||
|
use borsh::{BorshSerialize, BorshDeserialize};
|
||||||
|
|
||||||
|
use serai_abi::{
|
||||||
|
primitives::{
|
||||||
|
BlockHash,
|
||||||
|
crypto::{Public, KeyPair},
|
||||||
|
network_id::ExternalNetworkId,
|
||||||
|
validator_sets::{Session, ExternalValidatorSet},
|
||||||
|
address::SeraiAddress,
|
||||||
|
},
|
||||||
|
Block,
|
||||||
|
};
|
||||||
|
use serai_client_serai::{Serai, TemporalSerai};
|
||||||
|
|
||||||
|
use serai_db::*;
|
||||||
|
use serai_task::*;
|
||||||
|
|
||||||
|
use serai_cosign_types::*;
|
||||||
|
|
||||||
|
/// The cosigns which are intended to be performed.
|
||||||
|
mod intend;
|
||||||
|
/// The evaluator of the cosigns.
|
||||||
|
mod evaluator;
|
||||||
|
/// The task to delay acknowledgement of the cosigns.
|
||||||
|
mod delay;
|
||||||
|
pub use delay::BROADCAST_FREQUENCY;
|
||||||
|
use delay::LatestCosignedBlockNumber;
|
||||||
|
|
||||||
|
/// A 'global session', defined as all validator sets used for cosigning at a given moment.
|
||||||
|
///
|
||||||
|
/// We evaluate cosign faults within a global session. This ensures even if cosigners cosign
|
||||||
|
/// distinct blocks at distinct positions within a global session, we still identify the faults.
|
||||||
|
/*
|
||||||
|
There is the attack where a validator set is given an alternate blockchain with a key generation
|
||||||
|
event at block #n, while most validator sets are given a blockchain with a key generation event
|
||||||
|
at block number #(n+1). This prevents whoever has the alternate blockchain from verifying the
|
||||||
|
cosigns on the primary blockchain, and detecting the faults, if they use the keys as of the block
|
||||||
|
prior to the block being cosigned.
|
||||||
|
|
||||||
|
We solve this by binding cosigns to a global session ID, which has a specific start block, and
|
||||||
|
reading the keys from the start block. This means that so long as all validator sets agree on the
|
||||||
|
start of a global session, they can verify all cosigns produced by that session, regardless of
|
||||||
|
how it advances. Since agreeing on the start of a global session is mandated, there's no way to
|
||||||
|
have validator sets follow two distinct global sessions without breaking the bounds of the
|
||||||
|
cosigning protocol.
|
||||||
|
*/
|
||||||
|
#[derive(Debug, BorshSerialize, BorshDeserialize)]
|
||||||
|
pub(crate) struct GlobalSession {
|
||||||
|
pub(crate) start_block_number: u64,
|
||||||
|
pub(crate) sets: Vec<ExternalValidatorSet>,
|
||||||
|
pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
|
||||||
|
pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
|
||||||
|
pub(crate) total_stake: u64,
|
||||||
|
}
|
||||||
|
impl GlobalSession {
|
||||||
|
fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
|
||||||
|
cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
|
||||||
|
Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// If the block has events.
|
||||||
|
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
|
||||||
|
enum HasEvents {
|
||||||
|
/// The block had a notable event.
|
||||||
|
///
|
||||||
|
/// This is a special case as blocks with key gen events change the keys used for cosigning, and
|
||||||
|
/// accordingly must be cosigned before we advance past them.
|
||||||
|
Notable,
|
||||||
|
/// The block had an non-notable event justifying a cosign.
|
||||||
|
NonNotable,
|
||||||
|
/// The block didn't have an event justifying a cosign.
|
||||||
|
No,
|
||||||
|
}
|
||||||
|
|
||||||
|
create_db! {
|
||||||
|
Cosign {
|
||||||
|
// The following are populated by the intend task and used throughout the library
|
||||||
|
|
||||||
|
// An index of Substrate blocks
|
||||||
|
SubstrateBlockHash: (block_number: u64) -> BlockHash,
|
||||||
|
// A mapping from a global session's ID to its relevant information.
|
||||||
|
GlobalSessions: (global_session: [u8; 32]) -> GlobalSession,
|
||||||
|
// The last block to be cosigned by a global session.
|
||||||
|
GlobalSessionsLastBlock: (global_session: [u8; 32]) -> u64,
|
||||||
|
// The latest global session intended.
|
||||||
|
//
|
||||||
|
// This is distinct from the latest global session for which we've evaluated the cosigns for.
|
||||||
|
LatestGlobalSessionIntended: () -> [u8; 32],
|
||||||
|
|
||||||
|
// The following are managed by the `intake_cosign` function present in this file
|
||||||
|
|
||||||
|
// The latest cosigned block for each network.
|
||||||
|
//
|
||||||
|
// This will only be populated with cosigns predating or during the most recent global session
|
||||||
|
// to have its start cosigned.
|
||||||
|
//
|
||||||
|
// The global session changes upon a notable block, causing each global session to have exactly
|
||||||
|
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
||||||
|
// block, causing the latest cosigned block for a global session to either be the global
|
||||||
|
// session's notable cosigns or the network's latest cosigns.
|
||||||
|
NetworksLatestCosignedBlock: (
|
||||||
|
global_session: [u8; 32],
|
||||||
|
network: ExternalNetworkId
|
||||||
|
) -> SignedCosign,
|
||||||
|
// Cosigns received for blocks not locally recognized as finalized.
|
||||||
|
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
||||||
|
// The global session which faulted.
|
||||||
|
FaultedSession: () -> [u8; 32],
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the keys used for cosigning by a specific network.
|
||||||
|
async fn keys_for_network(
|
||||||
|
serai: &TemporalSerai<'_>,
|
||||||
|
network: ExternalNetworkId,
|
||||||
|
) -> Result<Option<(Session, KeyPair)>, String> {
|
||||||
|
let Some(latest_session) =
|
||||||
|
serai.validator_sets().current_session(network.into()).await.map_err(|e| format!("{e:?}"))?
|
||||||
|
else {
|
||||||
|
// If this network hasn't had a session declared, move on
|
||||||
|
return Ok(None);
|
||||||
|
};
|
||||||
|
|
||||||
|
// Get the keys for the latest session
|
||||||
|
if let Some(keys) = serai
|
||||||
|
.validator_sets()
|
||||||
|
.keys(ExternalValidatorSet { network, session: latest_session })
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("{e:?}"))?
|
||||||
|
{
|
||||||
|
return Ok(Some((latest_session, keys)));
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the latest session has yet to set keys, use the prior session
|
||||||
|
if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
|
||||||
|
if let Some(keys) = serai
|
||||||
|
.validator_sets()
|
||||||
|
.keys(ExternalValidatorSet { network, session: prior_session })
|
||||||
|
.await
|
||||||
|
.map_err(|e| format!("{e:?}"))?
|
||||||
|
{
|
||||||
|
return Ok(Some((prior_session, keys)));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(None)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
|
||||||
|
/// block.
|
||||||
|
async fn cosigning_sets(
|
||||||
|
serai: &TemporalSerai<'_>,
|
||||||
|
) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
|
||||||
|
let mut sets = vec![];
|
||||||
|
for network in ExternalNetworkId::all() {
|
||||||
|
let Some((session, keys)) = keys_for_network(serai, network).await? else {
|
||||||
|
// If this network doesn't have usable keys, move on
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
sets.push((ExternalValidatorSet { network, session }, keys.0));
|
||||||
|
}
|
||||||
|
Ok(sets)
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An object usable to request notable cosigns for a block.
|
||||||
|
pub trait RequestNotableCosigns: 'static + Send {
|
||||||
|
/// The error type which may be encountered when requesting notable cosigns.
|
||||||
|
type Error: Debug;
|
||||||
|
|
||||||
|
/// Request the notable cosigns for this global session.
|
||||||
|
fn request_notable_cosigns(
|
||||||
|
&self,
|
||||||
|
global_session: [u8; 32],
|
||||||
|
) -> impl Send + Future<Output = Result<(), Self::Error>>;
|
||||||
|
}
|
||||||
|
|
||||||
|
/// An error used to indicate the cosigning protocol has faulted.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub struct Faulted;
|
||||||
|
|
||||||
|
/// An error incurred while intaking a cosign.
|
||||||
|
#[derive(Debug)]
|
||||||
|
pub enum IntakeCosignError {
|
||||||
|
/// Cosign is for a not-yet-indexed block
|
||||||
|
NotYetIndexedBlock,
|
||||||
|
/// A later cosign for this cosigner has already been handled
|
||||||
|
StaleCosign,
|
||||||
|
/// The cosign's global session isn't recognized
|
||||||
|
UnrecognizedGlobalSession,
|
||||||
|
/// The cosign is for a block before its global session starts
|
||||||
|
BeforeGlobalSessionStart,
|
||||||
|
/// The cosign is for a block after its global session ends
|
||||||
|
AfterGlobalSessionEnd,
|
||||||
|
/// The cosign's signing network wasn't a participant in this global session
|
||||||
|
NonParticipatingNetwork,
|
||||||
|
/// The cosign had an invalid signature
|
||||||
|
InvalidSignature,
|
||||||
|
/// The cosign is for a global session which has yet to have its declaration block cosigned
|
||||||
|
FutureGlobalSession,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl IntakeCosignError {
|
||||||
|
/// If this error is temporal to the local view
|
||||||
|
pub fn temporal(&self) -> bool {
|
||||||
|
match self {
|
||||||
|
IntakeCosignError::NotYetIndexedBlock |
|
||||||
|
IntakeCosignError::StaleCosign |
|
||||||
|
IntakeCosignError::UnrecognizedGlobalSession |
|
||||||
|
IntakeCosignError::FutureGlobalSession => true,
|
||||||
|
IntakeCosignError::BeforeGlobalSessionStart |
|
||||||
|
IntakeCosignError::AfterGlobalSessionEnd |
|
||||||
|
IntakeCosignError::NonParticipatingNetwork |
|
||||||
|
IntakeCosignError::InvalidSignature => false,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The interface to manage cosigning with.
|
||||||
|
pub struct Cosigning<D: Db> {
|
||||||
|
db: D,
|
||||||
|
}
|
||||||
|
impl<D: Db> Cosigning<D> {
|
||||||
|
/// Spawn the tasks to intend and evaluate cosigns.
|
||||||
|
///
|
||||||
|
/// The database specified must only be used with a singular instance of the Serai network, and
|
||||||
|
/// only used once at any given time.
|
||||||
|
pub fn spawn<R: RequestNotableCosigns>(
|
||||||
|
db: D,
|
||||||
|
serai: Arc<Serai>,
|
||||||
|
request: R,
|
||||||
|
tasks_to_run_upon_cosigning: Vec<TaskHandle>,
|
||||||
|
) -> Self {
|
||||||
|
let (intend_task, _intend_task_handle) = Task::new();
|
||||||
|
let (evaluator_task, evaluator_task_handle) = Task::new();
|
||||||
|
let (delay_task, delay_task_handle) = Task::new();
|
||||||
|
tokio::spawn(
|
||||||
|
(intend::CosignIntendTask { db: db.clone(), serai })
|
||||||
|
.continually_run(intend_task, vec![evaluator_task_handle]),
|
||||||
|
);
|
||||||
|
tokio::spawn(
|
||||||
|
(evaluator::CosignEvaluatorTask {
|
||||||
|
db: db.clone(),
|
||||||
|
request,
|
||||||
|
last_request_for_cosigns: Instant::now(),
|
||||||
|
})
|
||||||
|
.continually_run(evaluator_task, vec![delay_task_handle]),
|
||||||
|
);
|
||||||
|
tokio::spawn(
|
||||||
|
(delay::CosignDelayTask { db: db.clone() })
|
||||||
|
.continually_run(delay_task, tasks_to_run_upon_cosigning),
|
||||||
|
);
|
||||||
|
Self { db }
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The latest cosigned block number.
|
||||||
|
pub fn latest_cosigned_block_number(getter: &impl Get) -> Result<u64, Faulted> {
|
||||||
|
if FaultedSession::get(getter).is_some() {
|
||||||
|
Err(Faulted)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(LatestCosignedBlockNumber::get(getter).unwrap_or(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch a cosigned Substrate block's hash by its block number.
|
||||||
|
pub fn cosigned_block(
|
||||||
|
getter: &impl Get,
|
||||||
|
block_number: u64,
|
||||||
|
) -> Result<Option<BlockHash>, Faulted> {
|
||||||
|
if block_number > Self::latest_cosigned_block_number(getter)? {
|
||||||
|
return Ok(None);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(Some(
|
||||||
|
SubstrateBlockHash::get(getter, block_number).expect("cosigned block but didn't index it"),
|
||||||
|
))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetch the notable cosigns for a global session in order to respond to requests.
|
||||||
|
///
|
||||||
|
/// If this global session hasn't produced any notable cosigns, this will return the latest
|
||||||
|
/// cosigns for this session.
|
||||||
|
pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
|
||||||
|
let mut cosigns = vec![];
|
||||||
|
for network in ExternalNetworkId::all() {
|
||||||
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
|
||||||
|
cosigns.push(cosign);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cosigns
|
||||||
|
}
|
||||||
|
|
||||||
|
/// The cosigns to rebroadcast every `BROADCAST_FREQUENCY` seconds.
|
||||||
|
///
|
||||||
|
/// This will be the most recent cosigns, in case the initial broadcast failed, or the faulty
|
||||||
|
/// cosigns, in case of a fault, to induce identification of the fault by others.
|
||||||
|
pub fn cosigns_to_rebroadcast(&self) -> Vec<SignedCosign> {
|
||||||
|
if let Some(faulted) = FaultedSession::get(&self.db) {
|
||||||
|
let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
|
||||||
|
// Also include all of our recognized-as-honest cosigns in an attempt to induce fault
|
||||||
|
// identification in those who see the faulty cosigns as honest
|
||||||
|
for network in ExternalNetworkId::all() {
|
||||||
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
|
||||||
|
if cosign.cosign.global_session == faulted {
|
||||||
|
cosigns.push(cosign);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cosigns
|
||||||
|
} else {
|
||||||
|
let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
|
||||||
|
return vec![];
|
||||||
|
};
|
||||||
|
let mut cosigns = vec![];
|
||||||
|
for network in ExternalNetworkId::all() {
|
||||||
|
if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
|
||||||
|
cosigns.push(cosign);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
cosigns
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Intake a cosign.
|
||||||
|
//
|
||||||
|
// Takes `&mut self` as this should only be called once at any given moment.
|
||||||
|
pub fn intake_cosign(&mut self, signed_cosign: &SignedCosign) -> Result<(), IntakeCosignError> {
|
||||||
|
let cosign = &signed_cosign.cosign;
|
||||||
|
let network = cosign.cosigner;
|
||||||
|
|
||||||
|
// Check our indexed blockchain includes a block with this block number
|
||||||
|
let Some(our_block_hash) = SubstrateBlockHash::get(&self.db, cosign.block_number) else {
|
||||||
|
Err(IntakeCosignError::NotYetIndexedBlock)?
|
||||||
|
};
|
||||||
|
let faulty = cosign.block_hash != our_block_hash;
|
||||||
|
|
||||||
|
// Check this isn't a dated cosign within its global session (as it would be if rebroadcasted)
|
||||||
|
    if !faulty {
      if let Some(existing) =
        NetworksLatestCosignedBlock::get(&self.db, cosign.global_session, network)
      {
        if existing.cosign.block_number >= cosign.block_number {
          Err(IntakeCosignError::StaleCosign)?;
        }
      }
    }

    let Some(global_session) = GlobalSessions::get(&self.db, cosign.global_session) else {
      Err(IntakeCosignError::UnrecognizedGlobalSession)?
    };

    // Check the cosigned block number is in range of the global session
    if cosign.block_number < global_session.start_block_number {
      // Cosign is for a block predating the global session
      Err(IntakeCosignError::BeforeGlobalSessionStart)?;
    }
    if !faulty {
      // This prevents a malicious validator set, on the same chain, from producing a cosign after
      // their final block, replacing their notable cosign
      if let Some(last_block) = GlobalSessionsLastBlock::get(&self.db, cosign.global_session) {
        if cosign.block_number > last_block {
          // Cosign is for a block after the last block this global session should have signed
          Err(IntakeCosignError::AfterGlobalSessionEnd)?;
        }
      }
    }

    // Check the cosign's signature
    {
      let key = Public::from({
        let Some(key) = global_session.keys.get(&network) else {
          Err(IntakeCosignError::NonParticipatingNetwork)?
        };
        *key
      });

      if !signed_cosign.verify_signature(key) {
        Err(IntakeCosignError::InvalidSignature)?;
      }
    }

    // Since we verified this cosign's signature, and have a sufficiently long chain, handle the
    // cosign

    let mut txn = self.db.txn();

    if !faulty {
      // If this is for a future global session, we don't acknowledge this cosign at this time
      let latest_cosigned_block_number = LatestCosignedBlockNumber::get(&txn).unwrap_or(0);
      // This global session starts the block *after* its declaration, so we want to check if the
      // block declaring it was cosigned
      if (global_session.start_block_number - 1) > latest_cosigned_block_number {
        drop(txn);
        return Err(IntakeCosignError::FutureGlobalSession);
      }

      // This is safe as it's in-range and newer, per the prior checks run when not faulty
      NetworksLatestCosignedBlock::set(&mut txn, cosign.global_session, network, signed_cosign);
    } else {
      let mut faults = Faults::get(&txn, cosign.global_session).unwrap_or(vec![]);
      // Only handle this as a fault if this set wasn't prior faulty
      if !faults.iter().any(|cosign| cosign.cosign.cosigner == network) {
        faults.push(signed_cosign.clone());
        Faults::set(&mut txn, cosign.global_session, &faults);

        let mut weight_cosigned = 0;
        for fault in &faults {
          let stake = global_session
            .stakes
            .get(&fault.cosign.cosigner)
            .expect("cosigner with recognized key didn't have a stake entry saved");
          weight_cosigned += stake;
        }

        // Check if the sum weight means a fault has occurred
        if weight_cosigned >= ((global_session.total_stake * 17) / 100) {
          FaultedSession::set(&mut txn, &cosign.global_session);
        }
      }
    }

    txn.commit();
    Ok(())
  }

  /// Receive intended cosigns to produce for this ExternalValidatorSet.
  ///
  /// All cosigns intended, up to and including the next notable cosign, are returned.
  ///
  /// This will drain the internal channel and not re-yield these intentions again.
  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
    let mut res: Vec<CosignIntent> = vec![];
    // While we have yet to find a notable cosign...
    while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
      let Some(intent) = intend::IntendedCosigns::try_recv(txn, set) else { break };
      res.push(intent);
    }
    res
  }
}
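
As a worked illustration of the fault threshold above: a global session is marked faulted once the conflicting cosigners' stake reaches 17% of the session's total stake. A minimal sketch, with made-up stake figures standing in for `global_session.stakes`:

fn main() {
  // Illustrative figures; real values come from `global_session.stakes`.
  let total_stake: u64 = 600;
  let fault_stakes = [60u64, 50];

  let weight_cosigned: u64 = fault_stakes.iter().sum();
  // The same threshold check as above: 110 >= (600 * 17) / 100, i.e. 110 >= 102
  assert!(weight_cosigned >= ((total_stake * 17) / 100));
}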

25  coordinator/cosign/types/Cargo.toml  (new file)
@@ -0,0 +1,25 @@
[package]
name = "serai-cosign-types"
version = "0.1.0"
description = "Evaluator of cosigns for the Serai network"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }

@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as

72  coordinator/cosign/types/src/lib.rs  (new file)
@@ -0,0 +1,72 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![deny(missing_docs)]
//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
use borsh::{BorshSerialize, BorshDeserialize};

use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};

/// The schnorrkel context to use when signing a cosign.
pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

/// An intended cosign.
#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct CosignIntent {
  /// The global session this cosign is being performed under.
  pub global_session: [u8; 32],
  /// The number of the block to cosign.
  pub block_number: u64,
  /// The hash of the block to cosign.
  pub block_hash: BlockHash,
  /// If this cosign must be handled before further cosigns are.
  pub notable: bool,
}

/// A cosign.
#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
pub struct Cosign {
  /// The global session this cosign is being performed under.
  pub global_session: [u8; 32],
  /// The number of the block to cosign.
  pub block_number: u64,
  /// The hash of the block to cosign.
  pub block_hash: BlockHash,
  /// The actual cosigner.
  pub cosigner: ExternalNetworkId,
}

impl CosignIntent {
  /// Convert this into a `Cosign`.
  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
    Cosign { global_session, block_number, block_hash, cosigner }
  }
}

impl Cosign {
  /// The message to sign when signing this cosign.
  ///
  /// This must be signed with schnorrkel, with the context set to `COSIGN_CONTEXT`.
  pub fn signature_message(&self) -> Vec<u8> {
    // We use a schnorrkel context to domain-separate this
    borsh::to_vec(self).unwrap()
  }
}

/// A signed cosign.
#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
pub struct SignedCosign {
  /// The cosign.
  pub cosign: Cosign,
  /// The signature for the cosign.
  pub signature: [u8; 64],
}

impl SignedCosign {
  /// Verify a cosign's signature.
  pub fn verify_signature(&self, signer: Public) -> bool {
    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };

    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
  }
}
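
A minimal sketch of producing a `SignedCosign` these types accept. `Keypair::generate_with`, the `ExternalNetworkId::Bitcoin` variant, and the `Public::from_raw` constructor are assumptions about the schnorrkel and primitives crates' APIs, and all values are illustrative:

use rand_core::OsRng;
use schnorrkel::Keypair;
use serai_primitives::{BlockHash, crypto::Public, network_id::ExternalNetworkId};
use serai_cosign_types::{COSIGN_CONTEXT, Cosign, SignedCosign};

fn main() {
  // Illustrative keypair and cosign values.
  let keypair = Keypair::generate_with(OsRng);
  let cosign = Cosign {
    global_session: [0; 32],
    block_number: 1,
    block_hash: BlockHash([0; 32]),
    cosigner: ExternalNetworkId::Bitcoin, // assumed variant
  };
  // Sign the borsh-serialized cosign under the domain-separating context
  let signature = keypair.sign_simple(COSIGN_CONTEXT, &cosign.signature_message()).to_bytes();
  let signed = SignedCosign { cosign, signature };
  // `Public::from_raw` is an assumed constructor from the raw 32-byte key
  assert!(signed.verify_signature(Public::from_raw(keypair.public.to_bytes())));
}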

33  coordinator/p2p/Cargo.toml  (new file)
@@ -0,0 +1,33 @@
[package]
name = "serai-coordinator-p2p"
version = "0.1.0"
description = "Serai coordinator's P2P abstraction"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.85"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-db = { path = "../../common/db", version = "0.1" }

serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
serai-cosign = { path = "../cosign" }
tributary-sdk = { path = "../tributary-sdk" }

futures-lite = { version = "2", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../common/task", version = "0.1" }

@@ -1,6 +1,6 @@
 AGPL-3.0-only license
 
-Copyright (c) 2022-2023 Luke Parker
+Copyright (c) 2023-2025 Luke Parker
 
 This program is free software: you can redistribute it and/or modify
 it under the terms of the GNU Affero General Public License Version 3 as

3  coordinator/p2p/README.md  (new file)
@@ -0,0 +1,3 @@
# Serai Coordinator P2P

The P2P abstraction used by Serai's coordinator, and tasks over it.

42  coordinator/p2p/libp2p/Cargo.toml  (new file)
@@ -0,0 +1,42 @@
[package]
name = "serai-coordinator-libp2p-p2p"
version = "0.1.0"
description = "Serai coordinator's libp2p-based P2P backend"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/p2p/libp2p"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false
rust-version = "1.87"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }

rand_core = { version = "0.6", default-features = false, features = ["std"] }

zeroize = { version = "^1.5", default-features = false, features = ["std"] }
blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai"] }
serai-cosign = { path = "../../cosign" }
tributary-sdk = { path = "../../tributary-sdk" }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["sync"] }
libp2p = { version = "0.56", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }

log = { version = "0.4", default-features = false, features = ["std"] }
serai-task = { path = "../../../common/task", version = "0.1" }
serai-coordinator-p2p = { path = "../" }

15  coordinator/p2p/libp2p/LICENSE  (new file)
@@ -0,0 +1,15 @@
AGPL-3.0-only license

Copyright (c) 2023-2025 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.

14  coordinator/p2p/libp2p/README.md  (new file)
@@ -0,0 +1,14 @@
# Serai Coordinator libp2p P2P

A libp2p-backed P2P instantiation for Serai's coordinator.

The libp2p swarm is limited to validators from the Serai network. The swarm
does not maintain any of its own peer finding/routing infrastructure, instead
relying on the Serai network's connection information to dial peers. This does
limit our potential peers to validators reachable at the same IP address as
their Serai node (despite the two being distinct services) and not hidden
behind a NAT, yet it is also quite simple and gives us full control over who
we connect to.

Peers are decided via the internal `DialTask`, which aims to maintain a target
number of peers for each external network. This ensures cosigns are able to
propagate across the external networks which sign them.
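
As a rough sketch of that target-peer policy (a simplified restatement of the two checks in `dial.rs` below; the constants and counts are illustrative):

const TARGET_PEERS_PER_NETWORK: usize = 5;

// Dial more peers for a network only when below the target and below the
// validator count minus one, mirroring the two clauses checked in `DialTask`.
fn should_dial(current_peers: usize, validators_in_network: usize) -> bool {
  (current_peers < TARGET_PEERS_PER_NETWORK) &&
    (current_peers < validators_in_network.saturating_sub(1))
}

fn main() {
  assert!(should_dial(2, 10)); // below target, with validators left to dial
  assert!(!should_dial(2, 3)); // small set: all-but-one already connected
}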

187  coordinator/p2p/libp2p/src/authenticate.rs  (new file)
@@ -0,0 +1,187 @@
use core::{pin::Pin, future::Future};
use std::io;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

use blake2::{Digest, Blake2s256};
use schnorrkel::{Keypair, PublicKey, Signature};

use serai_client::primitives::PublicKey as Public;

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
use libp2p::{
  core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
  identity::{self, PeerId},
  noise,
};

use crate::peer_id_from_public;

const PROTOCOL: &str = "/serai/coordinator/validators";

#[derive(Clone)]
pub(crate) struct OnlyValidators {
  pub(crate) serai_key: Zeroizing<Keypair>,
  pub(crate) noise_keypair: identity::Keypair,
}

impl OnlyValidators {
  /// The ephemeral challenge protocol for authentication.
  ///
  /// We use ephemeral challenges to prevent replaying signatures from historic sessions.
  ///
  /// We don't immediately send the challenge. We only send a commitment to it. This prevents our
  /// remote peer from choosing their challenge in response to our challenge, in case there was any
  /// benefit to doing so.
  async fn challenges<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    socket: &mut noise::Output<S>,
  ) -> io::Result<([u8; 32], [u8; 32])> {
    let mut our_challenge = [0; 32];
    OsRng.fill_bytes(&mut our_challenge);

    // Write the hash of our challenge
    socket.write_all(&Blake2s256::digest(our_challenge)).await?;

    // Read the hash of their challenge
    let mut their_challenge_commitment = [0; 32];
    socket.read_exact(&mut their_challenge_commitment).await?;

    // Reveal our challenge
    socket.write_all(&our_challenge).await?;

    // Read their challenge
    let mut their_challenge = [0; 32];
    socket.read_exact(&mut their_challenge).await?;

    // Verify their challenge
    if <[u8; 32]>::from(Blake2s256::digest(their_challenge)) != their_challenge_commitment {
      Err(io::Error::other("challenge didn't match challenge commitment"))?;
    }

    Ok((our_challenge, their_challenge))
  }

  // We sign the two noise peer IDs and the ephemeral challenges.
  //
  // Signing the noise peer IDs ensures we're authenticating this noise connection. The only
  // expectations placed on noise are for it to prevent a MITM from impersonating the other end or
  // modifying any messages sent.
  //
  // Signing the ephemeral challenges prevents any replays. While that should be unnecessary, as
  // noise MAY prevent replays across sessions (even when the same key is used), and noise IDs
  // shouldn't be reused (so it should be fine to reuse an existing signature for these noise IDs),
  // it doesn't hurt.
  async fn authenticate<S: 'static + Send + Unpin + AsyncRead + AsyncWrite>(
    &self,
    socket: &mut noise::Output<S>,
    dialer_peer_id: PeerId,
    dialer_challenge: [u8; 32],
    listener_peer_id: PeerId,
    listener_challenge: [u8; 32],
  ) -> io::Result<PeerId> {
    // Write our public key
    socket.write_all(&self.serai_key.public.to_bytes()).await?;

    let msg = borsh::to_vec(&(
      dialer_peer_id.to_bytes(),
      dialer_challenge,
      listener_peer_id.to_bytes(),
      listener_challenge,
    ))
    .unwrap();
    let signature = self.serai_key.sign_simple(PROTOCOL.as_bytes(), &msg);
    socket.write_all(&signature.to_bytes()).await?;

    let mut public_key_and_sig = [0; 96];
    socket.read_exact(&mut public_key_and_sig).await?;
    let public_key = PublicKey::from_bytes(&public_key_and_sig[.. 32])
      .map_err(|_| io::Error::other("invalid public key"))?;
    let sig = Signature::from_bytes(&public_key_and_sig[32 ..])
      .map_err(|_| io::Error::other("invalid signature serialization"))?;

    public_key
      .verify_simple(PROTOCOL.as_bytes(), &msg, &sig)
      .map_err(|_| io::Error::other("invalid signature"))?;

    Ok(peer_id_from_public(Public::from_raw(public_key.to_bytes())))
  }
}

impl UpgradeInfo for OnlyValidators {
  type Info = <noise::Config as UpgradeInfo>::Info;
  type InfoIter = <noise::Config as UpgradeInfo>::InfoIter;
  fn protocol_info(&self) -> Self::InfoIter {
    // A keypair only causes an error if its sign operation fails, which is only possible with RSA,
    // which isn't used within this codebase
    noise::Config::new(&self.noise_keypair).unwrap().protocol_info()
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
  for OnlyValidators
{
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_inbound(
    self,
    socket: S,
    info: <Self as UpgradeInfo>::Info,
  ) -> <Self as InboundConnectionUpgrade<S>>::Future {
    Box::pin(async move {
      let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_inbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, dialer_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let dialer_serai_validator = self
        .authenticate(
          &mut socket,
          dialer_noise_peer_id,
          dialer_challenge,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
        )
        .await?;
      Ok((dialer_serai_validator, socket))
    })
  }
}

impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
  for OnlyValidators
{
  type Output = (PeerId, noise::Output<S>);
  type Error = io::Error;
  type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

  fn upgrade_outbound(
    self,
    socket: S,
    info: <Self as UpgradeInfo>::Info,
  ) -> <Self as OutboundConnectionUpgrade<S>>::Future {
    Box::pin(async move {
      let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
        .unwrap()
        .upgrade_outbound(socket, info)
        .await
        .map_err(io::Error::other)?;

      let (our_challenge, listener_challenge) = OnlyValidators::challenges(&mut socket).await?;
      let listener_serai_validator = self
        .authenticate(
          &mut socket,
          PeerId::from_public_key(&self.noise_keypair.public()),
          our_challenge,
          listener_noise_peer_id,
          listener_challenge,
        )
        .await?;
      Ok((listener_serai_validator, socket))
    })
  }
}
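
The commit-reveal step of the exchange above can be checked in isolation. A minimal sketch, with in-memory values standing in for the socket reads and writes:

use blake2::{Digest, Blake2s256};
use rand_core::{RngCore, OsRng};

fn main() {
  // Each side commits to its challenge before seeing the other's, then reveals.
  let mut challenge = [0u8; 32];
  OsRng.fill_bytes(&mut challenge);
  let commitment = <[u8; 32]>::from(Blake2s256::digest(challenge));

  // On reveal, the counterparty recomputes the hash and checks the commitment.
  assert_eq!(<[u8; 32]>::from(Blake2s256::digest(challenge)), commitment);
}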

127  coordinator/p2p/libp2p/src/dial.rs  (new file)
@@ -0,0 +1,127 @@
use core::future::Future;
use std::{sync::Arc, collections::HashSet};

use rand_core::{RngCore, OsRng};

use tokio::sync::mpsc;

use serai_client::{SeraiError, Serai};

use libp2p::{
  core::multiaddr::{Protocol, Multiaddr},
  swarm::dial_opts::DialOpts,
};

use serai_task::ContinuallyRan;

use crate::{PORT, Peers, validators::Validators};

const TARGET_PEERS_PER_NETWORK: usize = 5;
/*
  If we only tracked the target amount of peers per network, we'd risk being eclipsed by an
  adversary who immediately connects to us with their array of validators upon our boot. Their
  array would satisfy our target amount of peers, so we'd never seek more, enabling the adversary
  to be the only entity we peered with.

  We solve this by additionally requiring an explicit amount of peers we dialed. That means we
  randomly chose to connect to these peers.
*/
// TODO const TARGET_DIALED_PEERS_PER_NETWORK: usize = 3;

pub(crate) struct DialTask {
  serai: Arc<Serai>,
  validators: Validators,
  peers: Peers,
  to_dial: mpsc::UnboundedSender<DialOpts>,
}

impl DialTask {
  pub(crate) fn new(
    serai: Arc<Serai>,
    peers: Peers,
    to_dial: mpsc::UnboundedSender<DialOpts>,
  ) -> Self {
    DialTask { serai: serai.clone(), validators: Validators::new(serai).0, peers, to_dial }
  }
}

impl ContinuallyRan for DialTask {
  // Only run every five minutes, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 10 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      self.validators.update().await?;

      // If any network is lacking peers, try to connect to more
      let mut dialed = false;
      let peer_counts = self
        .peers
        .peers
        .read()
        .await
        .iter()
        .map(|(network, peers)| (*network, peers.len()))
        .collect::<Vec<_>>();
      for (network, peer_count) in peer_counts {
        /*
          If we don't have the target amount of peers, and we don't have all the validators in the
          set but one, attempt to connect to more validators within this set.

          The latter clause is so if there's a set with only 3 validators, we don't infinitely try
          to connect to the target amount of peers for this network as we never will. Instead, we
          only try to connect to most of the validators actually present.
        */
        if (peer_count < TARGET_PEERS_PER_NETWORK) &&
          (peer_count <
            self
              .validators
              .by_network()
              .get(&network)
              .map(HashSet::len)
              .unwrap_or(0)
              .saturating_sub(1))
        {
          let mut potential_peers = self.serai.p2p_validators(network).await?;
          for _ in 0 .. (TARGET_PEERS_PER_NETWORK - peer_count) {
            if potential_peers.is_empty() {
              break;
            }
            let index_to_dial =
              usize::try_from(OsRng.next_u64() % u64::try_from(potential_peers.len()).unwrap())
                .unwrap();
            let randomly_selected_peer = potential_peers.swap_remove(index_to_dial);

            log::info!("found peer from substrate: {randomly_selected_peer}");

            // Map the peer from a Substrate P2P network peer to a Coordinator P2P network peer
            let mapped_peer = randomly_selected_peer
              .into_iter()
              .filter_map(|protocol| match protocol {
                // Drop PeerIds from the Substrate P2P network
                Protocol::P2p(_) => None,
                // Use our own TCP port
                Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
                // Pass-through any other specifications (IPv4, IPv6, etc)
                other => Some(other),
              })
              .collect::<Multiaddr>();

            log::debug!("mapped found peer: {mapped_peer}");

            self
              .to_dial
              .send(DialOpts::unknown_peer_id().address(mapped_peer).build())
              .expect("dial receiver closed?");
            dialed = true;
          }
        }
      }

      Ok(dialed)
    }
  }
}
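
A brief illustration of the multiaddr mapping in `run_iteration`: a validator advertised at its Substrate TCP port is rewritten to the coordinator's own port, with the other address components passed through (the address below is made up, and 30333 merely a common Substrate port):

use libp2p::core::multiaddr::{Protocol, Multiaddr};

const PORT: u16 = 30563;

fn main() {
  // Hypothetical Substrate P2P address.
  let substrate: Multiaddr = "/ip4/203.0.113.5/tcp/30333".parse().unwrap();
  // Rewrite the TCP port to the coordinator's, passing other components through.
  let coordinator = substrate
    .into_iter()
    .map(|protocol| match protocol {
      Protocol::Tcp(_) => Protocol::Tcp(PORT),
      other => other,
    })
    .collect::<Multiaddr>();
  assert_eq!(coordinator.to_string(), "/ip4/203.0.113.5/tcp/30563");
}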

75  coordinator/p2p/libp2p/src/gossip.rs  (new file)
@@ -0,0 +1,75 @@
use core::time::Duration;

use blake2::{Digest, Blake2s256};

use borsh::{BorshSerialize, BorshDeserialize};

use libp2p::gossipsub::{
  IdentTopic, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder, IdentityTransform,
  AllowAllSubscriptionFilter, Behaviour,
};
pub use libp2p::gossipsub::Event;

use serai_cosign::SignedCosign;

// Block size limit + 16 KB of space for signatures/metadata
pub(crate) const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary_sdk::BLOCK_SIZE_LIMIT + 16384;

const LIBP2P_PROTOCOL: &str = "/serai/coordinator/gossip/1.0.0";
const BASE_TOPIC: &str = "/";

fn topic_for_tributary(tributary: [u8; 32]) -> IdentTopic {
  IdentTopic::new(format!("/tributary/{}", hex::encode(tributary)))
}

#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Message {
  Tributary { tributary: [u8; 32], message: Vec<u8> },
  Cosign(SignedCosign),
}

impl Message {
  pub(crate) fn topic(&self) -> IdentTopic {
    match self {
      Message::Tributary { tributary, .. } => topic_for_tributary(*tributary),
      Message::Cosign(_) => IdentTopic::new(BASE_TOPIC),
    }
  }
}

pub(crate) type Behavior = Behaviour<IdentityTransform, AllowAllSubscriptionFilter>;

pub(crate) fn new_behavior() -> Behavior {
  // The latency used by the Tendermint protocol, used here as the gossip epoch duration
  // libp2p-rs defaults to 1 second, whereas ours will be ~2
  let heartbeat_interval = tributary_sdk::tendermint::LATENCY_TIME;
  // The amount of heartbeats which will occur within a single Tributary block
  let heartbeats_per_block =
    tributary_sdk::tendermint::TARGET_BLOCK_TIME.div_ceil(heartbeat_interval);
  // libp2p-rs defaults to 5, whereas ours will be ~8
  let heartbeats_to_keep = 2 * heartbeats_per_block;
  // libp2p-rs defaults to 3, whereas ours will be ~4
  let heartbeats_to_gossip = heartbeats_per_block;

  let config = ConfigBuilder::default()
    .protocol_id_prefix(LIBP2P_PROTOCOL)
    .history_length(usize::try_from(heartbeats_to_keep).unwrap())
    .history_gossip(usize::try_from(heartbeats_to_gossip).unwrap())
    .heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
    .max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
    .duplicate_cache_time(Duration::from_millis((heartbeats_to_keep * heartbeat_interval).into()))
    .validation_mode(ValidationMode::Anonymous)
    // Uses a content-based message ID to avoid duplicates as much as possible
    .message_id_fn(|msg| {
      MessageId::new(&Blake2s256::digest([msg.topic.as_str().as_bytes(), &msg.data].concat()))
    })
    .build();

  let mut gossip = Behavior::new(MessageAuthenticity::Anonymous, config.unwrap()).unwrap();

  // Subscribe to the base topic
  let topic = IdentTopic::new(BASE_TOPIC);
  let _ = gossip.subscribe(&topic);

  gossip
}
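
To make the comments in `new_behavior` concrete, a sketch of the arithmetic under assumed values; `LATENCY_TIME` and `TARGET_BLOCK_TIME` aren't quoted here, so ~2s and ~8s are inferred from the "~2", "~8", and "~4" notes:

fn main() {
  // Assumed values in milliseconds, purely for illustration.
  let latency_time: u64 = 2000; // gossip heartbeat interval, ~2s
  let target_block_time: u64 = 8000; // Tributary block time, ~8s

  let heartbeats_per_block = target_block_time.div_ceil(latency_time);
  assert_eq!(heartbeats_per_block, 4); // history gossiped (history_gossip)
  assert_eq!(2 * heartbeats_per_block, 8); // history kept (history_length)
}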

416  coordinator/p2p/libp2p/src/lib.rs  (new file)
@@ -0,0 +1,416 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::{future::Future, time::Duration};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use rand_core::{RngCore, OsRng};

use zeroize::Zeroizing;
use schnorrkel::Keypair;

use serai_client::{
  primitives::{ExternalNetworkId, PublicKey},
  validator_sets::primitives::ExternalValidatorSet,
  Serai,
};

use tokio::sync::{mpsc, oneshot, Mutex, RwLock};

use serai_task::{Task, ContinuallyRan};

use serai_cosign::SignedCosign;

use libp2p::{
  multihash::Multihash,
  identity::{self, PeerId},
  tcp::Config as TcpConfig,
  yamux, allow_block_list,
  connection_limits::{self, ConnectionLimits},
  swarm::NetworkBehaviour,
  SwarmBuilder,
};

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// A struct to sync the validators from the Serai node in order to keep track of them.
mod validators;
use validators::UpdateValidatorsTask;

/// The authentication protocol upgrade to limit the P2P network to active validators.
mod authenticate;
use authenticate::OnlyValidators;

/// The ping behavior, used to ensure connection latency is below the limit
mod ping;

/// The request-response messages and behavior
mod reqres;
use reqres::{InboundRequestId, Request, Response};

/// The gossip messages and behavior
mod gossip;
use gossip::Message;

/// The swarm task, running it and dispatching to/from it
mod swarm;
use swarm::SwarmTask;

/// The dial task, to find new peers to connect to
mod dial;
use dial::DialTask;

const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

fn peer_id_from_public(public: PublicKey) -> PeerId {
  // 0 represents the identity Multihash, meaning no hash was performed
  // It's an internal constant so we can't refer to the constant inside libp2p
  PeerId::from_multihash(Multihash::wrap(0, &public.0).unwrap()).unwrap()
}

/// The representation of a peer.
pub struct Peer<'a> {
  outbound_requests: &'a mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,
  id: PeerId,
}
impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>> {
    async move {
      const HEARTBEAT_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::Heartbeat(heartbeat);
      let (sender, receiver) = oneshot::channel();
      self
        .outbound_requests
        .send((self.id, request, sender))
        .expect("outbound requests recv channel was dropped?");
      if let Ok(Ok(Response::Blocks(blocks))) =
        tokio::time::timeout(HEARTBEAT_TIMEOUT, receiver).await
      {
        Some(blocks)
      } else {
        None
      }
    }
  }
}

#[derive(Clone)]
struct Peers {
  peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
}

// Consider adding identify/kad/autonat/rendezvous/(relay + dcutr). While we currently use the
// Serai network for peers, we could use it solely for bootstrapping/as a fallback.
#[derive(NetworkBehaviour)]
struct Behavior {
  // Used to only allow Serai validators as peers
  allow_list: allow_block_list::Behaviour<allow_block_list::AllowedPeers>,
  // Used to limit each peer to a single connection
  connection_limits: connection_limits::Behaviour,
  // Used to ensure connection latency is within tolerances
  ping: ping::Behavior,
  // Used to request data from specific peers
  reqres: reqres::Behavior,
  // Used to broadcast messages to all other peers subscribed to a topic
  gossip: gossip::Behavior,
}

#[allow(clippy::type_complexity)]
struct Libp2pInner {
  peers: Peers,

  gossip: mpsc::UnboundedSender<Message>,
  outbound_requests: mpsc::UnboundedSender<(PeerId, Request, oneshot::Sender<Response>)>,

  tributary_gossip: Mutex<mpsc::UnboundedReceiver<([u8; 32], Vec<u8>)>>,

  signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
  signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,

  heartbeat_requests:
    Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
  inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
}

/// The libp2p-backed P2P implementation.
///
/// The P2p trait implementation does not support backpressure and is expected to be fully
/// utilized. Failure to poll the entire API will cause unbounded memory growth.
#[derive(Clone)]
pub struct Libp2p(Arc<Libp2pInner>);

impl Libp2p {
  /// Create a new libp2p-backed P2P instance.
  ///
  /// This will spawn all of the internal tasks necessary for functioning.
  pub fn new(serai_key: &Zeroizing<Keypair>, serai: Arc<Serai>) -> Libp2p {
    // Define the object we track peers with
    let peers = Peers { peers: Arc::new(RwLock::new(HashMap::new())) };

    // Define the dial task
    let (dial_task_def, dial_task) = Task::new();
    let (to_dial_send, to_dial_recv) = mpsc::unbounded_channel();
    tokio::spawn(
      DialTask::new(serai.clone(), peers.clone(), to_dial_send)
        .continually_run(dial_task_def, vec![]),
    );

    let swarm = {
      let new_only_validators = |noise_keypair: &identity::Keypair| -> Result<_, ()> {
        Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
      };

      let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
        .with_tokio()
        .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
        .unwrap()
        .with_behaviour(|_| Behavior {
          allow_list: allow_block_list::Behaviour::default(),
          // Limit each peer to a single connection
          connection_limits: connection_limits::Behaviour::new(
            ConnectionLimits::default().with_max_established_per_peer(Some(1)),
          ),
          ping: ping::new_behavior(),
          reqres: reqres::new_behavior(),
          gossip: gossip::new_behavior(),
        })
        .unwrap()
        .with_swarm_config(|config| {
          config
            .with_idle_connection_timeout(ping::INTERVAL + ping::TIMEOUT + Duration::from_secs(5))
        })
        .build();
      swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm.listen_on(format!("/ip6/::/tcp/{PORT}").parse().unwrap()).unwrap();
      swarm
    };

    let (swarm_validators, validator_changes) = UpdateValidatorsTask::spawn(serai);

    let (gossip_send, gossip_recv) = mpsc::unbounded_channel();
    let (signed_cosigns_send, signed_cosigns_recv) = mpsc::unbounded_channel();
    let (tributary_gossip_send, tributary_gossip_recv) = mpsc::unbounded_channel();

    let (outbound_requests_send, outbound_requests_recv) = mpsc::unbounded_channel();

    let (heartbeat_requests_send, heartbeat_requests_recv) = mpsc::unbounded_channel();
    let (notable_cosign_requests_send, notable_cosign_requests_recv) = mpsc::unbounded_channel();
    let (inbound_request_responses_send, inbound_request_responses_recv) =
      mpsc::unbounded_channel();

    // Create the swarm task
    SwarmTask::spawn(
      dial_task,
      to_dial_recv,
      swarm_validators,
      validator_changes,
      peers.clone(),
      swarm,
      gossip_recv,
      signed_cosigns_send.clone(),
      tributary_gossip_send,
      outbound_requests_recv,
      heartbeat_requests_send,
      notable_cosign_requests_send,
      inbound_request_responses_recv,
    );

    Libp2p(Arc::new(Libp2pInner {
      peers,

      gossip: gossip_send,
      outbound_requests: outbound_requests_send,

      tributary_gossip: Mutex::new(tributary_gossip_recv),

      signed_cosigns: Mutex::new(signed_cosigns_recv),
      signed_cosigns_send,

      heartbeat_requests: Mutex::new(heartbeat_requests_recv),
      notable_cosign_requests: Mutex::new(notable_cosign_requests_recv),
      inbound_request_responses: inbound_request_responses_send,
    }))
  }
}

impl tributary_sdk::P2p for Libp2p {
  fn broadcast(&self, tributary: [u8; 32], message: Vec<u8>) -> impl Send + Future<Output = ()> {
    async move {
      self
        .0
        .gossip
        .send(Message::Tributary { tributary, message })
        .expect("gossip recv channel was dropped?");
    }
  }
}

impl serai_cosign::RequestNotableCosigns for Libp2p {
  type Error = ();

  fn request_notable_cosigns(
    &self,
    global_session: [u8; 32],
  ) -> impl Send + Future<Output = Result<(), Self::Error>> {
    async move {
      const AMOUNT_OF_PEERS_TO_REQUEST_FROM: usize = 3;
      const NOTABLE_COSIGNS_TIMEOUT: Duration = Duration::from_secs(5);

      let request = Request::NotableCosigns { global_session };

      let peers = self.0.peers.peers.read().await.clone();
      // HashSet of all peers
      let peers = peers.into_values().flat_map(<_>::into_iter).collect::<HashSet<_>>();
      // Vec of all peers
      let mut peers = peers.into_iter().collect::<Vec<_>>();

      let mut channels = Vec::with_capacity(AMOUNT_OF_PEERS_TO_REQUEST_FROM);
      for _ in 0 .. AMOUNT_OF_PEERS_TO_REQUEST_FROM {
        if peers.is_empty() {
          break;
        }
        let i = usize::try_from(OsRng.next_u64() % u64::try_from(peers.len()).unwrap()).unwrap();
        let peer = peers.swap_remove(i);

        let (sender, receiver) = oneshot::channel();
        self
          .0
          .outbound_requests
          .send((peer, request, sender))
          .expect("outbound requests recv channel was dropped?");
        channels.push(receiver);
      }

      // We could reduce our latency by using FuturesUnordered here but the latency isn't a concern
      for channel in channels {
        if let Ok(Ok(Response::NotableCosigns(cosigns))) =
          tokio::time::timeout(NOTABLE_COSIGNS_TIMEOUT, channel).await
        {
          for cosign in cosigns {
            self
              .0
              .signed_cosigns_send
              .send(cosign)
              .expect("signed_cosigns recv in this object was dropped?");
          }
        }
      }

      Ok(())
    }
  }
}

impl serai_coordinator_p2p::P2p for Libp2p {
  type Peer<'a> = Peer<'a>;

  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
    async move {
      let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
        return vec![];
      };
      let mut res = vec![];
      for id in peer_ids {
        res.push(Peer { outbound_requests: &self.0.outbound_requests, id });
      }
      res
    }
  }

  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()> {
    async move {
      self.0.gossip.send(Message::Cosign(cosign)).expect("gossip recv channel was dropped?");
    }
  }

  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)> {
    async move {
      let (request_id, set, latest_block_hash) = self
        .0
        .heartbeat_requests
        .lock()
        .await
        .recv()
        .await
        .expect("heartbeat_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          // The swarm task expects us to respond to every request. If the caller drops this
          // channel, we'll receive `Err` and respond with `vec![]`, safely satisfying that bound
          // without requiring the caller send a value down this channel
          let response = if let Ok(blocks) = receiver.await {
            Response::Blocks(blocks)
          } else {
            Response::Blocks(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (Heartbeat { set, latest_block_hash }, sender)
    }
  }

  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)> {
    async move {
      let (request_id, global_session) = self
        .0
        .notable_cosign_requests
        .lock()
        .await
        .recv()
        .await
        .expect("notable_cosign_requests_send was dropped?");
      let (sender, receiver) = oneshot::channel();
      tokio::spawn({
        let respond = self.0.inbound_request_responses.clone();
        async move {
          let response = if let Ok(notable_cosigns) = receiver.await {
            Response::NotableCosigns(notable_cosigns)
          } else {
            Response::NotableCosigns(vec![])
          };
          respond
            .send((request_id, response))
            .expect("inbound_request_responses_recv was dropped?");
        }
      });
      (global_session, sender)
    }
  }

  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)> {
    async move {
      self.0.tributary_gossip.lock().await.recv().await.expect("tributary_gossip send was dropped?")
    }
  }

  fn cosign(&self) -> impl Send + Future<Output = SignedCosign> {
    async move {
      self
        .0
        .signed_cosigns
        .lock()
        .await
        .recv()
        .await
        .expect("signed_cosigns couldn't recv despite send in same object?")
    }
  }
}

17  coordinator/p2p/libp2p/src/ping.rs  (new file)
@@ -0,0 +1,17 @@
use core::time::Duration;

use tributary_sdk::tendermint::LATENCY_TIME;

use libp2p::ping::{self, Config, Behaviour};
pub use ping::Event;

pub(crate) const INTERVAL: Duration = Duration::from_secs(30);
// LATENCY_TIME represents the maximum latency for message delivery. Sending the ping, and
// receiving the pong, each have to occur within this time bound to validate the connection. We
// enforce that, as best we can, by requiring the round-trip be within twice the allowed latency.
pub(crate) const TIMEOUT: Duration = Duration::from_millis((2 * LATENCY_TIME) as u64);

pub(crate) type Behavior = Behaviour;
pub(crate) fn new_behavior() -> Behavior {
  Behavior::new(Config::default().with_interval(INTERVAL).with_timeout(TIMEOUT))
}

134  coordinator/p2p/libp2p/src/reqres.rs  (new file)
@@ -0,0 +1,134 @@
use core::{fmt, time::Duration};
use std::io;

use async_trait::async_trait;

use borsh::{BorshSerialize, BorshDeserialize};

use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};

use libp2p::request_response::{
  self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
};
pub use request_response::{InboundRequestId, Message};

use serai_cosign::SignedCosign;

use serai_coordinator_p2p::{Heartbeat, TributaryBlockWithCommit};

/// The maximum message size for the request-response protocol
// This is derived from the heartbeat message size as it's our largest message
pub(crate) const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
  1024 + serai_coordinator_p2p::heartbeat::BATCH_SIZE_LIMIT;

const PROTOCOL: &str = "/serai/coordinator/reqres/1.0.0";

/// Requests which can be made via the request-response protocol.
#[derive(Clone, Copy, Debug, BorshSerialize, BorshDeserialize)]
pub(crate) enum Request {
  /// A heartbeat informing our peers of our latest block, for the specified blockchain, on regular
  /// intervals.
  ///
  /// If our peers have more blocks than us, they're expected to respond with those blocks.
  Heartbeat(Heartbeat),
  /// A request for the notable cosigns for a global session.
  NotableCosigns { global_session: [u8; 32] },
}

/// Responses which can be received via the request-response protocol.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub(crate) enum Response {
  None,
  Blocks(Vec<TributaryBlockWithCommit>),
  NotableCosigns(Vec<SignedCosign>),
}
impl fmt::Debug for Response {
  fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
    match self {
      Response::None => fmt.debug_struct("Response::None").finish(),
      Response::Blocks(_) => fmt.debug_struct("Response::Blocks").finish_non_exhaustive(),
      Response::NotableCosigns(_) => {
        fmt.debug_struct("Response::NotableCosigns").finish_non_exhaustive()
      }
    }
  }
}

/// The codec used for the request-response protocol.
///
/// We don't use CBOR or JSON, but use borsh to create `Vec<u8>`s we then length-prefix. While,
/// ideally, we'd use borsh directly with the `io` traits defined here, they're async and there
/// isn't an amenable API within borsh for incremental deserialization.
#[derive(Default, Clone, Copy, Debug)]
pub(crate) struct Codec;
impl Codec {
  async fn read<M: BorshDeserialize>(io: &mut (impl Unpin + AsyncRead)) -> io::Result<M> {
    let mut len = [0; 4];
    io.read_exact(&mut len).await?;
    let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
    if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
      Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
    }
    // This may be a non-trivial allocation a peer can easily cause.
    // While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
    // the max message size should be sufficiently sane
    let mut buf = vec![0; len];
    io.read_exact(&mut buf).await?;
    let mut buf = buf.as_slice();
    let res = M::deserialize(&mut buf)?;
    if !buf.is_empty() {
      Err(io::Error::other("p2p message had extra data appended to it"))?;
    }
    Ok(res)
  }
  async fn write(io: &mut (impl Unpin + AsyncWrite), msg: &impl BorshSerialize) -> io::Result<()> {
    let msg = borsh::to_vec(msg).unwrap();
    io.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await?;
    io.write_all(&msg).await
  }
}
#[async_trait]
impl CodecTrait for Codec {
  type Protocol = &'static str;
  type Request = Request;
  type Response = Response;

  async fn read_request<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Request> {
    Self::read(io).await
  }
  async fn read_response<R: Send + Unpin + AsyncRead>(
    &mut self,
    _: &Self::Protocol,
    io: &mut R,
  ) -> io::Result<Response> {
    Self::read(io).await
  }
  async fn write_request<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    req: Request,
  ) -> io::Result<()> {
    Self::write(io, &req).await
  }
  async fn write_response<W: Send + Unpin + AsyncWrite>(
    &mut self,
    _: &Self::Protocol,
    io: &mut W,
    res: Response,
  ) -> io::Result<()> {
    Self::write(io, &res).await
  }
}

pub(crate) type Event = GenericEvent<Request, Response>;

pub(crate) type Behavior = Behaviour<Codec>;
pub(crate) fn new_behavior() -> Behavior {
  let config = Config::default().with_request_timeout(Duration::from_secs(5));
  Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
}
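
A minimal sketch of the framing `Codec` applies, shown synchronously over a `Vec<u8>` rather than the async `io` traits (the payload type is illustrative):

use borsh::{BorshSerialize, BorshDeserialize};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
struct Example {
  global_session: [u8; 32],
}

fn main() {
  // Write: a little-endian u32 length prefix, then the borsh-serialized bytes.
  let msg = Example { global_session: [1; 32] };
  let body = borsh::to_vec(&msg).unwrap();
  let mut framed = u32::try_from(body.len()).unwrap().to_le_bytes().to_vec();
  framed.extend(&body);

  // Read: parse the prefix, then deserialize exactly that many bytes.
  let len = usize::try_from(u32::from_le_bytes(framed[.. 4].try_into().unwrap())).unwrap();
  let decoded = Example::deserialize(&mut &framed[4 .. 4 + len]).unwrap();
  assert_eq!(msg, decoded);
}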
360
coordinator/p2p/libp2p/src/swarm.rs
Normal file
360
coordinator/p2p/libp2p/src/swarm.rs
Normal file
@@ -0,0 +1,360 @@
|
|||||||
|
use std::{
|
||||||
|
sync::Arc,
|
||||||
|
collections::{HashSet, HashMap},
|
||||||
|
time::{Duration, Instant},
|
||||||
|
};
|
||||||
|
|
||||||
|
use borsh::BorshDeserialize;
|
||||||
|
|
||||||
|
use serai_client::validator_sets::primitives::ExternalValidatorSet;
|
||||||
|
|
||||||
|
use tokio::sync::{mpsc, oneshot, RwLock};
|
||||||
|
|
||||||
|
use serai_task::TaskHandle;
|
||||||
|
|
||||||
|
use serai_cosign::SignedCosign;
|
||||||
|
|
||||||
|
use futures_util::StreamExt;
|
||||||
|
use libp2p::{
|
||||||
|
identity::PeerId,
|
||||||
|
request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
|
||||||
|
swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
|
||||||
|
};
|
||||||
|
|
||||||
|
use serai_coordinator_p2p::Heartbeat;
|
||||||
|
|
||||||
|
use crate::{
|
||||||
|
Peers, BehaviorEvent, Behavior,
|
||||||
|
validators::{self, Validators},
|
||||||
|
ping,
|
||||||
|
reqres::{self, Request, Response},
|
||||||
|
gossip,
|
||||||
|
};
|
||||||
|
|
||||||
|
const TIME_BETWEEN_REBUILD_PEERS: Duration = Duration::from_secs(10 * 60);
|
||||||
|
|
||||||
|
/*
|
||||||
|
`SwarmTask` handles everything we need the `Swarm` object for. The goal is to minimize the
|
||||||
|
contention on this task. Unfortunately, the `Swarm` object itself is needed for a variety of
|
||||||
|
purposes making this a rather large task.
|
||||||
|
|
||||||
|
Responsibilities include:
|
||||||
|
- Actually dialing new peers (the selection process occurs in another task)
|
||||||
|
- Maintaining the peers structure (as we need the Swarm object to see who our peers are)
|
||||||
|
- Gossiping messages
|
||||||
|
- Dispatching gossiped messages
|
||||||
|
- Sending requests
|
||||||
|
- Dispatching responses to requests
|
||||||
|
- Dispatching received requests
|
||||||
|
- Sending responses
|
||||||
|
*/
pub(crate) struct SwarmTask {
  dial_task: TaskHandle,
  to_dial: mpsc::UnboundedReceiver<DialOpts>,
  last_dial_task_run: Instant,

  validators: Arc<RwLock<Validators>>,
  validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
  peers: Peers,
  rebuild_peers_at: Instant,

  swarm: Swarm<Behavior>,

  gossip: mpsc::UnboundedReceiver<gossip::Message>,
  signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
  tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

  outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
  outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,

  inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
  heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
  notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
  inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
}

impl SwarmTask {
  fn handle_gossip(&mut self, event: gossip::Event) {
    match event {
      gossip::Event::Message { message, .. } => {
        let Ok(message) = gossip::Message::deserialize(&mut message.data.as_slice()) else {
          // TODO: Penalize the PeerId which created this message, which requires authenticating
          // each message OR moving to explicit acknowledgement before re-gossiping
          return;
        };
        match message {
          gossip::Message::Tributary { tributary, message } => {
            let _: Result<_, _> = self.tributary_gossip.send((tributary, message));
          }
          gossip::Message::Cosign(signed_cosign) => {
            let _: Result<_, _> = self.signed_cosigns.send(signed_cosign);
          }
        }
      }
      gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
      gossip::Event::GossipsubNotSupported { peer_id } |
      gossip::Event::SlowPeer { peer_id, .. } => {
        let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
      }
    }
  }

  fn handle_reqres(&mut self, event: reqres::Event) {
    match event {
      reqres::Event::Message { message, .. } => match message {
        reqres::Message::Request { request_id, request, channel } => match request {
          reqres::Request::Heartbeat(Heartbeat { set, latest_block_hash }) => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> =
              self.heartbeat_requests.send((request_id, set, latest_block_hash));
          }
          reqres::Request::NotableCosigns { global_session } => {
            self.inbound_request_response_channels.insert(request_id, channel);
            let _: Result<_, _> = self.notable_cosign_requests.send((request_id, global_session));
          }
        },
        reqres::Message::Response { request_id, response } => {
          if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
            let _: Result<_, _> = channel.send(response);
          }
        }
      },
      reqres::Event::OutboundFailure { request_id, .. } => {
        // Send None as the response for the request
        if let Some(channel) = self.outbound_request_responses.remove(&request_id) {
          let _: Result<_, _> = channel.send(Response::None);
        }
      }
      reqres::Event::InboundFailure { .. } | reqres::Event::ResponseSent { .. } => {}
    }
  }

  async fn run(mut self) {
    loop {
      let time_till_rebuild_peers =
        self.rebuild_peers_at.saturating_duration_since(Instant::now());

      tokio::select! {
        // If the validators have changed, update the allow list
        validator_changes = self.validator_changes.recv() => {
          let validator_changes = validator_changes.expect("validators update task shut down?");
          let behavior = &mut self.swarm.behaviour_mut().allow_list;
          for removed in validator_changes.removed {
            behavior.disallow_peer(removed);
          }
          for added in validator_changes.added {
            behavior.allow_peer(added);
          }
        }

        // Dial peers we're instructed to
        dial_opts = self.to_dial.recv() => {
          let dial_opts = dial_opts.expect("DialTask was closed?");
          let _: Result<_, _> = self.swarm.dial(dial_opts);
        }

        /*
          Rebuild the peers every 10 minutes.

          This protects against any race conditions/edge cases we have in our logic to track
          peers, along with unrepresented behavior such as when a peer changes the networks
          they're active in. This lets the peer tracking logic simply be 'good enough' to not
          become horribly corrupt over the span of `TIME_BETWEEN_REBUILD_PEERS`.

          We also use this to disconnect all peers who are no longer active in any network.
        */
        () = tokio::time::sleep(time_till_rebuild_peers) => {
          let validators_by_network = self.validators.read().await.by_network().clone();
          let connected_peers = self.swarm.connected_peers().copied().collect::<HashSet<_>>();

          // Build the new peers object
          let mut peers = HashMap::new();
          for (network, validators) in validators_by_network {
            peers.insert(network, validators.intersection(&connected_peers).copied().collect());
          }

          // Write the new peers object
          *self.peers.peers.write().await = peers;
          self.rebuild_peers_at = Instant::now() + TIME_BETWEEN_REBUILD_PEERS;
        }

        // Handle swarm events
        event = self.swarm.next() => {
          // `Swarm::next` will never return `Poll::Ready(None)`
          // https://docs.rs/
          //   libp2p/0.54.1/libp2p/struct.Swarm.html#impl-Stream-for-Swarm%3CTBehaviour%3E
          let event = event.unwrap();
          match event {
            // New connection, so update peers
            SwarmEvent::ConnectionEstablished { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).insert(peer_id);
              }
            }

            // Connection closed, so update peers
            SwarmEvent::ConnectionClosed { peer_id, .. } => {
              let Some(networks) =
                self.validators.read().await.networks(&peer_id).cloned() else { continue };
              let mut peers = self.peers.peers.write().await;
              for network in networks {
                peers.entry(network).or_insert_with(HashSet::new).remove(&peer_id);
              }

              /*
                We want to re-run the dial task, since we lost a peer, in case we should find new
                peers. This opens a DoS where a validator repeatedly opens/closes connections to
                force iterations of the dial task. We prevent this by setting a minimum distance
                since the last explicit iteration.

                This is suboptimal. If we have several disconnects in immediate proximity, we'll
                trigger the dial task upon the first (where we may still have enough peers we
                shouldn't dial more) but not the last (where we may have so few peers left we
                should dial more). This is accepted as the dial task will eventually run on its
                natural timer.
              */
              const MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL: Duration = Duration::from_secs(60);
              let now = Instant::now();
              if (self.last_dial_task_run + MINIMUM_TIME_SINCE_LAST_EXPLICIT_DIAL) < now {
                self.dial_task.run_now();
                self.last_dial_task_run = now;
              }
            }

            SwarmEvent::Behaviour(event) => {
              match event {
                BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
                  // This *is* an exhaustive match as these events are empty enums
                  match event {}
                }
                BehaviorEvent::Ping(ping::Event { peer: _, connection, result }) => {
                  if result.is_err() {
                    self.swarm.close_connection(connection);
                  }
                }
                BehaviorEvent::Reqres(event) => self.handle_reqres(event),
                BehaviorEvent::Gossip(event) => self.handle_gossip(event),
              }
            }

            // We don't handle any of these
            SwarmEvent::IncomingConnection { .. } |
            SwarmEvent::IncomingConnectionError { .. } |
            SwarmEvent::OutgoingConnectionError { .. } |
            SwarmEvent::NewListenAddr { .. } |
            SwarmEvent::ExpiredListenAddr { .. } |
            SwarmEvent::ListenerClosed { .. } |
            SwarmEvent::ListenerError { .. } |
            SwarmEvent::Dialing { .. } |
            SwarmEvent::NewExternalAddrCandidate { .. } |
            SwarmEvent::ExternalAddrConfirmed { .. } |
            SwarmEvent::ExternalAddrExpired { .. } |
            SwarmEvent::NewExternalAddrOfPeer { .. } => {}

            // Required as SwarmEvent is non-exhaustive
            _ => log::warn!("unhandled SwarmEvent: {event:?}"),
          }
        }

        message = self.gossip.recv() => {
          let message = message.expect("channel for messages to gossip was closed?");
          let topic = message.topic();
          let message = borsh::to_vec(&message).unwrap();

          /*
            If we're sending a message for this topic, it's because this topic is relevant to us.
            Subscribe to it.

            We create topics roughly weekly, one per validator set/session. Once present in a
            topic, we're interested in all messages for it until the validator set/session
            retires. Then there should no longer be any messages for the topic, as we should drop
            the Tributary which creates the messages.

            We use this as an argument to not bother implementing unsubscribing from topics.
            They're incredibly infrequently created and old topics shouldn't still have messages
            published to them. Having a coordinator reboot serve as our method of unsubscribing
            is fine.

            Alternatively, we could route an API to determine when a topic is retired, or retire
            any topics we haven't sent messages on in the past hour.
          */
          let behavior = self.swarm.behaviour_mut();
          let _: Result<_, _> = behavior.gossip.subscribe(&topic);
          /*
            This may be an error of `InsufficientPeers`. If so, we could ask DialTask to dial
            more peers for this network. We don't, as we assume DialTask will detect the lack of
            peers for this network and will already successfully handle this.
          */
          let _: Result<_, _> = behavior.gossip.publish(topic.hash(), message);
        }

        request = self.outbound_requests.recv() => {
          let (peer, request, response_channel) =
            request.expect("channel for requests was closed?");
          let request_id = self.swarm.behaviour_mut().reqres.send_request(&peer, request);
          self.outbound_request_responses.insert(request_id, response_channel);
        }

        response = self.inbound_request_responses.recv() => {
          let (request_id, response) =
            response.expect("channel for inbound request responses was closed?");
          if let Some(channel) = self.inbound_request_response_channels.remove(&request_id) {
            let _: Result<_, _> =
              self.swarm.behaviour_mut().reqres.send_response(channel, response);
          }
        }
      }
    }
  }

  #[allow(clippy::too_many_arguments)]
  pub(crate) fn spawn(
    dial_task: TaskHandle,
    to_dial: mpsc::UnboundedReceiver<DialOpts>,

    validators: Arc<RwLock<Validators>>,
    validator_changes: mpsc::UnboundedReceiver<validators::Changes>,
    peers: Peers,

    swarm: Swarm<Behavior>,

    gossip: mpsc::UnboundedReceiver<gossip::Message>,
    signed_cosigns: mpsc::UnboundedSender<SignedCosign>,
    tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

    outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,

    heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
    notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
    inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
  ) {
    tokio::spawn(
      SwarmTask {
        dial_task,
        to_dial,
        last_dial_task_run: Instant::now(),

        validators,
        validator_changes,
        peers,
        rebuild_peers_at: Instant::now() + TIME_BETWEEN_REBUILD_PEERS,

        swarm,

        gossip,
        signed_cosigns,
        tributary_gossip,

        outbound_requests,
        outbound_request_responses: HashMap::new(),

        inbound_request_response_channels: HashMap::new(),
        heartbeat_requests,
        notable_cosign_requests,
        inbound_request_responses,
      }
      .run(),
    );
  }
}

coordinator/p2p/libp2p/src/validators.rs (new file, 221 lines)
@@ -0,0 +1,221 @@

use core::{borrow::Borrow, future::Future};
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use serai_client::{
  primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
};

use serai_task::{Task, ContinuallyRan};

use libp2p::PeerId;

use futures_util::stream::{StreamExt, FuturesUnordered};
use tokio::sync::{mpsc, RwLock};

use crate::peer_id_from_public;

pub(crate) struct Changes {
  pub(crate) removed: HashSet<PeerId>,
  pub(crate) added: HashSet<PeerId>,
}
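
// Note a validator present in both the outgoing and incoming views is only reported within
// `added`, never `removed`; see `incorporate_session_changes` below.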

pub(crate) struct Validators {
  serai: Arc<Serai>,

  // A cache for which session we're populated with the validators of
  sessions: HashMap<ExternalNetworkId, Session>,
  // The validators by network
  by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
  // The validators and their networks
  validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,

  // The channel to send the changes down
  changes: mpsc::UnboundedSender<Changes>,
}

impl Validators {
  pub(crate) fn new(serai: Arc<Serai>) -> (Self, mpsc::UnboundedReceiver<Changes>) {
    let (send, recv) = mpsc::unbounded_channel();
    let validators = Validators {
      serai,
      sessions: HashMap::new(),
      by_network: HashMap::new(),
      validators: HashMap::new(),
      changes: send,
    };
    (validators, recv)
  }

  async fn session_changes(
    serai: impl Borrow<Serai>,
    sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
    /*
      This uses the latest finalized block, not the latest cosigned block, which should be fine
      as, in the worst case, we'd connect to unexpected validators. They still shouldn't be able
      to bypass the cosign protocol unless a historical global session was malicious, in which
      case the cosign protocol already breaks.

      Besides, we can't connect to historical validators, only the current validators.
    */
    let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
    let temporal_serai = temporal_serai.validator_sets();

    let mut session_changes = vec![];
    {
      // FuturesUnordered can be bad practice, as it'll cause timeouts if infrequently polled,
      // but we poll it till it yields all futures with the most minimal processing possible
      let mut futures = FuturesUnordered::new();
      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        let sessions = sessions.borrow();
        futures.push(async move {
          let session = match temporal_serai.session(network.into()).await {
            Ok(Some(session)) => session,
            Ok(None) => return Ok(None),
            Err(e) => return Err(e),
          };

          if sessions.get(&network) == Some(&session) {
            Ok(None)
          } else {
            match temporal_serai.active_network_validators(network.into()).await {
              Ok(validators) => Ok(Some((
                network,
                session,
                validators.into_iter().map(peer_id_from_public).collect(),
              ))),
              Err(e) => Err(e),
            }
          }
        });
      }
      while let Some(session_change) = futures.next().await {
        if let Some(session_change) = session_change? {
          session_changes.push(session_change);
        }
      }
    }

    Ok(session_changes)
  }

  fn incorporate_session_changes(
    &mut self,
    session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
  ) {
    let mut removed = HashSet::new();
    let mut added = HashSet::new();

    for (network, session, validators) in session_changes {
      // Remove the existing validators
      for validator in self.by_network.remove(&network).unwrap_or_else(HashSet::new) {
        // Get all networks this validator is in
        let mut networks = self.validators.remove(&validator).unwrap();
        // Remove this one
        networks.remove(&network);
        if !networks.is_empty() {
          // Insert the networks back if the validator was present in other networks
          self.validators.insert(validator, networks);
        } else {
          // Because this validator is no longer present in any network, mark them as removed
          /*
            This isn't accurate. The validator isn't present in the latest session for this
            network. The validator was present in the prior session, which has yet to retire.
            Our lack of explicit inclusion for both the prior session and the current session
            causes only the validators mutually present in both sessions to be responsible for
            all actions still ongoing as the prior validator set retires.

            TODO: Fix this
          */
          removed.insert(validator);
        }
      }

      // Add the new validators
      for validator in validators.iter().copied() {
        self.validators.entry(validator).or_insert_with(HashSet::new).insert(network);
        added.insert(validator);
      }
      self.by_network.insert(network, validators);

      // Update the session we have populated
      self.sessions.insert(network, session);
    }

    // Only flag validators for removal if they weren't simultaneously added by these changes
    removed.retain(|validator| !added.contains(validator));
    // Send the changes, dropping the error
    // This lets the caller opt out of change notifications by dropping the receiver
    let _: Result<_, _> = self.changes.send(Changes { removed, added });
  }

  /// Update the view of the validators.
  pub(crate) async fn update(&mut self) -> Result<(), SeraiError> {
    let session_changes = Self::session_changes(&*self.serai, &self.sessions).await?;
    self.incorporate_session_changes(session_changes);
    Ok(())
  }

  pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
    &self.by_network
  }

  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
    self.validators.get(peer_id)
  }
}

/// A task which updates a set of validators.
///
/// The validators managed by this task will have their exclusive lock held for a minimal amount
/// of time while the update occurs, to minimize the disruption to the services relying on it.
pub(crate) struct UpdateValidatorsTask {
  validators: Arc<RwLock<Validators>>,
}

impl UpdateValidatorsTask {
  /// Spawn a new instance of the UpdateValidatorsTask.
  ///
  /// This returns a reference to the Validators it updates after spawning itself.
  pub(crate) fn spawn(
    serai: Arc<Serai>,
  ) -> (Arc<RwLock<Validators>>, mpsc::UnboundedReceiver<Changes>) {
    // The validators which will be updated
    let (validators, changes) = Validators::new(serai);
    let validators = Arc::new(RwLock::new(validators));

    // Define the task
    let (update_validators_task, update_validators_task_handle) = Task::new();
    // Forget the handle, as dropping the handle would stop the task
    core::mem::forget(update_validators_task_handle);
    // Spawn the task
    tokio::spawn(
      (Self { validators: validators.clone() }).continually_run(update_validators_task, vec![]),
    );

    // Return the validators
    (validators, changes)
  }
}

impl ContinuallyRan for UpdateValidatorsTask {
  // Only run every minute, not the default of every five seconds
  const DELAY_BETWEEN_ITERATIONS: u64 = 60;
  const MAX_DELAY_BETWEEN_ITERATIONS: u64 = 5 * 60;

  type Error = SeraiError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let session_changes = {
        let validators = self.validators.read().await;
        Validators::session_changes(validators.serai.clone(), validators.sessions.clone()).await?
      };
      self.validators.write().await.incorporate_session_changes(session_changes);
      Ok(true)
    }
  }
}

coordinator/p2p/src/heartbeat.rs (new file, 151 lines)
@@ -0,0 +1,151 @@

use core::future::Future;
use std::time::{Duration, SystemTime};

use serai_primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};

use futures_lite::FutureExt;

use tributary_sdk::{ReadWrite, TransactionTrait, Block, Tributary, TributaryReader};

use serai_db::*;
use serai_task::ContinuallyRan;

use crate::{Heartbeat, Peer, P2p};

// Amount of blocks in a minute
const BLOCKS_PER_MINUTE: usize =
  (60 / (tributary_sdk::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
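// For instance, with a hypothetical TARGET_BLOCK_TIME of 6000 milliseconds (a six-second block
// time), this would be 60 / (6000 / 1000) = 10 blocks per minute.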

/// The minimum amount of blocks to include/included within a batch, assuming there's blocks to
/// include in the batch.
///
/// This decides the size limit of the Batch (the Block size limit multiplied by the minimum
/// amount of blocks we'll send). The actual amount of blocks sent will be the amount which fits
/// within the size limit.
pub const MIN_BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;

/// The size limit for a batch of blocks sent in response to a Heartbeat.
///
/// This estimates the size of a commit as `32 + (MAX_VALIDATORS * 128)`. At the time of writing,
/// a commit is `8 + (validators * 32) + (32 + (validators * 32))` (for the time, list of
/// validators, and aggregate signature). Accordingly, this should be a safe over-estimate.
pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
  (tributary_sdk::BLOCK_SIZE_LIMIT + 32 + ((MAX_KEY_SHARES_PER_SET as usize) * 128));
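// To make that over-estimate concrete with a hypothetical MAX_KEY_SHARES_PER_SET of 600: each
// commit is budgeted at 32 + (600 * 128) = 76,832 bytes, comfortably above the actual
// 8 + (600 * 32) + (32 + (600 * 32)) = 38,440 bytes.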

/// Sends a heartbeat to other validators on regular intervals informing them of our Tributary's
/// tip.
///
/// If the other validator has more blocks than we do, they're expected to inform us. This forms
/// the sync protocol for our Tributaries.
pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
  pub(crate) set: ExternalValidatorSet,
  pub(crate) tributary: Tributary<TD, Tx, P>,
  pub(crate) reader: TributaryReader<TD, Tx>,
  pub(crate) p2p: P,
}

impl<TD: Db, Tx: TransactionTrait, P: P2p> ContinuallyRan for HeartbeatTask<TD, Tx, P> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      // If our blockchain hasn't had a block in the past minute, trigger the heartbeat protocol
      const TIME_TO_TRIGGER_SYNCING: Duration = Duration::from_secs(60);

      let mut tip = self.reader.tip();
      let time_since = {
        let block_time = if let Some(time_of_block) = self.reader.time_of_block(&tip) {
          SystemTime::UNIX_EPOCH + Duration::from_secs(time_of_block)
        } else {
          // If we couldn't fetch this block's time, assume it's old
          // We don't want to declare its unix time as 0 and claim it's 50+ years old though
          log::warn!(
            "heartbeat task couldn't fetch the time of a block, flagging it as a minute old"
          );
          SystemTime::now() - TIME_TO_TRIGGER_SYNCING
        };
        SystemTime::now().duration_since(block_time).unwrap_or(Duration::ZERO)
      };
      let mut tip_is_stale = false;

      let mut synced_block = false;
      if TIME_TO_TRIGGER_SYNCING <= time_since {
        log::warn!(
          "last known tributary block for {:?} was {} seconds ago",
          self.set,
          time_since.as_secs()
        );

        // This requests all peers for this network, without differentiating by session
        // This should be fine as most validators should overlap across sessions
        'peer: for peer in self.p2p.peers(self.set.network).await {
          loop {
            // Create the request for blocks
            if tip_is_stale {
              tip = self.reader.tip();
              tip_is_stale = false;
            }
            // Necessary due to https://github.com/rust-lang/rust/issues/100013
            let Some(blocks) = peer
              .send_heartbeat(Heartbeat { set: self.set, latest_block_hash: tip })
              .boxed()
              .await
            else {
              continue 'peer;
            };

            // This is the final batch if it has less than the minimum amount of blocks per batch
            // (signifying there weren't more blocks after this to fill the batch with)
            let final_batch = blocks.len() < MIN_BLOCKS_PER_BATCH;

            // Sync each block
            for block_with_commit in blocks {
              let Ok(block) = Block::read(&mut block_with_commit.block.as_slice()) else {
                // TODO: Disconnect/slash this peer
                log::warn!("received invalid Block inside response to heartbeat");
                continue 'peer;
              };

              // Attempt to sync the block
              if !self.tributary.sync_block(block, block_with_commit.commit).await {
                // The block may be invalid or stale if we added a block elsewhere
                if (!tip_is_stale) && (tip != self.reader.tip()) {
                  // Since the Tributary's tip advanced on its own, return
                  return Ok(false);
                }

                // Since this block was invalid or stale in a way non-trivial to detect, try to
                // sync with the next peer
                continue 'peer;
              }

              // Because we synced a block, flag the tip as stale
              tip_is_stale = true;
              // And note that we did sync a block
              synced_block = true;
            }

            // If this was the final batch, move on to the next peer
            // We could assume they were honest and that we're done syncing the chain, but this
            // is a bit more robust
            if final_batch {
              continue 'peer;
            }
          }
        }

        // This will cause the task to be run less and less often, ensuring we aren't spamming
        // the network if we legitimately aren't making progress
        if !synced_block {
          Err(format!(
            "tried to sync blocks for {:?} since we haven't seen one in {} seconds but didn't",
            self.set,
            time_since.as_secs(),
          ))?;
        }
      }

      Ok(synced_block)
    }
  }
}

coordinator/p2p/src/lib.rs (new file, 204 lines)
@@ -0,0 +1,204 @@

#![cfg_attr(docsrs, feature(doc_cfg))]
#![doc = include_str!("../README.md")]
#![deny(missing_docs)]

use core::future::Future;
use std::collections::HashMap;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};

use serai_db::Db;
use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
use serai_cosign::{SignedCosign, Cosigning};

use tokio::sync::{mpsc, oneshot};

use serai_task::{Task, ContinuallyRan};

/// The heartbeat task, effecting sync of Tributaries
pub mod heartbeat;
use crate::heartbeat::HeartbeatTask;

/// A heartbeat for a Tributary.
#[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
pub struct Heartbeat {
  /// The Tributary this is the heartbeat of.
  pub set: ExternalValidatorSet,
  /// The hash of the latest block added to the Tributary.
  pub latest_block_hash: [u8; 32],
}
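
// Heartbeats travel over the request/response protocol and are answered with the blocks
// descending from `latest_block_hash` (built by `handle_heartbeat` below).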

/// A tributary block and its commit.
#[derive(Clone, BorshSerialize, BorshDeserialize)]
pub struct TributaryBlockWithCommit {
  /// The serialized block.
  pub block: Vec<u8>,
  /// The serialized commit.
  pub commit: Vec<u8>,
}

/// A representation of a peer.
pub trait Peer<'a>: Send {
  /// Send a heartbeat to this peer.
  fn send_heartbeat(
    &self,
    heartbeat: Heartbeat,
  ) -> impl Send + Future<Output = Option<Vec<TributaryBlockWithCommit>>>;
}
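
// A `None` response represents a failed request; callers such as `HeartbeatTask` simply move on
// to the next peer upon receiving it.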

/// The representation of the P2P network.
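///
/// Several of the futures below are documented as cancel-safe; `run` relies on this by polling
/// them inside a `tokio::select!`. A minimal sketch of that consumption, assuming some value
/// `p2p` implementing this trait:
///
/// ```ignore
/// loop {
///   tokio::select! {
///     (heartbeat, channel) = p2p.heartbeat() => { /* reply with the descending blocks */ }
///     cosign = p2p.cosign() => { /* forward to the cosigning logic */ }
///   }
/// }
/// ```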
pub trait P2p:
  Send + Sync + Clone + tributary_sdk::P2p + serai_cosign::RequestNotableCosigns
{
  /// The representation of a peer.
  type Peer<'a>: Peer<'a>;

  /// Fetch the peers for this network.
  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;

  /// Broadcast a cosign.
  fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;

  /// A cancel-safe future for the next heartbeat received over the P2P network.
  ///
  /// Yields the validator set it's for, the latest block hash observed, and a channel to return
  /// the descending blocks. This channel MUST NOT and will not have its receiver dropped before
  /// a message is sent.
  fn heartbeat(
    &self,
  ) -> impl Send + Future<Output = (Heartbeat, oneshot::Sender<Vec<TributaryBlockWithCommit>>)>;

  /// A cancel-safe future for the next request for the notable cosigns of a global session.
  ///
  /// Yields the global session the request is for and a channel to return the notable cosigns.
  /// This channel MUST NOT and will not have its receiver dropped before a message is sent.
  fn notable_cosigns_request(
    &self,
  ) -> impl Send + Future<Output = ([u8; 32], oneshot::Sender<Vec<SignedCosign>>)>;

  /// A cancel-safe future for the next message regarding a Tributary.
  ///
  /// Yields the message's Tributary's genesis block hash and the message.
  fn tributary_message(&self) -> impl Send + Future<Output = ([u8; 32], Vec<u8>)>;

  /// A cancel-safe future for the next cosign received.
  fn cosign(&self) -> impl Send + Future<Output = SignedCosign>;
}

fn handle_notable_cosigns_request<D: Db>(
  db: &D,
  global_session: [u8; 32],
  channel: oneshot::Sender<Vec<SignedCosign>>,
) {
  let cosigns = Cosigning::<D>::notable_cosigns(db, global_session);
  channel.send(cosigns).expect("channel listening for cosign oneshot response was dropped?");
}

fn handle_heartbeat<D: Db, T: TransactionTrait>(
  reader: &TributaryReader<D, T>,
  mut latest_block_hash: [u8; 32],
  channel: oneshot::Sender<Vec<TributaryBlockWithCommit>>,
) {
  let mut res_size = 8;
  let mut res = vec![];
  // The former condition should be covered by the latter, as the size limit is defined as a
  // multiple of the minimum amount of blocks per batch
  while (res.len() < heartbeat::MIN_BLOCKS_PER_BATCH) || (res_size < heartbeat::BATCH_SIZE_LIMIT) {
    let Some(block_after) = reader.block_after(&latest_block_hash) else { break };

    // These `break` conditions should only occur under edge cases, such as if we're actively
    // deleting this Tributary due to being done with it
    let Some(block) = reader.block(&block_after) else { break };
    let block = block.serialize();
    let Some(commit) = reader.commit(&block_after) else { break };
    res_size += 8 + block.len() + 8 + commit.len();
    res.push(TributaryBlockWithCommit { block, commit });

    latest_block_hash = block_after;
  }
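  // Note BATCH_SIZE_LIMIT acts as a threshold rather than a hard cap: the loop runs until at
  // least MIN_BLOCKS_PER_BATCH blocks are included and the running size reaches the limit, so
  // the response may exceed the limit by up to one final block and commit.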
  channel
    .send(res)
    .map_err(|_| ())
    .expect("channel listening for heartbeat oneshot response was dropped?");
}

/// Run the P2P instance.
///
/// `add_tributary`'s and `retire_tributary`'s senders, along with `send_cosigns`'s receiver,
/// must never be dropped. `retire_tributary` is not required to only be instructed with added
/// Tributaries.
pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
  db: impl Db,
  p2p: P,
  mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
  mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
  send_cosigns: mpsc::UnboundedSender<SignedCosign>,
) {
  let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
  let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
  let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();

  loop {
    tokio::select! {
      tributary = add_tributary.recv() => {
        let (set, tributary) = tributary.expect("add_tributary send was dropped");
        let reader = tributary.reader();
        readers.insert(set, reader.clone());

        let (heartbeat_task_def, heartbeat_task) = Task::new();
        tokio::spawn(
          (HeartbeatTask {
            set,
            tributary: tributary.clone(),
            reader: reader.clone(),
            p2p: p2p.clone(),
          }).continually_run(heartbeat_task_def, vec![])
        );
        heartbeat_tasks.insert(set, heartbeat_task);

        let (tributary_message_send, mut tributary_message_recv) = mpsc::unbounded_channel();
        tributaries.insert(tributary.genesis(), tributary_message_send);
        // For as long as this sender exists, handle the messages from it on a dedicated task
        tokio::spawn(async move {
          while let Some(message) = tributary_message_recv.recv().await {
            tributary.handle_message(&message).await;
          }
        });
      }
      set = retire_tributary.recv() => {
        let set = set.expect("retire_tributary send was dropped");
        let Some(reader) = readers.remove(&set) else { continue };
        tributaries.remove(&reader.genesis()).expect("tributary reader but no tributary");
        heartbeat_tasks.remove(&set).expect("tributary but no heartbeat task");
      }

      (heartbeat, channel) = p2p.heartbeat() => {
        if let Some(reader) = readers.get(&heartbeat.set) {
          let reader = reader.clone(); // This is a cheap clone
          // We spawn this on a task due to the DB reads needed
          tokio::spawn(async move {
            handle_heartbeat(&reader, heartbeat.latest_block_hash, channel)
          });
        }
      }
      (global_session, channel) = p2p.notable_cosigns_request() => {
        tokio::spawn({
          let db = db.clone();
          async move { handle_notable_cosigns_request(&db, global_session, channel) }
        });
      }
      (tributary, message) = p2p.tributary_message() => {
        if let Some(tributary) = tributaries.get(&tributary) {
          tributary.send(message).expect("tributary message recv was dropped?");
        }
      }
      cosign = p2p.cosign() => {
        // We don't call `Cosigning::intake_cosign` here as that can only be called from a single
        // location. We also need to intake the cosigns we produce, which means we need to merge
        // these streams (signing, network) somehow. That's done with this mpsc channel
        send_cosigns.send(cosign).expect("channel receiving cosigns was dropped");
      }
    }
  }
}

@@ -1,333 +0,0 @@

use core::time::Duration;
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use tokio::{
  sync::{mpsc, Mutex, RwLock},
  time::sleep,
};

use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
  primitives::{ExternalNetworkId, EXTERNAL_NETWORKS},
  validator_sets::primitives::{ExternalValidatorSet, Session},
  Serai, SeraiError, TemporalSerai,
};

use serai_db::{Get, DbTxn, Db, create_db};

use processor_messages::coordinator::cosign_block_msg;

use crate::{
  p2p::{CosignedBlock, GossipMessageKind, P2p},
  substrate::LatestCosignedBlock,
};

create_db! {
  CosignDb {
    ReceivedCosign: (set: ExternalValidatorSet, block: [u8; 32]) -> CosignedBlock,
    LatestCosign: (network: ExternalNetworkId) -> CosignedBlock,
    DistinctChain: (set: ExternalValidatorSet) -> (),
  }
}

pub struct CosignEvaluator<D: Db> {
  db: Mutex<D>,
  serai: Arc<Serai>,
  stakes: RwLock<Option<HashMap<ExternalNetworkId, u64>>>,
  latest_cosigns: RwLock<HashMap<ExternalNetworkId, CosignedBlock>>,
}

impl<D: Db> CosignEvaluator<D> {
  async fn update_latest_cosign(&self) {
    let stakes_lock = self.stakes.read().await;
    // If we haven't gotten the stake data yet, return
    let Some(stakes) = stakes_lock.as_ref() else { return };

    let total_stake = stakes.values().copied().sum::<u64>();

    let latest_cosigns = self.latest_cosigns.read().await;
    let mut highest_block = 0;
    for cosign in latest_cosigns.values() {
      let mut networks = HashSet::new();
      for (network, sub_cosign) in &*latest_cosigns {
        if sub_cosign.block_number >= cosign.block_number {
          networks.insert(network);
        }
      }
      let sum_stake =
        networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
      let needed_stake = ((total_stake * 2) / 3) + 1;
      if (total_stake == 0) || (sum_stake > needed_stake) {
        highest_block = highest_block.max(cosign.block_number);
      }
    }

    let mut db_lock = self.db.lock().await;
    let mut txn = db_lock.txn();
    if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
      log::info!("setting latest cosigned block to {}", highest_block);
      LatestCosignedBlock::set(&mut txn, &highest_block);
    }
    txn.commit();
  }

  async fn update_stakes(&self) -> Result<(), SeraiError> {
    let serai = self.serai.as_of_latest_finalized_block().await?;

    let mut stakes = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      // Use whether this network has published a Batch as a short-circuit for whether they've
      // ever set a key
      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
      if set_key {
        stakes.insert(
          network,
          serai
            .validator_sets()
            .total_allocated_stake(network.into())
            .await?
            .expect("network which published a batch didn't have a stake set")
            .0,
        );
      }
    }

    // Since we've successfully built stakes, set it
    *self.stakes.write().await = Some(stakes);

    self.update_latest_cosign().await;

    Ok(())
  }

  // Uses Err to signify a message should be retried
  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
    // If we already have this cosign or a newer cosign, return
    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
      if latest.block_number >= cosign.block_number {
        return Ok(());
      }
    }

    // If this is an old cosign (older than a day), drop it
    let latest_block = self.serai.latest_finalized_block().await?;
    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
      log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
      return Ok(());
    }

    let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
      log::warn!("received cosign with a block number which doesn't map to a block");
      return Ok(());
    };

    async fn set_with_keys_fn(
      serai: &TemporalSerai<'_>,
      network: ExternalNetworkId,
    ) -> Result<Option<ExternalValidatorSet>, SeraiError> {
      let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
        log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
        return Ok(None);
      };
      let prior_session = Session(latest_session.0.saturating_sub(1));
      Ok(Some(
        if serai
          .validator_sets()
          .keys(ExternalValidatorSet { network, session: prior_session })
          .await?
          .is_some()
        {
          ExternalValidatorSet { network, session: prior_session }
        } else {
          ExternalValidatorSet { network, session: latest_session }
        },
      ))
    }

    // Get the key for this network as of the prior block
    // If we have two chains, this value may be different across chains depending on if one chain
    // included the set_keys and one didn't
    // Because set_keys will force a cosign, it will force detection of distinct blocks
    // re: set_keys using keys prior to set_keys (assumed amenable to all)
    let serai = self.serai.as_of(block.header.parent_hash.into());

    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
      return Ok(());
    };
    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
      log::warn!("received cosign for a block we didn't have keys for");
      return Ok(());
    };

    if !keys
      .0
      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &cosign.signature.into())
    {
      log::warn!("received cosigned block with an invalid signature");
      return Ok(());
    }

    log::info!(
      "received cosign for block {} ({}) by {:?}",
      block.number(),
      hex::encode(cosign.block),
      cosign.network
    );

    // Save this cosign to the DB
    {
      let mut db = self.db.lock().await;
      let mut txn = db.txn();
      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
      LatestCosign::set(&mut txn, set_with_keys.network, &(cosign));
      txn.commit();
    }

    if cosign.block != block.hash() {
      log::error!(
        "received cosign for a distinct block at {}. we have {}. cosign had {}",
        cosign.block_number,
        hex::encode(block.hash()),
        hex::encode(cosign.block)
      );

      let serai = self.serai.as_of(latest_block.hash());

      let mut db = self.db.lock().await;
      // Save this set as being on a different chain
      let mut txn = db.txn();
      DistinctChain::set(&mut txn, set_with_keys, &());
      txn.commit();

      let mut total_stake = 0;
      let mut total_on_distinct_chain = 0;
      for network in EXTERNAL_NETWORKS {
        // Get the current set for this network
        let set_with_keys = {
          let mut res;
          while {
            res = set_with_keys_fn(&serai, network).await;
            res.is_err()
          } {
            log::error!(
              "couldn't get the set with keys when checking for a distinct chain: {:?}",
              res
            );
            tokio::time::sleep(core::time::Duration::from_secs(3)).await;
          }
          res.unwrap()
        };

        // Get its stake
        // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
        if let Some(set_with_keys) = set_with_keys {
          let stake = {
            let mut res;
            while {
              res =
                serai.validator_sets().total_allocated_stake(set_with_keys.network.into()).await;
              res.is_err()
            } {
              log::error!(
                "couldn't get total allocated stake when checking for a distinct chain: {:?}",
                res
              );
              tokio::time::sleep(core::time::Duration::from_secs(3)).await;
            }
            res.unwrap()
          };

          if let Some(stake) = stake {
            total_stake += stake.0;

            if DistinctChain::get(&*db, set_with_keys).is_some() {
              total_on_distinct_chain += stake.0;
            }
          }
        }
      }

      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
      if (total_stake * 17 / 100) <= total_on_distinct_chain {
        panic!("17% of validator sets (by stake) have co-signed a distinct chain");
      }
    } else {
      {
        let mut latest_cosigns = self.latest_cosigns.write().await;
        latest_cosigns.insert(cosign.network, cosign);
      }
      self.update_latest_cosign().await;
    }

    Ok(())
  }

  #[allow(clippy::new_ret_no_self)]
  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
    let mut latest_cosigns = HashMap::new();
    for network in EXTERNAL_NETWORKS {
      if let Some(cosign) = LatestCosign::get(&db, network) {
        latest_cosigns.insert(network, cosign);
      }
    }

    let evaluator = Arc::new(Self {
      db: Mutex::new(db),
      serai,
      stakes: RwLock::new(None),
      latest_cosigns: RwLock::new(latest_cosigns),
    });

    // Spawn a task to update stakes regularly
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        loop {
          // Run this until it passes
          while evaluator.update_stakes().await.is_err() {
            log::warn!("couldn't update stakes in the cosign evaluator");
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
          // Run it every 10 minutes as we don't need the exact stake data for this to be valid
          sleep(Duration::from_secs(10 * 60)).await;
        }
      }
    });

    // Spawn a task to receive cosigns and handle them
    let (send, mut recv) = mpsc::unbounded_channel();
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        while let Some(msg) = recv.recv().await {
          while evaluator.handle_new_cosign(msg).await.is_err() {
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
        }
      }
    });

    // Spawn a task to rebroadcast the most recent cosigns
    tokio::spawn({
      async move {
        loop {
          let cosigns =
            self.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
          for cosign in cosigns {
            let mut buf = vec![];
            cosign.serialize(&mut buf).unwrap();
            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
          }
          sleep(Duration::from_secs(60)).await;
        }
      }
    });

    // Return the channel to send cosigns
    send
  }
}
@@ -1,134 +1,148 @@
-use blake2::{
-  digest::{consts::U32, Digest},
-  Blake2b,
-};
-
-use scale::Encode;
-use serai_client::{
-  in_instructions::primitives::{Batch, SignedBatch},
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{ExternalValidatorSet, Session},
-};
-
-pub use serai_db::*;
-
-use ::tributary::ReadWrite;
-use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};
-
-create_db!(
-  MainDb {
-    HandledMessageDb: (network: ExternalNetworkId) -> u64,
-    ActiveTributaryDb: () -> Vec<u8>,
-    RetiredTributaryDb: (set: ExternalValidatorSet) -> (),
-    FirstPreprocessDb: (
-      network: ExternalNetworkId,
-      id_type: RecognizedIdType,
-      id: &[u8]
-    ) -> Vec<Vec<u8>>,
-    LastReceivedBatchDb: (network: ExternalNetworkId) -> u32,
-    ExpectedBatchDb: (network: ExternalNetworkId, id: u32) -> [u8; 32],
-    BatchDb: (network: ExternalNetworkId, id: u32) -> SignedBatch,
-    LastVerifiedBatchDb: (network: ExternalNetworkId) -> u32,
-    HandoverBatchDb: (set: ExternalValidatorSet) -> u32,
-    LookupHandoverBatchDb: (network: ExternalNetworkId, batch: u32) -> Session,
-    QueuedBatchesDb: (set: ExternalValidatorSet) -> Vec<u8>
-  }
-);
-
-impl ActiveTributaryDb {
-  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
-    let bytes = Self::get(getter).unwrap_or_default();
-    let mut bytes_ref: &[u8] = bytes.as_ref();
-
-    let mut tributaries = vec![];
-    while !bytes_ref.is_empty() {
-      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
-    }
-
-    (bytes, tributaries)
-  }
-
-  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
-    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
-    for tributary in &existing {
-      if tributary == spec {
-        return;
-      }
-    }
-
-    spec.serialize(&mut existing_bytes).unwrap();
-    ActiveTributaryDb::set(txn, &existing_bytes);
-  }
-
-  pub fn retire_tributary(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
-    let mut active = Self::active_tributaries(txn).1;
-    for i in 0 .. active.len() {
-      if active[i].set() == set {
-        active.remove(i);
-        break;
-      }
-    }
-
-    let mut bytes = vec![];
-    for active in active {
-      active.serialize(&mut bytes).unwrap();
-    }
-    Self::set(txn, &bytes);
-    RetiredTributaryDb::set(txn, set, &());
-  }
-}
-
-impl FirstPreprocessDb {
-  pub fn save_first_preprocess(
-    txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
-    id_type: RecognizedIdType,
-    id: &[u8],
-    preprocess: &Vec<Vec<u8>>,
-  ) {
-    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
-      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
-      return;
-    }
-    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
-  }
-}
-
-impl ExpectedBatchDb {
-  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
-    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
-    Self::set(
-      txn,
-      batch.network,
-      batch.id,
-      &Blake2b::<U32>::digest(batch.instructions.encode()).into(),
-    );
-  }
-}
-
-impl HandoverBatchDb {
-  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: u32) {
-    Self::set(txn, set, &batch);
-    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
-  }
-}
-impl QueuedBatchesDb {
-  pub fn queue(txn: &mut impl DbTxn, set: ExternalValidatorSet, batch: &Transaction) {
-    let mut batches = Self::get(txn, set).unwrap_or_default();
-    batch.write(&mut batches).unwrap();
-    Self::set(txn, set, &batches);
-  }
-
-  pub fn take(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<Transaction> {
-    let batches_vec = Self::get(txn, set).unwrap_or_default();
-    txn.del(Self::key(set));
-
-    let mut batches: &[u8] = &batches_vec;
-    let mut res = vec![];
-    while !batches.is_empty() {
-      res.push(Transaction::read(&mut batches).unwrap());
-    }
-    res
-  }
-}
+use std::{path::Path, fs};
+
+pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
+use serai_db::{create_db, db_channel};
+
+use dkg::Participant;
+
+use borsh::{BorshSerialize, BorshDeserialize};
+use serai_client::{
+  primitives::ExternalNetworkId,
+  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
+};
+
+use serai_cosign::SignedCosign;
+use serai_coordinator_substrate::NewSetInformation;
+use serai_coordinator_tributary::Transaction;
+
+#[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
+pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
+#[cfg(feature = "rocksdb")]
+pub(crate) type Db = serai_db::RocksDB;
+
+#[allow(unused_variables, unreachable_code)]
+fn db(path: &str) -> Db {
+  {
+    let path: &Path = path.as_ref();
+    // This may error if this path already exists, which we shouldn't propagate/panic on. If this
+    // is a problem (such as we don't have the necessary permissions to write to this path), we
+    // expect the following DB opening to error.
+    let _: Result<_, _> = fs::create_dir_all(path.parent().unwrap());
+  }
+
+  #[cfg(all(feature = "parity-db", feature = "rocksdb"))]
+  panic!("built with parity-db and rocksdb");
+  #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
+  let db = serai_db::new_parity_db(path);
+  #[cfg(feature = "rocksdb")]
+  let db = serai_db::new_rocksdb(path);
+  db
+}
+
+pub(crate) fn coordinator_db() -> Db {
+  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
+  db(&format!("{root_path}/coordinator/db"))
+}
+
+fn tributary_db_folder(set: ExternalValidatorSet) -> String {
+  let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
+  let network = match set.network {
+    ExternalNetworkId::Bitcoin => "Bitcoin",
+    ExternalNetworkId::Ethereum => "Ethereum",
+    ExternalNetworkId::Monero => "Monero",
+  };
+  format!("{root_path}/tributary-{network}-{}", set.session.0)
+}
+
+pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db {
+  db(&format!("{}/db", tributary_db_folder(set)))
+}
+
+pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) {
+  log::info!("pruning data directory for tributary {set:?}");
+  let db = tributary_db_folder(set);
+  if fs::exists(&db).expect("couldn't check if tributary DB exists") {
+    fs::remove_dir_all(db).unwrap();
+  }
+}
+
+create_db! {
+  Coordinator {
+    // The currently active Tributaries
+    ActiveTributaries: () -> Vec<NewSetInformation>,
+    // The latest Tributary to have been retired for a network
+    // Since Tributaries are retired sequentially, this is informative to if any Tributary has
+    // been retired
+    RetiredTributary: (network: ExternalNetworkId) -> Session,
+    // The last handled message from a Processor
+    LastProcessorMessage: (network: ExternalNetworkId) -> u64,
+    // Cosigns we produced and tried to intake yet incurred an error while doing so
+    ErroneousCosigns: () -> Vec<SignedCosign>,
+    // The keys to confirm and set on the Serai network
+    KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
+    // The key was set on the Serai network
+    KeySet: (set: ExternalValidatorSet) -> (),
+  }
+}
+
+db_channel! {
+  Coordinator {
+    // Cosigns we produced
+    SignedCosigns: () -> SignedCosign,
+    // Tributaries to clean up upon reboot
+    TributaryCleanup: () -> ExternalValidatorSet,
+  }
+}
+
+mod _internal_db {
+  use super::*;
+
+  db_channel! {
+    Coordinator {
+      // Tributary transactions to publish from the Processor messages
+      TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction,
+      // Tributary transactions to publish from the DKG confirmation task
+      TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
+      // Participants to remove
+      RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
+    }
+  }
+}
+
+pub(crate) struct TributaryTransactionsFromProcessorMessages;
+impl TributaryTransactionsFromProcessorMessages {
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
+    // If this set has yet to be retired, send this transaction
+    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
+      _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
+    }
+  }
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
+    _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
+  }
+}
+
+pub(crate) struct TributaryTransactionsFromDkgConfirmation;
+impl TributaryTransactionsFromDkgConfirmation {
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
+    // If this set has yet to be retired, send this transaction
+    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
+      _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
+    }
+  }
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
+    _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
pub(crate) struct RemoveParticipant;
|
||||||
|
impl RemoveParticipant {
|
||||||
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
|
||||||
|
// If this set has yet to be retired, send this transaction
|
||||||
|
if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
|
||||||
|
_internal_db::RemoveParticipant::send(txn, set, &participant);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
|
||||||
|
_internal_db::RemoveParticipant::try_recv(txn, set)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
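Every `send` wrapper above is gated on `RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0)`. This leans on Rust's `Option` ordering, where `None` compares less than any `Some`, so a network which has never retired a Tributary accepts transactions for any session. A minimal, self-contained sketch of that comparison (the function name is illustrative, not from the codebase):

fn still_active(retired: Option<u16>, session: u16) -> bool {
  // Option<u16> orders None before every Some, so "nothing retired yet"
  // always passes, and otherwise only sessions past the retired one do
  retired < Some(session)
}

fn main() {
  assert!(still_active(None, 0)); // nothing retired yet: session 0 is active
  assert!(still_active(Some(2), 3)); // session 3 postdates retired session 2
  assert!(!still_active(Some(3), 3)); // session 3 itself was retired: drop sends
  assert!(!still_active(Some(4), 3)); // as was everything before session 4
}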
439  coordinator/src/dkg_confirmation.rs  Normal file
@@ -0,0 +1,439 @@
use core::{ops::Deref, future::Future};
use std::{boxed::Box, collections::HashMap};

use zeroize::Zeroizing;
use rand_core::OsRng;

use ciphersuite::{group::GroupEncoding, *};
use dkg::{Participant, musig};
use frost_schnorrkel::{
  frost::{curve::Ristretto, FrostError, sign::*},
  Schnorrkel,
};

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::{
  primitives::SeraiAddress,
  validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
};

use serai_task::{DoesNotError, ContinuallyRan};

use serai_coordinator_substrate::{NewSetInformation, Keys};
use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};

use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};

fn schnorrkel() -> Schnorrkel {
  Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
}

fn our_i(
  set: &NewSetInformation,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  data: &HashMap<Participant, Vec<u8>>,
) -> Participant {
  let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());

  let mut our_i = None;
  for participant in data.keys() {
    let validator_index = usize::from(u16::from(*participant) - 1);
    let (validator, _weight) = set.validators[validator_index];
    if validator == public {
      our_i = Some(*participant);
    }
  }
  our_i.unwrap()
}

// Take a HashMap of participations with non-contiguous Participants and convert them to a
// contiguous sequence.
//
// The input data is expected to not include our own data, which also won't be in the output data.
//
// Returns the mapping from the contiguous Participants to the original Participants.
fn make_contiguous<T>(
  our_i: Participant,
  mut data: HashMap<Participant, Vec<u8>>,
  transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
) -> Result<HashMap<Participant, T>, Participant> {
  assert!(!data.contains_key(&our_i));

  let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
  ordered_participants.sort_by_key(|participant| u16::from(*participant));

  let mut our_i = Some(our_i);
  let mut contiguous = HashMap::new();
  let mut i = 1;
  for participant in ordered_participants {
    // If this is the first participant after our own index, increment to account for our index
    if let Some(our_i_value) = our_i {
      if u16::from(participant) > u16::from(our_i_value) {
        i += 1;
        our_i = None;
      }
    }

    let contiguous_index = Participant::new(i).unwrap();
    let data = match transform(data.remove(&participant).unwrap()) {
      Ok(data) => data,
      Err(_) => Err(participant)?,
    };
    contiguous.insert(contiguous_index, data);
    i += 1;
  }
  Ok(contiguous)
}

fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
  match &result {
    Ok(_) => Ok(result.unwrap()),
    Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
      Err(*participant)
    }
    // All of these should be unreachable
    Err(
      FrostError::InternalError(_) |
        FrostError::InvalidParticipant(_, _) |
        FrostError::InvalidSigningSet(_) |
        FrostError::InvalidParticipantQuantity(_, _) |
        FrostError::DuplicatedParticipant(_) |
        FrostError::MissingParticipant(_),
    ) => {
      result.unwrap();
      unreachable!("continued execution after unwrapping Result::Err");
    }
  }
}

#[rustfmt::skip]
enum Signer {
  Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
  Share {
    attempt: u32,
    musig_validators: Vec<SeraiAddress>,
    share: [u8; 32],
    machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
  },
}

/// Performs the DKG Confirmation protocol.
pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
  db: CD,

  set: NewSetInformation,
  tributary_db: TD,

  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  signer: Option<Signer>,
}

impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
  pub(crate) fn new(
    db: CD,
    set: NewSetInformation,
    tributary_db: TD,
    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  ) -> Self {
    Self { db, set, tributary_db, key, signer: None }
  }

  fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
    let mut txn = db.txn();
    TributaryTransactionsFromDkgConfirmation::send(
      &mut txn,
      set,
      &Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
    );
    txn.commit();
  }

  fn preprocess(
    db: &mut CD,
    set: ExternalValidatorSet,
    attempt: u32,
    key: Zeroizing<<Ristretto as WrappedGroup>::F>,
    signer: &mut Option<Signer>,
  ) {
    // Perform the preprocess
    let public_key = Ristretto::generator() * key.deref();
    let (machine, preprocess) = AlgorithmMachine::new(
      schnorrkel(),
      // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
      musig(musig_context(set.into()), key, &[public_key]).unwrap(),
    )
    .preprocess(&mut OsRng);
    // We take the preprocess so we can use it in a distinct machine with the actual Musig
    // parameters
    let seed = machine.cache();

    let mut preprocess_bytes = [0u8; 64];
    preprocess_bytes.copy_from_slice(&preprocess.serialize());
    let preprocess = preprocess_bytes;

    let mut txn = db.txn();
    // If this attempt has already been preprocessed for, the Tributary will de-duplicate it
    // This may mean the Tributary preprocess is distinct from ours, but we check for that later
    TributaryTransactionsFromDkgConfirmation::send(
      &mut txn,
      set,
      &Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
    );
    txn.commit();

    *signer = Some(Signer::Preprocess { attempt, seed, preprocess });
  }
}

impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // If we were sent a key to set, create the signer for it
      if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
        // Create and publish the initial preprocess
        Self::preprocess(&mut self.db, self.set.set, 0, self.key.clone(), &mut self.signer);

        made_progress = true;
      }

      // If we have keys to confirm, handle all messages from the tributary
      if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
        // Handle all messages from the Tributary
        loop {
          let mut tributary_txn = self.tributary_db.txn();
          let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
          else {
            break;
          };

          match msg {
            messages::sign::CoordinatorMessage::Reattempt {
              id: messages::sign::SignId { attempt, .. },
            } => {
              // Create and publish the preprocess for the specified attempt
              Self::preprocess(
                &mut self.db,
                self.set.set,
                attempt,
                self.key.clone(),
                &mut self.signer,
              );
            }
            messages::sign::CoordinatorMessage::Preprocesses {
              id: messages::sign::SignId { attempt, .. },
              mut preprocesses,
            } => {
              // Confirm the preprocess we're expected to sign with is the one we locally have
              // It may be different if we rebooted and made a second preprocess for this attempt
              let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
                self.signer.take()
              else {
                // If this message is not expected, commit the txn to drop it and move on
                // At some point, we'll get a Reattempt and reset
                tributary_txn.commit();
                break;
              };

              // Determine the MuSig key signed with
              let musig_validators = {
                let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
                ordered_participants.sort_by_key(|participant| u16::from(*participant));

                let mut res = vec![];
                for participant in ordered_participants {
                  let (validator, _weight) =
                    self.set.validators[usize::from(u16::from(participant) - 1)];
                  res.push(validator);
                }
                res
              };

              let musig_public_keys = musig_validators
                .iter()
                .map(|key| {
                  Ristretto::read_G(&mut key.0.as_slice())
                    .expect("Serai validator had invalid public key")
                })
                .collect::<Vec<_>>();

              let keys =
                musig(musig_context(self.set.set.into()), self.key.clone(), &musig_public_keys)
                  .unwrap();

              // Rebuild the machine
              let (machine, preprocess_from_cache) =
                AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
              assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());

              // Ensure this is a consistent signing session
              let our_i = our_i(&self.set, &self.key, &preprocesses);
              let consistent = (attempt == our_attempt) &&
                (preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
              if !consistent {
                tributary_txn.commit();
                break;
              }

              // Reformat the preprocesses into the expected format for Musig
              let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
                machine.read_preprocess(&mut preprocess.as_slice())
              }) {
                Ok(preprocesses) => preprocesses,
                // This yields the *original participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              // Calculate our share
              let (machine, share) = match handle_frost_error(
                machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
              ) {
                Ok((machine, share)) => (machine, share),
                // This yields the *musig participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    musig_validators[usize::from(u16::from(participant) - 1)],
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              // Send our share
              let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
              let mut txn = self.db.txn();
              TributaryTransactionsFromDkgConfirmation::send(
                &mut txn,
                self.set.set,
                &Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
              );
              txn.commit();

              self.signer = Some(Signer::Share {
                attempt,
                musig_validators,
                share,
                machine: Box::new(machine),
              });
            }
            messages::sign::CoordinatorMessage::Shares {
              id: messages::sign::SignId { attempt, .. },
              mut shares,
            } => {
              let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
                self.signer.take()
              else {
                tributary_txn.commit();
                break;
              };

              // Ensure this is a consistent signing session
              let our_i = our_i(&self.set, &self.key, &shares);
              let consistent = (attempt == our_attempt) &&
                (shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
              if !consistent {
                tributary_txn.commit();
                break;
              }

              // Reformat the shares into the expected format for Musig
              let shares = match make_contiguous(our_i, shares, |share| {
                machine.read_share(&mut share.as_slice())
              }) {
                Ok(shares) => shares,
                // This yields the *original participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
                  );
                  tributary_txn.commit();
                  break;
                }
              };

              match handle_frost_error(machine.complete(shares)) {
                Ok(signature) => {
                  // Create the bitvec of the participants
                  let mut signature_participants;
                  {
                    use bitvec::prelude::*;
                    signature_participants = bitvec![u8, Lsb0; 0; 0];
                    let mut i = 0;
                    for (validator, _) in &self.set.validators {
                      if Some(validator) == musig_validators.get(i) {
                        signature_participants.push(true);
                        i += 1;
                      } else {
                        signature_participants.push(false);
                      }
                    }
                  }

                  // This is safe to call multiple times as it'll just change which *valid*
                  // signature to publish
                  let mut txn = self.db.txn();
                  Keys::set(
                    &mut txn,
                    self.set.set,
                    key_pair.clone(),
                    signature_participants,
                    signature.into(),
                  );
                  txn.commit();
                }
                // This yields the *musig participant index*
                Err(participant) => {
                  Self::slash(
                    &mut self.db,
                    self.set.set,
                    musig_validators[usize::from(u16::from(participant) - 1)],
                  );
                  tributary_txn.commit();
                  break;
                }
              }
            }
          }

          // Because we successfully handled this message, note we made progress
          made_progress = true;
          tributary_txn.commit();
        }
      }

      // Check if the key has been set on Serai
      if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
        KeySet::get(&self.db, self.set.set).is_some()
      {
        // Take the keys to confirm so we never instantiate the signer again
        let mut txn = self.db.txn();
        KeysToConfirm::take(&mut txn, self.set.set);
        KeySet::take(&mut txn, self.set.set);
        txn.commit();

        // Drop our own signer
        // The task won't die until the Tributary does, but now it'll never do anything again
        self.signer = None;

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
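`make_contiguous` above remaps sorted, gappy participant indices onto 1, 2, 3, ..., reserving one implicit slot for our own index. A small sketch of the same walk with plain `u16`s (a hypothetical helper, not from the diff):

// Returns (contiguous index, original index) pairs; our own index is absent
// from the output but still consumes the slot it would occupy
fn make_contiguous_indices(our_i: u16, mut others: Vec<u16>) -> Vec<(u16, u16)> {
  others.sort_unstable();
  let mut our_slot = Some(our_i);
  let mut i = 1;
  let mut mapping = vec![];
  for original in others {
    // The first participant past our own index steps over the slot we hold
    if let Some(ours) = our_slot {
      if original > ours {
        i += 1;
        our_slot = None;
      }
    }
    mapping.push((i, original));
    i += 1;
  }
  mapping
}

fn main() {
  // With our index 3 and peers {1, 5}: peer 1 keeps slot 1, we implicitly
  // hold slot 2, and peer 5 compacts down to slot 3
  assert_eq!(make_contiguous_indices(3, vec![5, 1]), vec![(1, 1), (3, 5)]);
}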
File diff suppressed because it is too large
@@ -1,46 +0,0 @@
use std::sync::Arc;

use serai_client::primitives::ExternalNetworkId;
use processor_messages::{ProcessorMessage, CoordinatorMessage};

use message_queue::{Service, Metadata, client::MessageQueue};

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Message {
  pub id: u64,
  pub network: ExternalNetworkId,
  pub msg: ProcessorMessage,
}

#[async_trait::async_trait]
pub trait Processors: 'static + Send + Sync + Clone {
  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>);
  async fn recv(&self, network: ExternalNetworkId) -> Message;
  async fn ack(&self, msg: Message);
}

#[async_trait::async_trait]
impl Processors for Arc<MessageQueue> {
  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
    let msg: CoordinatorMessage = msg.into();
    let metadata =
      Metadata { from: self.service, to: Service::Processor(network), intent: msg.intent() };
    let msg = borsh::to_vec(&msg).unwrap();
    self.queue(metadata, msg).await;
  }
  async fn recv(&self, network: ExternalNetworkId) -> Message {
    let msg = self.next(Service::Processor(network)).await;
    assert_eq!(msg.from, Service::Processor(network));

    let id = msg.id;

    // Deserialize it into a ProcessorMessage
    let msg: ProcessorMessage =
      borsh::from_slice(&msg.msg).expect("message wasn't a borsh-encoded ProcessorMessage");

    return Message { id, network, msg };
  }
  async fn ack(&self, msg: Message) {
    MessageQueue::ack(self, Service::Processor(msg.network), msg.id).await
  }
}
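The removed `Processors` impl serializes every message with borsh before queueing it and decodes it on receipt, panicking if the bytes don't round-trip. A minimal sketch of that round-trip (the `ExampleMessage` type is a hypothetical stand-in, not from the codebase):

use borsh::{BorshSerialize, BorshDeserialize};

#[derive(Debug, PartialEq, BorshSerialize, BorshDeserialize)]
struct ExampleMessage {
  id: u64,
  payload: Vec<u8>,
}

fn main() {
  let msg = ExampleMessage { id: 7, payload: vec![1, 2, 3] };
  // Queue side: encode to bytes
  let bytes = borsh::to_vec(&msg).unwrap();
  // Receive side: decode, expecting the exact same value back
  let decoded: ExampleMessage =
    borsh::from_slice(&bytes).expect("message wasn't a borsh-encoded ExampleMessage");
  assert_eq!(msg, decoded);
}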
164  coordinator/src/substrate.rs  Normal file
@@ -0,0 +1,164 @@
use core::future::Future;
use std::sync::Arc;

use zeroize::Zeroizing;

use ciphersuite::*;
use dalek_ff_group::Ristretto;

use tokio::sync::mpsc;

use serai_db::{DbTxn, Db as DbTrait};

use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
use message_queue::{Service, Metadata, client::MessageQueue};

use tributary_sdk::Tributary;

use serai_task::ContinuallyRan;

use serai_coordinator_tributary::Transaction;
use serai_coordinator_p2p::P2p;

use crate::{Db, KeySet};

pub(crate) struct SubstrateTask<P: P2p> {
  pub(crate) serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
  pub(crate) db: Db,
  pub(crate) message_queue: Arc<MessageQueue>,
  pub(crate) p2p: P,
  pub(crate) p2p_add_tributary:
    mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
  pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>,
}

impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
  type Error = String; // TODO
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Handle the Canonical events
      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        loop {
          let mut txn = self.db.txn();
          let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)
          else {
            break;
          };

          match msg {
            messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
              KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &());
            }
            messages::substrate::CoordinatorMessage::SlashesReported { session } => {
              let prior_retired = crate::db::RetiredTributary::get(&txn, network);
              let next_to_be_retired =
                prior_retired.map(|session| Session(session.0 + 1)).unwrap_or(Session(0));
              assert_eq!(session, next_to_be_retired);
              crate::db::RetiredTributary::set(&mut txn, network, &session);
              self
                .p2p_retire_tributary
                .send(ExternalValidatorSet { network, session })
                .expect("p2p retire_tributary channel dropped?");
            }
            messages::substrate::CoordinatorMessage::Block { .. } => {}
          }

          let msg = messages::CoordinatorMessage::from(msg);
          let metadata = Metadata {
            from: Service::Coordinator,
            to: Service::Processor(network),
            intent: msg.intent(),
          };
          let msg = borsh::to_vec(&msg).unwrap();
          self.message_queue.queue(metadata, msg).await?;
          txn.commit();
          made_progress = true;
        }
      }

      // Handle the NewSet events
      loop {
        let mut txn = self.db.txn();
        let Some(new_set) = serai_coordinator_substrate::NewSet::try_recv(&mut txn) else { break };

        if let Some(historic_session) = new_set.set.session.0.checked_sub(2) {
          // We should have retired this session if we're here
          if crate::db::RetiredTributary::get(&txn, new_set.set.network).map(|session| session.0) <
            Some(historic_session)
          {
            /*
              If we haven't, it's because we're processing the NewSet event before the retirement
              event from the Canonical event stream. This happens if the Canonical event, and
              then the NewSet event, is fired while we're already iterating over NewSet events.

              We break, dropping the txn, restoring this NewSet to the database, so we'll only
              handle it once a future iteration of this loop handles the retirement event.
            */
            break;
          }

          /*
            Queue this historical Tributary for deletion.

            We explicitly don't queue this upon Tributary retire, instead here, to give time to
            investigate retired Tributaries if questions are raised post-retirement. This gives a
            week (the duration of the following session) after the Tributary has been retired to
            make a backup of the data directory for any investigations.
          */
          crate::db::TributaryCleanup::send(
            &mut txn,
            &ExternalValidatorSet {
              network: new_set.set.network,
              session: Session(historic_session),
            },
          );
        }

        // Save this Tributary as active to the database
        {
          let mut active_tributaries =
            crate::db::ActiveTributaries::get(&txn).unwrap_or(Vec::with_capacity(1));
          active_tributaries.push(new_set.clone());
          crate::db::ActiveTributaries::set(&mut txn, &active_tributaries);
        }

        // Send GenerateKey to the processor
        let msg = messages::key_gen::CoordinatorMessage::GenerateKey {
          session: new_set.set.session,
          threshold: new_set.threshold,
          evrf_public_keys: new_set.evrf_public_keys.clone(),
        };
        let msg = messages::CoordinatorMessage::from(msg);
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(new_set.set.network),
          intent: msg.intent(),
        };
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;

        // Commit the transaction for all of this
        txn.commit();

        // Now spawn the Tributary
        // If we reboot after committing the txn, but before this is called, this will be called
        // on boot
        crate::tributary::spawn_tributary(
          self.db.clone(),
          self.message_queue.clone(),
          self.p2p.clone(),
          &self.p2p_add_tributary,
          new_set,
          self.serai_key.clone(),
        )
        .await;

        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}
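The `SlashesReported` arm above asserts that retirement is strictly sequential: the reported session must be exactly the successor of the last retired one (or session 0 if none was retired). A tiny sketch of that successor computation, with `u32` standing in for the `Session` newtype (an illustrative helper, not from the codebase):

fn next_to_be_retired(prior_retired: Option<u32>) -> u32 {
  // No prior retirement means session 0 is next; otherwise it's the successor
  prior_retired.map(|session| session + 1).unwrap_or(0)
}

fn main() {
  assert_eq!(next_to_be_retired(None), 0);
  assert_eq!(next_to_be_retired(Some(0)), 1);
  assert_eq!(next_to_be_retired(Some(41)), 42);
}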
@@ -1,338 +0,0 @@
/*
  If:
    A) This block has events and it's been at least X blocks since the last cosign or
    B) This block doesn't have events but it's been X blocks since a skipped block which did
       have events or
    C) This block key gens (which changes who the cosigners are)
  cosign this block.

  This creates both a minimum and maximum delay of X blocks before a block's cosigning begins,
  barring key gens which are exceptional. The minimum delay is there to ensure we don't constantly
  spawn new protocols every 6 seconds, overwriting the old ones. The maximum delay is there to
  ensure any block needing to be cosigned is cosigned within a reasonable amount of time.
*/

use zeroize::Zeroizing;

use dalek_ff_group::Ristretto;
use ciphersuite::Ciphersuite;

use borsh::{BorshSerialize, BorshDeserialize};

use serai_client::{
  primitives::ExternalNetworkId,
  validator_sets::primitives::{ExternalValidatorSet, Session},
  Serai, SeraiError,
};

use serai_db::*;

use crate::{Db, substrate::in_set, tributary::SeraiBlockNumber};

// 5 minutes, expressed in blocks
// TODO: Pull a constant for block time
const COSIGN_DISTANCE: u64 = 5 * 60 / 6;

#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
enum HasEvents {
  KeyGen,
  Yes,
  No,
}

create_db!(
  SubstrateCosignDb {
    ScanCosignFrom: () -> u64,
    IntendedCosign: () -> (u64, Option<u64>),
    BlockHasEventsCache: (block: u64) -> HasEvents,
    LatestCosignedBlock: () -> u64,
  }
);

impl IntendedCosign {
  // Sets the intended to cosign block, clearing the prior value entirely.
  pub fn set_intended_cosign(txn: &mut impl DbTxn, intended: u64) {
    Self::set(txn, &(intended, None::<u64>));
  }

  // Sets the cosign skipped since the last intended to cosign block.
  pub fn set_skipped_cosign(txn: &mut impl DbTxn, skipped: u64) {
    let (intended, prior_skipped) = Self::get(txn).unwrap();
    assert!(prior_skipped.is_none());
    Self::set(txn, &(intended, Some(skipped)));
  }
}

impl LatestCosignedBlock {
  pub fn latest_cosigned_block(getter: &impl Get) -> u64 {
    Self::get(getter).unwrap_or_default().max(1)
  }
}

db_channel! {
  SubstrateDbChannels {
    CosignTransactions: (network: ExternalNetworkId) -> (Session, u64, [u8; 32]),
  }
}

impl CosignTransactions {
  // Append a cosign transaction.
  pub fn append_cosign(
    txn: &mut impl DbTxn,
    set: ExternalValidatorSet,
    number: u64,
    hash: [u8; 32],
  ) {
    CosignTransactions::send(txn, set.network, &(set.session, number, hash))
  }
}

async fn block_has_events(
  txn: &mut impl DbTxn,
  serai: &Serai,
  block: u64,
) -> Result<HasEvents, SeraiError> {
  let cached = BlockHasEventsCache::get(txn, block);
  match cached {
    None => {
      let serai = serai.as_of(
        serai
          .finalized_block_by_number(block)
          .await?
          .expect("couldn't get block which should've been finalized")
          .hash(),
      );

      if !serai.validator_sets().key_gen_events().await?.is_empty() {
        return Ok(HasEvents::KeyGen);
      }

      let has_no_events = serai.coins().burn_with_instruction_events().await?.is_empty() &&
        serai.in_instructions().batch_events().await?.is_empty() &&
        serai.validator_sets().new_set_events().await?.is_empty() &&
        serai.validator_sets().set_retired_events().await?.is_empty();

      let has_events = if has_no_events { HasEvents::No } else { HasEvents::Yes };

      BlockHasEventsCache::set(txn, block, &has_events);
      Ok(has_events)
    }
    Some(code) => Ok(code),
  }
}

async fn potentially_cosign_block(
  txn: &mut impl DbTxn,
  serai: &Serai,
  block: u64,
  skipped_block: Option<u64>,
  window_end_exclusive: u64,
) -> Result<bool, SeraiError> {
  // The following code regarding marking cosigned if prior block is cosigned expects this block to
  // not be zero
  // While we could perform this check there, there's no reason not to optimize the entire function
  // as such
  if block == 0 {
    return Ok(false);
  }

  let block_has_events = block_has_events(txn, serai, block).await?;

  // If this block had no events and immediately follows a cosigned block, mark it as cosigned
  if (block_has_events == HasEvents::No) &&
    (LatestCosignedBlock::latest_cosigned_block(txn) == (block - 1))
  {
    log::debug!("automatically co-signing next block ({block}) since it has no events");
    LatestCosignedBlock::set(txn, &block);
  }

  // If we skipped a block, we're supposed to sign it plus the COSIGN_DISTANCE if no other blocks
  // trigger a cosigning protocol covering it
  // This means there will be the maximum delay allowed from a block needing cosigning occurring
  // and a cosign for it triggering
  let maximally_latent_cosign_block =
    skipped_block.map(|skipped_block| skipped_block + COSIGN_DISTANCE);

  // If this block is within the window,
  if block < window_end_exclusive {
    // and set a key, cosign it
    if block_has_events == HasEvents::KeyGen {
      IntendedCosign::set_intended_cosign(txn, block);
      // Carry skipped if it isn't included by cosigning this block
      if let Some(skipped) = skipped_block {
        if skipped > block {
          IntendedCosign::set_skipped_cosign(txn, block);
        }
      }
      return Ok(true);
    }
  } else if (Some(block) == maximally_latent_cosign_block) || (block_has_events != HasEvents::No) {
    // Since this block was outside the window and had events/was maximally latent, cosign it
    IntendedCosign::set_intended_cosign(txn, block);
    return Ok(true);
  }
  Ok(false)
}

/*
  Advances the cosign protocol as should be done per the latest block.

  A block is considered cosigned if:
    A) It was cosigned
    B) It's the parent of a cosigned block
    C) It immediately follows a cosigned block and has no events requiring cosigning

  This only actually performs advancement within a limited bound (generally until it finds a block
  which should be cosigned). Accordingly, it is necessary to call multiple times even if
  `latest_number` doesn't change.
*/
async fn advance_cosign_protocol_inner(
  db: &mut impl Db,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  latest_number: u64,
) -> Result<(), SeraiError> {
  let mut txn = db.txn();

  const INITIAL_INTENDED_COSIGN: u64 = 1;
  let (last_intended_to_cosign_block, mut skipped_block) = {
    let intended_cosign = IntendedCosign::get(&txn);
    // If we haven't prior intended to cosign a block, set the intended cosign to 1
    if let Some(intended_cosign) = intended_cosign {
      intended_cosign
    } else {
      IntendedCosign::set_intended_cosign(&mut txn, INITIAL_INTENDED_COSIGN);
      IntendedCosign::get(&txn).unwrap()
    }
  };

  // "windows" refers to the window of blocks where even if there's a block which should be
  // cosigned, it won't be due to proximity due to the prior cosign
  let mut window_end_exclusive = last_intended_to_cosign_block + COSIGN_DISTANCE;
  // If we've never triggered a cosign, don't skip any cosigns based on proximity
  if last_intended_to_cosign_block == INITIAL_INTENDED_COSIGN {
    window_end_exclusive = 1;
  }

  // The consensus rules for this are `last_intended_to_cosign_block + 1`
  let scan_start_block = last_intended_to_cosign_block + 1;
  // As a practical optimization, we don't re-scan old blocks since old blocks are independent of
  // new state
  let scan_start_block = scan_start_block.max(ScanCosignFrom::get(&txn).unwrap_or(1));

  // Check all blocks within the window to see if they should be cosigned
  // If so, we're skipping them and need to flag them as skipped so that once the window closes, we
  // do cosign them
  // We only perform this check if we haven't already marked a block as skipped since the cosign
  // the skipped block will cause will cosign all other blocks within this window
  if skipped_block.is_none() {
    let window_end_inclusive = window_end_exclusive - 1;
    for b in scan_start_block ..= window_end_inclusive.min(latest_number) {
      if block_has_events(&mut txn, serai, b).await? == HasEvents::Yes {
        skipped_block = Some(b);
        log::debug!("skipping cosigning {b} due to proximity to prior cosign");
        IntendedCosign::set_skipped_cosign(&mut txn, b);
        break;
      }
    }
  }

  // A block which should be cosigned
  let mut to_cosign = None;
  // A list of sets which are cosigning, along with a boolean of if we're in the set
  let mut cosigning = vec![];

  for block in scan_start_block ..= latest_number {
    let actual_block = serai
      .finalized_block_by_number(block)
      .await?
      .expect("couldn't get block which should've been finalized");

    // Save the block number for this block, as needed by the cosigner to perform cosigning
    SeraiBlockNumber::set(&mut txn, actual_block.hash(), &block);

    if potentially_cosign_block(&mut txn, serai, block, skipped_block, window_end_exclusive).await?
    {
      to_cosign = Some((block, actual_block.hash()));

      // Get the keys as of the prior block
      // If this key sets new keys, the coordinator won't acknowledge so until we process this
      // block
      // We won't process this block until it's cosigned
      // Using the keys of the prior block ensures this deadlock isn't reached
      let serai = serai.as_of(actual_block.header.parent_hash.into());

      for network in serai_client::primitives::EXTERNAL_NETWORKS {
        // Get the latest session to have set keys
        let set_with_keys = {
          let Some(latest_session) = serai.validator_sets().session(network.into()).await? else {
            continue;
          };
          let prior_session = Session(latest_session.0.saturating_sub(1));
          if serai
            .validator_sets()
            .keys(ExternalValidatorSet { network, session: prior_session })
            .await?
            .is_some()
          {
            ExternalValidatorSet { network, session: prior_session }
          } else {
            let set = ExternalValidatorSet { network, session: latest_session };
            if serai.validator_sets().keys(set).await?.is_none() {
              continue;
            }
            set
          }
        };

        log::debug!("{:?} will be cosigning {block}", set_with_keys.network);
        cosigning.push((set_with_keys, in_set(key, &serai, set_with_keys.into()).await?.unwrap()));
      }

      break;
    }

    // If this TX is committed, always start future scanning from the next block
    ScanCosignFrom::set(&mut txn, &(block + 1));
    // Since we're scanning *from* the next block, tidy the cache
    BlockHasEventsCache::del(&mut txn, block);
  }

  if let Some((number, hash)) = to_cosign {
    // If this block doesn't have cosigners, yet does have events, automatically mark it as
    // cosigned
    if cosigning.is_empty() {
      log::debug!("{} had no cosigners available, marking as cosigned", number);
      LatestCosignedBlock::set(&mut txn, &number);
    } else {
      for (set, in_set) in cosigning {
        if in_set {
          log::debug!("cosigning {number} with {:?} {:?}", set.network, set.session);
          CosignTransactions::append_cosign(&mut txn, set, number, hash);
        }
      }
    }
  }
  txn.commit();

  Ok(())
}

pub async fn advance_cosign_protocol(
  db: &mut impl Db,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  serai: &Serai,
  latest_number: u64,
) -> Result<(), SeraiError> {
  loop {
    let scan_from = ScanCosignFrom::get(db).unwrap_or(1);
    // Only scan 1000 blocks at a time to limit a massive txn from forming
    let scan_to = latest_number.min(scan_from + 1000);
    advance_cosign_protocol_inner(db, key, serai, scan_to).await?;
    // If we didn't limit the scan_to, break
    if scan_to == latest_number {
      break;
    }
  }
  Ok(())
}
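With 6-second blocks, `COSIGN_DISTANCE` above works out to 50 blocks (five minutes), and the cosign window ends, exclusively, at the last intent plus that distance. A brief sketch of that arithmetic (an illustrative helper, not part of the removed module):

const COSIGN_DISTANCE: u64 = 5 * 60 / 6;

// The first block number no longer covered by the deferral window
fn window_end_exclusive(last_intended: u64) -> u64 {
  last_intended + COSIGN_DISTANCE
}

fn main() {
  assert_eq!(COSIGN_DISTANCE, 50);
  // A block at height 120, after an intent at 100, is still inside the window
  // (120 < 150), so it won't immediately trigger a fresh cosign
  assert!(120 < window_end_exclusive(100));
  // Height 150 is the first block outside it
  assert!(!(150 < window_end_exclusive(100)));
}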
@@ -1,32 +0,0 @@
use serai_client::primitives::ExternalNetworkId;

pub use serai_db::*;

mod inner_db {
  use super::*;

  create_db!(
    SubstrateDb {
      NextBlock: () -> u64,
      HandledEvent: (block: [u8; 32]) -> u32,
      BatchInstructionsHashDb: (network: ExternalNetworkId, id: u32) -> [u8; 32]
    }
  );
}
pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};

pub struct HandledEvent;
impl HandledEvent {
  fn next_to_handle_event(getter: &impl Get, block: [u8; 32]) -> u32 {
    inner_db::HandledEvent::get(getter, block).map_or(0, |last| last + 1)
  }
  pub fn is_unhandled(getter: &impl Get, block: [u8; 32], event_id: u32) -> bool {
    let next = Self::next_to_handle_event(getter, block);
    assert!(next >= event_id);
    next == event_id
  }
  pub fn handle_event(txn: &mut impl DbTxn, block: [u8; 32], index: u32) {
    assert!(Self::next_to_handle_event(txn, block) == index);
    inner_db::HandledEvent::set(txn, block, &index);
  }
}
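`HandledEvent` above implements a per-block event cursor: an event is unhandled exactly when it is the next index, so replaying a block's events after a reboot skips already-completed work. A minimal sketch of the same check with a plain `Option<u32>` in place of the DB entry (an illustrative helper, not from the removed module):

fn is_unhandled(last_handled: Option<u32>, event_id: u32) -> bool {
  // The next event to handle is 0 for a fresh block, else the successor of
  // the last handled index
  let next = last_handled.map_or(0, |last| last + 1);
  assert!(next >= event_id);
  next == event_id
}

fn main() {
  assert!(is_unhandled(None, 0)); // fresh block: event 0 runs
  assert!(!is_unhandled(Some(0), 0)); // after handling, a replay skips it
  assert!(is_unhandled(Some(0), 1)); // and event 1 is next in line
}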
@@ -1,547 +0,0 @@
|
|||||||
use core::{ops::Deref, time::Duration};
|
|
||||||
use std::{
|
|
||||||
sync::Arc,
|
|
||||||
collections::{HashSet, HashMap},
|
|
||||||
};
|
|
||||||
|
|
||||||
use zeroize::Zeroizing;
|
|
||||||
|
|
||||||
use dalek_ff_group::Ristretto;
|
|
||||||
use ciphersuite::{group::GroupEncoding, Ciphersuite};
|
|
||||||
|
|
||||||
use serai_client::{
|
|
||||||
coins::CoinsEvent,
|
|
||||||
in_instructions::InInstructionsEvent,
|
|
||||||
primitives::{BlockHash, ExternalNetworkId},
|
|
||||||
validator_sets::{
|
|
||||||
primitives::{ExternalValidatorSet, ValidatorSet},
|
|
||||||
ValidatorSetsEvent,
|
|
||||||
},
|
|
||||||
Block, Serai, SeraiError, TemporalSerai,
|
|
||||||
};
|
|
||||||
|
|
||||||
use serai_db::DbTxn;
|
|
||||||
|
|
||||||
use processor_messages::SubstrateContext;
|
|
||||||
|
|
||||||
use tokio::{sync::mpsc, time::sleep};
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
Db,
|
|
||||||
processors::Processors,
|
|
||||||
tributary::{TributarySpec, SeraiDkgCompleted},
|
|
||||||
};
|
|
||||||
|
|
||||||
mod db;
|
|
||||||
pub use db::*;
|
|
||||||
|
|
||||||
mod cosign;
|
|
||||||
pub use cosign::*;
|
|
||||||
|
|
||||||
async fn in_set(
|
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
|
||||||
serai: &TemporalSerai<'_>,
|
|
||||||
set: ValidatorSet,
|
|
||||||
) -> Result<Option<bool>, SeraiError> {
|
|
||||||
let Some(participants) = serai.validator_sets().participants(set.network).await? else {
|
|
||||||
return Ok(None);
|
|
||||||
};
|
|
||||||
let key = (Ristretto::generator() * key.deref()).to_bytes();
|
|
||||||
Ok(Some(participants.iter().any(|(participant, _)| participant.0 == key)))
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_new_set<D: Db>(
|
|
||||||
txn: &mut D::Transaction<'_>,
|
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
|
||||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
|
||||||
serai: &Serai,
|
|
||||||
block: &Block,
|
|
||||||
set: ExternalValidatorSet,
|
|
||||||
) -> Result<(), SeraiError> {
|
|
||||||
if in_set(key, &serai.as_of(block.hash()), set.into())
|
|
||||||
.await?
|
|
||||||
.expect("NewSet for set which doesn't exist")
|
|
||||||
{
|
|
||||||
log::info!("present in set {:?}", set);
|
|
||||||
|
|
||||||
let set_data = {
|
|
||||||
let serai = serai.as_of(block.hash());
|
|
||||||
let serai = serai.validator_sets();
|
|
||||||
let set_participants =
|
|
||||||
serai.participants(set.network.into()).await?.expect("NewSet for set which doesn't exist");
|
|
||||||
|
|
||||||
set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
|
|
||||||
};
|
|
||||||
|
|
||||||
let time = if let Ok(time) = block.time() {
|
|
||||||
time
|
|
||||||
} else {
|
|
||||||
assert_eq!(block.number(), 0);
|
|
||||||
// Use the next block's time
|
|
||||||
loop {
|
|
||||||
let Ok(Some(res)) = serai.finalized_block_by_number(1).await else {
|
|
||||||
sleep(Duration::from_secs(5)).await;
|
|
||||||
continue;
|
|
||||||
};
|
|
||||||
break res.time().unwrap();
|
|
||||||
}
|
|
||||||
};
|
|
||||||
// The block time is in milliseconds yet the Tributary is in seconds
|
|
||||||
let time = time / 1000;
|
|
||||||
// Since this block is in the past, and Tendermint doesn't play nice with starting chains after
|
|
||||||
// their start time (though it does eventually work), delay the start time by 120 seconds
|
|
||||||
// This is meant to handle ~20 blocks of lack of finalization for this first block
|
|
||||||
const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
|
|
||||||
let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;
|
|
||||||
|
|
||||||
let spec = TributarySpec::new(block.hash(), time, set, set_data);
|
|
||||||
|
|
||||||
log::info!("creating new tributary for {:?}", spec.set());
|
|
||||||
|
|
||||||
// Save it to the database now, not on the channel receiver's side, so this is safe against
|
|
||||||
// reboots
|
|
||||||
// If this txn finishes, and we reboot, then this'll be reloaded from active Tributaries
|
|
||||||
// If this txn doesn't finish, this will be re-fired
|
|
||||||
// If we waited to save to the DB, this txn may be finished, preventing re-firing, yet the
|
|
||||||
// prior fired event may have not been received yet
|
|
||||||
crate::ActiveTributaryDb::add_participating_in_tributary(txn, &spec);
|
|
||||||
|
|
||||||
new_tributary_spec.send(spec).unwrap();
|
|
||||||
} else {
|
|
||||||
log::info!("not present in new set {:?}", set);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
async fn handle_batch_and_burns<Pro: Processors>(
|
|
||||||
txn: &mut impl DbTxn,
|
|
||||||
processors: &Pro,
|
|
||||||
serai: &Serai,
|
|
||||||
block: &Block,
|
|
||||||
) -> Result<(), SeraiError> {
|
|
||||||
// Track which networks had events with a Vec in ordr to preserve the insertion order
|
|
||||||
// While that shouldn't be needed, ensuring order never hurts, and may enable design choices
|
|
||||||
// with regards to Processor <-> Coordinator message passing
|
|
||||||
let mut networks_with_event = vec![];
|
|
||||||
let mut network_had_event = |burns: &mut HashMap<_, _>, batches: &mut HashMap<_, _>, network| {
|
|
||||||
// Don't insert this network multiple times
|
|
||||||
// A Vec is still used in order to maintain the insertion order
|
|
||||||
if !networks_with_event.contains(&network) {
|
|
||||||
networks_with_event.push(network);
|
|
||||||
burns.insert(network, vec![]);
|
|
||||||
batches.insert(network, vec![]);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let mut batch_block = HashMap::new();
|
|
||||||
let mut batches = HashMap::<ExternalNetworkId, Vec<u32>>::new();
|
|
||||||
let mut burns = HashMap::new();
|
|
||||||
|
|
||||||
let serai = serai.as_of(block.hash());
|
|
||||||
for batch in serai.in_instructions().batch_events().await? {
|
|
||||||
if let InInstructionsEvent::Batch { network, id, block: network_block, instructions_hash } =
|
|
||||||
batch
|
|
||||||
{
|
|
||||||
network_had_event(&mut burns, &mut batches, network);
|
|
||||||
|
|
||||||
BatchInstructionsHashDb::set(txn, network, id, &instructions_hash);
|
|
||||||
|
|
||||||
// Make sure this is the only Batch event for this network in this Block
|
|
||||||
assert!(batch_block.insert(network, network_block).is_none());
|
|
||||||
|
|
||||||
// Add the batch included by this block
|
|
||||||
batches.get_mut(&network).unwrap().push(id);
|
|
||||||
} else {
|
|
||||||
panic!("Batch event wasn't Batch: {batch:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
for burn in serai.coins().burn_with_instruction_events().await? {
|
|
||||||
if let CoinsEvent::BurnWithInstruction { from: _, instruction } = burn {
|
|
||||||
let network = instruction.balance.coin.network();
|
|
||||||
network_had_event(&mut burns, &mut batches, network);
|
|
||||||
|
|
||||||
// network_had_event should register an entry in burns
|
|
||||||
burns.get_mut(&network).unwrap().push(instruction);
|
|
||||||
} else {
|
|
||||||
panic!("Burn event wasn't Burn: {burn:?}");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
assert_eq!(HashSet::<&_>::from_iter(networks_with_event.iter()).len(), networks_with_event.len());
|
|
||||||
|
|
||||||
for network in networks_with_event {
|
|
||||||
let network_latest_finalized_block = if let Some(block) = batch_block.remove(&network) {
|
|
||||||
block
|
|
||||||
} else {
|
|
||||||
// If it's had a batch or a burn, it must have had a block acknowledged
|
|
||||||
serai
|
|
||||||
.in_instructions()
|
|
||||||
.latest_block_for_network(network)
|
|
||||||
.await?
|
|
||||||
.expect("network had a batch/burn yet never set a latest block")
|
|
||||||
};
|
|
||||||
|
|
||||||
processors
|
|
||||||
.send(
|
|
||||||
network,
|
|
||||||
processor_messages::substrate::CoordinatorMessage::SubstrateBlock {
|
|
||||||
context: SubstrateContext {
|
|
||||||
serai_time: block.time().unwrap() / 1000,
|
|
||||||
network_latest_finalized_block,
|
|
||||||
},
|
|
||||||
block: block.number(),
|
|
||||||
burns: burns.remove(&network).unwrap(),
|
|
||||||
batches: batches.remove(&network).unwrap(),
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handle a specific Substrate block, returning an error when it fails to get data
|
|
||||||
// (not blocking / holding)
|
|
||||||
#[allow(clippy::too_many_arguments)]
|
|
||||||
async fn handle_block<D: Db, Pro: Processors>(
|
|
||||||
db: &mut D,
|
|
||||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
|
||||||
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
|
|
||||||
perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
|
||||||
tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
|
|
||||||
processors: &Pro,
|
|
||||||
serai: &Serai,
|
|
||||||
block: Block,
|
|
||||||
) -> Result<(), SeraiError> {
|
|
||||||
let hash = block.hash();
|
|
||||||
|
|
||||||
// Define an indexed event ID.
|
|
||||||
let mut event_id = 0;
|
|
||||||
|
|
||||||
// If a new validator set was activated, create tributary/inform processor to do a DKG
|
|
||||||
for new_set in serai.as_of(hash).validator_sets().new_set_events().await? {
|
|
||||||
// Individually mark each event as handled so on reboot, we minimize duplicates
|
|
||||||
// Additionally, if the Serai connection also fails 1/100 times, this means a block with 1000
|
|
||||||
// events will successfully be incrementally handled
|
|
||||||
// (though the Serai connection should be stable, making this unnecessary)
|
|
||||||
let ValidatorSetsEvent::NewSet { set } = new_set else {
|
|
||||||
panic!("NewSet event wasn't NewSet: {new_set:?}");
|
|
||||||
};
|
|
||||||
|
|
||||||
// We only coordinate/process external networks
|
|
||||||
let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
|
|
||||||
if HandledEvent::is_unhandled(db, hash, event_id) {
|
|
||||||
log::info!("found fresh new set event {:?}", new_set);
|
|
||||||
let mut txn = db.txn();
|
|
||||||
handle_new_set::<D>(&mut txn, key, new_tributary_spec, serai, &block, set).await?;
|
|
||||||
HandledEvent::handle_event(&mut txn, hash, event_id);
|
|
||||||
txn.commit();
|
|
||||||
}
|
|
||||||
event_id += 1;
|
|
||||||
}
|
|
||||||
|
|
||||||
  // If a key pair was confirmed, inform the processor
  for key_gen in serai.as_of(hash).validator_sets().key_gen_events().await? {
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh key gen event {:?}", key_gen);
      let ValidatorSetsEvent::KeyGen { set, key_pair } = key_gen else {
        panic!("KeyGen event wasn't KeyGen: {key_gen:?}");
      };
      let substrate_key = key_pair.0 .0;
      processors
        .send(
          set.network,
          processor_messages::substrate::CoordinatorMessage::ConfirmKeyPair {
            context: SubstrateContext {
              serai_time: block.time().unwrap() / 1000,
              network_latest_finalized_block: serai
                .as_of(block.hash())
                .in_instructions()
                .latest_block_for_network(set.network)
                .await?
                // The processor treats this as a magic value which will cause it to find a network
                // block which has a time greater than or equal to the Serai time
                .unwrap_or(BlockHash([0; 32])),
            },
            session: set.session,
            key_pair,
          },
        )
        .await;

      // TODO: If we were in the set, yet were removed, drop the tributary

      let mut txn = db.txn();
      SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
    let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
      panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
    };

    let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh accepted handover event {:?}", accepted_handover);
      // TODO: This isn't atomic with the event handling
      // Send a oneshot receiver so we can await the response?
      perform_slash_report.send(set).unwrap();
      let mut txn = db.txn();
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
    let ValidatorSetsEvent::SetRetired { set } = retired_set else {
      panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
    };

    let Ok(set) = ExternalValidatorSet::try_from(set) else { continue };
    if HandledEvent::is_unhandled(db, hash, event_id) {
      log::info!("found fresh set retired event {:?}", retired_set);
      let mut txn = db.txn();
      crate::ActiveTributaryDb::retire_tributary(&mut txn, set);
      tributary_retired.send(set).unwrap();
      HandledEvent::handle_event(&mut txn, hash, event_id);
      txn.commit();
    }
    event_id += 1;
  }

  // Finally, tell the processor of acknowledged blocks/burns
  // This uses a single event ID since, unlike the prior events (which each executed their own
  // code), all the following events share their data collection
  if HandledEvent::is_unhandled(db, hash, event_id) {
    let mut txn = db.txn();
    handle_batch_and_burns(&mut txn, processors, serai, &block).await?;
    HandledEvent::handle_event(&mut txn, hash, event_id);
    txn.commit();
  }

  Ok(())
}

#[allow(clippy::too_many_arguments)]
async fn handle_new_blocks<D: Db, Pro: Processors>(
  db: &mut D,
  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
  new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
  perform_slash_report: &mpsc::UnboundedSender<ExternalValidatorSet>,
  tributary_retired: &mpsc::UnboundedSender<ExternalValidatorSet>,
  processors: &Pro,
  serai: &Serai,
  next_block: &mut u64,
) -> Result<(), SeraiError> {
  // Check if there's been a new Substrate block
  let latest_number = serai.latest_finalized_block().await?.number();

  // Advance the cosigning protocol
  advance_cosign_protocol(db, key, serai, latest_number).await?;

  // Reduce to the latest cosigned block
  let latest_number = latest_number.min(LatestCosignedBlock::latest_cosigned_block(db));

  if latest_number < *next_block {
    return Ok(());
  }

  for b in *next_block ..= latest_number {
    let block = serai
      .finalized_block_by_number(b)
      .await?
      .expect("couldn't get block before the latest finalized block");

    log::info!("handling substrate block {b}");
    handle_block(
      db,
      key,
      new_tributary_spec,
      perform_slash_report,
      tributary_retired,
      processors,
      serai,
      block,
    )
    .await?;
    *next_block += 1;

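    // Persist our position so a reboot resumes after the last fully handled block instead of
    // re-scanning from the start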
    let mut txn = db.txn();
    NextBlock::set(&mut txn, next_block);
    txn.commit();

    log::info!("handled substrate block {b}");
  }

  Ok(())
}

pub async fn scan_task<D: Db, Pro: Processors>(
  mut db: D,
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  processors: Pro,
  serai: Arc<Serai>,
  new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
  perform_slash_report: mpsc::UnboundedSender<ExternalValidatorSet>,
  tributary_retired: mpsc::UnboundedSender<ExternalValidatorSet>,
) {
  log::info!("scanning substrate");
  let mut next_substrate_block = NextBlock::get(&db).unwrap_or_default();

  /*
  let new_substrate_block_notifier = {
    let serai = &serai;
    move || async move {
      loop {
        match serai.newly_finalized_block().await {
          Ok(sub) => return sub,
          Err(e) => {
            log::error!("couldn't communicate with serai node: {e}");
            sleep(Duration::from_secs(5)).await;
          }
        }
      }
    }
  };
  */
  // TODO: Restore the above subscription-based system
  // That would require moving serai-client from HTTP to websockets
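  // Until then, poll: this notifier returns once the chain has reached the block we're waiting
  // on, checking every few seconds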
  let new_substrate_block_notifier = {
    let serai = &serai;
    move |next_substrate_block| async move {
      loop {
        match serai.latest_finalized_block().await {
          Ok(latest) => {
            if latest.header.number >= next_substrate_block {
              return latest;
            }
            sleep(Duration::from_secs(3)).await;
          }
          Err(e) => {
            log::error!("couldn't communicate with serai node: {e}");
            sleep(Duration::from_secs(5)).await;
          }
        }
      }
    }
  };

  loop {
    // await the next block, yet if our notifier had an error, re-create it
    {
      let Ok(_) = tokio::time::timeout(
        Duration::from_secs(60),
        new_substrate_block_notifier(next_substrate_block),
      )
      .await
      else {
        // Timed out, which may be because Serai isn't finalizing or may be some issue with the
        // notifier
        if serai.latest_finalized_block().await.map(|block| block.number()).ok() ==
          Some(next_substrate_block.saturating_sub(1))
        {
          log::info!("serai hasn't finalized a block in the last 60s...");
        }
        continue;
      };

      /*
      // next_block is a Option<Result>
      if next_block.and_then(Result::ok).is_none() {
        substrate_block_notifier = new_substrate_block_notifier(next_substrate_block);
        continue;
      }
      */
    }

    match handle_new_blocks(
      &mut db,
      &key,
      &new_tributary_spec,
      &perform_slash_report,
      &tributary_retired,
      &processors,
      &serai,
      &mut next_substrate_block,
    )
    .await
    {
      Ok(()) => {}
      Err(e) => {
        log::error!("couldn't communicate with serai node: {e}");
        sleep(Duration::from_secs(5)).await;
      }
    }
  }
}

/// Gets the expected ID for the next Batch.
///
/// On error, this logs it and applies a slight sleep, letting the caller simply retry
/// immediately.
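///
/// A minimal caller sketch (illustrative, not part of the original source):
/// ```ignore
/// let next = loop {
///   if let Ok(next) = expected_next_batch(&serai, network).await {
///     break next;
///   }
/// };
/// ```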
pub(crate) async fn expected_next_batch(
  serai: &Serai,
  network: ExternalNetworkId,
) -> Result<u32, SeraiError> {
  async fn expected_next_batch_inner(
    serai: &Serai,
    network: ExternalNetworkId,
  ) -> Result<u32, SeraiError> {
    let serai = serai.as_of_latest_finalized_block().await?;
    let last = serai.in_instructions().last_batch_for_network(network).await?;
    Ok(if let Some(last) = last { last + 1 } else { 0 })
  }
  match expected_next_batch_inner(serai, network).await {
    Ok(next) => Ok(next),
    Err(e) => {
      log::error!("couldn't get the expected next batch from substrate: {e:?}");
      sleep(Duration::from_millis(100)).await;
      Err(e)
    }
  }
}

/// Verifies `Batch`s which have already been indexed from Substrate.
///
/// Spins if a distinct `Batch` is detected on-chain.
///
/// This has a slight malleability in that it doesn't verify that *who* published a `Batch` is as
/// expected. This is deemed fine.
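///
/// Returns the ID of the last `Batch` verified on-chain, if any has been.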
pub(crate) async fn verify_published_batches<D: Db>(
  txn: &mut D::Transaction<'_>,
  network: ExternalNetworkId,
  optimistic_up_to: u32,
) -> Option<u32> {
  // TODO: Localize from MainDb to SubstrateDb
  let last = crate::LastVerifiedBatchDb::get(txn, network);
  for id in last.map_or(0, |last| last + 1) ..= optimistic_up_to {
    let Some(on_chain) = BatchInstructionsHashDb::get(txn, network, id) else {
      break;
    };
    let off_chain = crate::ExpectedBatchDb::get(txn, network, id).unwrap();
    if on_chain != off_chain {
      // Halt operations on this network and spin, as this is a critical fault
      loop {
        log::error!(
          "{}! network: {:?} id: {} off-chain: {} on-chain: {}",
          "on-chain batch doesn't match off-chain",
          network,
          id,
          hex::encode(off_chain),
          hex::encode(on_chain),
        );
        sleep(Duration::from_secs(60)).await;
      }
    }
    crate::LastVerifiedBatchDb::set(txn, network, &id);
  }

  crate::LastVerifiedBatchDb::get(txn, network)
}
@@ -1,125 +0,0 @@
use core::fmt::Debug;
use std::{
  sync::Arc,
  collections::{VecDeque, HashSet, HashMap},
};

use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};

use processor_messages::CoordinatorMessage;

use async_trait::async_trait;

use tokio::sync::RwLock;

use crate::{
  processors::{Message, Processors},
  TributaryP2p, ReqResMessageKind, GossipMessageKind, P2pMessageKind, Message as P2pMessage, P2p,
};

pub mod tributary;

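/// A test-only `Processors` which buffers sent messages in memory, per network, so tests can
/// assert on exactly what the coordinator sent.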
#[derive(Clone)]
pub struct MemProcessors(pub Arc<RwLock<HashMap<ExternalNetworkId, VecDeque<CoordinatorMessage>>>>);
impl MemProcessors {
  #[allow(clippy::new_without_default)]
  pub fn new() -> MemProcessors {
    MemProcessors(Arc::new(RwLock::new(HashMap::new())))
  }
}

#[async_trait::async_trait]
impl Processors for MemProcessors {
  async fn send(&self, network: ExternalNetworkId, msg: impl Send + Into<CoordinatorMessage>) {
    let mut processors = self.0.write().await;
    let processor = processors.entry(network).or_insert_with(VecDeque::new);
    processor.push_back(msg.into());
  }
  async fn recv(&self, _: ExternalNetworkId) -> Message {
    todo!()
  }
  async fn ack(&self, _: Message) {
    todo!()
  }
}

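/// An in-memory P2P network for tests. The `usize` is this node's index, and the shared lock
/// holds (the set of already-broadcast messages, used for deduplication, and per-validator
/// inbound queues).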
#[allow(clippy::type_complexity)]
#[derive(Clone, Debug)]
pub struct LocalP2p(
  usize,
  pub Arc<RwLock<(HashSet<Vec<u8>>, Vec<VecDeque<(usize, P2pMessageKind, Vec<u8>)>>)>>,
);

impl LocalP2p {
  pub fn new(validators: usize) -> Vec<LocalP2p> {
    let shared = Arc::new(RwLock::new((HashSet::new(), vec![VecDeque::new(); validators])));
    let mut res = vec![];
    for i in 0 .. validators {
      res.push(LocalP2p(i, shared.clone()));
    }
    res
  }
}

#[async_trait]
impl P2p for LocalP2p {
  type Id = usize;

  async fn subscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}
  async fn unsubscribe(&self, _set: ExternalValidatorSet, _genesis: [u8; 32]) {}

  async fn send_raw(&self, to: Self::Id, msg: Vec<u8>) {
    let mut msg_ref = msg.as_slice();
    let kind = ReqResMessageKind::read(&mut msg_ref).unwrap();
    self.1.write().await.1[to].push_back((self.0, P2pMessageKind::ReqRes(kind), msg_ref.to_vec()));
  }

  async fn broadcast_raw(&self, kind: P2pMessageKind, msg: Vec<u8>) {
    // Content-based deduplication
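    // (prevents tests which rebroadcast received messages from echoing them back and forth
    // forever)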
    let mut lock = self.1.write().await;
    {
      let already_sent = &mut lock.0;
      if already_sent.contains(&msg) {
        return;
      }
      already_sent.insert(msg.clone());
    }
    let queues = &mut lock.1;

    let kind_len = (match kind {
      P2pMessageKind::ReqRes(kind) => kind.serialize(),
      P2pMessageKind::Gossip(kind) => kind.serialize(),
    })
    .len();
    let msg = msg[kind_len ..].to_vec();

    for (i, msg_queue) in queues.iter_mut().enumerate() {
      if i == self.0 {
        continue;
      }
      msg_queue.push_back((self.0, kind, msg.clone()));
    }
  }

  async fn receive(&self) -> P2pMessage<Self> {
    // This is a cursed way to implement an async read from a Vec
    loop {
      if let Some((sender, kind, msg)) = self.1.write().await.1[self.0].pop_front() {
        return P2pMessage { sender, kind, msg };
      }
      tokio::time::sleep(std::time::Duration::from_millis(100)).await;
    }
  }
}

#[async_trait]
impl TributaryP2p for LocalP2p {
  async fn broadcast(&self, genesis: [u8; 32], msg: Vec<u8>) {
    <Self as P2p>::broadcast(
      self,
      P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)),
      msg,
    )
    .await
  }
}
@@ -1,240 +0,0 @@
use std::{
  time::{Duration, SystemTime},
  collections::HashSet,
};

use zeroize::Zeroizing;
use rand_core::{RngCore, CryptoRng, OsRng};
use futures_util::{task::Poll, poll};

use dalek_ff_group::Ristretto;
use ciphersuite::{
  group::{ff::Field, GroupEncoding},
  Ciphersuite,
};

use sp_application_crypto::sr25519;
use borsh::BorshDeserialize;
use serai_client::{
  primitives::ExternalNetworkId,
  validator_sets::primitives::{ExternalValidatorSet, Session},
};

use tokio::time::sleep;

use serai_db::MemDb;

use tributary::Tributary;

use crate::{
  GossipMessageKind, P2pMessageKind, P2p,
  tributary::{Transaction, TributarySpec},
  tests::LocalP2p,
};

pub fn new_keys<R: RngCore + CryptoRng>(
  rng: &mut R,
) -> Vec<Zeroizing<<Ristretto as Ciphersuite>::F>> {
  let mut keys = vec![];
  for _ in 0 .. 5 {
    keys.push(Zeroizing::new(<Ristretto as Ciphersuite>::F::random(&mut *rng)));
  }
  keys
}

pub fn new_spec<R: RngCore + CryptoRng>(
  rng: &mut R,
  keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
) -> TributarySpec {
  let mut serai_block = [0; 32];
  rng.fill_bytes(&mut serai_block);

  let start_time = SystemTime::now().duration_since(SystemTime::UNIX_EPOCH).unwrap().as_secs();

  let set = ExternalValidatorSet { session: Session(0), network: ExternalNetworkId::Bitcoin };

  let set_participants = keys
    .iter()
    .map(|key| {
      (sr25519::Public::from((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1)
    })
    .collect::<Vec<_>>();

  let res = TributarySpec::new(serai_block, start_time, set, set_participants);
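  // Sanity check that the spec round-trips through its borsh serialization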
  assert_eq!(
    TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
    res,
  );
  res
}

pub async fn new_tributaries(
  keys: &[Zeroizing<<Ristretto as Ciphersuite>::F>],
  spec: &TributarySpec,
) -> Vec<(MemDb, LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)> {
  let p2p = LocalP2p::new(keys.len());
  let mut res = vec![];
  for (i, key) in keys.iter().enumerate() {
    let db = MemDb::new();
    res.push((
      db.clone(),
      p2p[i].clone(),
      Tributary::<_, Transaction, _>::new(
        db,
        spec.genesis(),
        spec.start_time(),
        key.clone(),
        spec.validators(),
        p2p[i].clone(),
      )
      .await
      .unwrap(),
    ));
  }
  res
}

pub async fn run_tributaries(
  mut tributaries: Vec<(LocalP2p, Tributary<MemDb, Transaction, LocalP2p>)>,
) {
  loop {
    for (p2p, tributary) in &mut tributaries {
      while let Poll::Ready(msg) = poll!(p2p.receive()) {
        match msg.kind {
          P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
            assert_eq!(genesis, tributary.genesis());
            if tributary.handle_message(&msg.msg).await {
              p2p.broadcast(msg.kind, msg.msg).await;
            }
          }
          _ => panic!("unexpected p2p message found"),
        }
      }
    }

    sleep(Duration::from_millis(100)).await;
  }
}

pub async fn wait_for_tx_inclusion(
  tributary: &Tributary<MemDb, Transaction, LocalP2p>,
  mut last_checked: [u8; 32],
  hash: [u8; 32],
) -> [u8; 32] {
  let reader = tributary.reader();
  loop {
    let tip = tributary.tip().await;
    if tip == last_checked {
      sleep(Duration::from_secs(1)).await;
      continue;
    }

    let mut queue = vec![reader.block(&tip).unwrap()];
    let mut block = None;
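    // Cursed do-while: walk back from the tip, queueing ancestors until we reach the genesis or
    // the block we last checked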
    while {
      let parent = queue.last().unwrap().parent();
      if parent == tributary.genesis() {
        false
      } else {
        block = Some(reader.block(&parent).unwrap());
        block.as_ref().unwrap().hash() != last_checked
      }
    } {
      queue.push(block.take().unwrap());
    }

    while let Some(block) = queue.pop() {
      for tx in &block.transactions {
        if tx.hash() == hash {
          return block.hash();
        }
      }
    }

    last_checked = tip;
  }
}

#[tokio::test]
async fn tributary_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let mut tributaries = new_tributaries(&keys, &spec)
    .await
    .into_iter()
    .map(|(_, p2p, tributary)| (p2p, tributary))
    .collect::<Vec<_>>();

  let mut blocks = 0;
  let mut last_block = spec.genesis();

  // Doesn't use run_tributaries as we want to wind these down at a certain point
  // run_tributaries will run them ad infinitum
  let timeout = SystemTime::now() + Duration::from_secs(65);
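  // (duration_since returns Err while the deadline is still in the future, so this loops until
  // the timeout elapses)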
  while (blocks < 10) && (SystemTime::now().duration_since(timeout).is_err()) {
    for (p2p, tributary) in &mut tributaries {
      while let Poll::Ready(msg) = poll!(p2p.receive()) {
        match msg.kind {
          P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
            assert_eq!(genesis, tributary.genesis());
            tributary.handle_message(&msg.msg).await;
          }
          _ => panic!("unexpected p2p message found"),
        }
      }
    }

    let tip = tributaries[0].1.tip().await;
    if tip != last_block {
      last_block = tip;
      blocks += 1;
    }

    sleep(Duration::from_millis(100)).await;
  }

  if blocks != 10 {
    panic!("tributary chain test hit timeout");
  }

  // Handle all existing messages
  for (p2p, tributary) in &mut tributaries {
    while let Poll::Ready(msg) = poll!(p2p.receive()) {
      match msg.kind {
        P2pMessageKind::Gossip(GossipMessageKind::Tributary(genesis)) => {
          assert_eq!(genesis, tributary.genesis());
          tributary.handle_message(&msg.msg).await;
        }
        _ => panic!("unexpected p2p message found"),
      }
    }
  }

  // handle_message informed the Tendermint machine, yet it still has to process it
  // Sleep for a second accordingly
  // TODO: Is there a better way to handle this?
  sleep(Duration::from_secs(1)).await;

  // All tributaries should agree on the tip, within a block
  let mut tips = HashSet::new();
  for (_, tributary) in &tributaries {
    tips.insert(tributary.tip().await);
  }
  assert!(tips.len() <= 2);
  if tips.len() == 2 {
    for tip in &tips {
      // Find a Tributary where this isn't the tip
      for (_, tributary) in &tributaries {
        let Some(after) = tributary.reader().block_after(tip) else { continue };
        // Make sure the block after is the other tip
        assert!(tips.contains(&after));
        return;
      }
    }
  } else {
    assert_eq!(tips.len(), 1);
    return;
  }
  panic!("tributary had different tip with a variance exceeding one block");
}
@@ -1,394 +0,0 @@
use core::time::Duration;
use std::collections::HashMap;

use zeroize::Zeroizing;
use rand_core::{RngCore, OsRng};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use frost::Participant;

use sp_runtime::traits::Verify;
use serai_client::{
  primitives::{SeraiAddress, Signature},
  validator_sets::primitives::{ExternalValidatorSet, KeyPair},
};

use tokio::time::sleep;

use serai_db::{Get, DbTxn, Db, MemDb};

use processor_messages::{
  key_gen::{self, KeyGenId},
  CoordinatorMessage,
};

use tributary::{TransactionTrait, Tributary};

use crate::{
  tributary::{
    Transaction, TributarySpec,
    scanner::{PublishSeraiTransaction, handle_new_blocks},
  },
  tests::{
    MemProcessors, LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
  },
};

#[tokio::test]
async fn dkg_test() {
  env_logger::init();

  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let full_tributaries = new_tributaries(&keys, &spec).await;
  let mut dbs = vec![];
  let mut tributaries = vec![];
  for (db, p2p, tributary) in full_tributaries {
    dbs.push(db);
    tributaries.push((p2p, tributary));
  }

  // Run the tributaries in the background
  tokio::spawn(run_tributaries(tributaries.clone()));

  let mut txs = vec![];
  // Create DKG commitments for each key
  for key in &keys {
    let attempt = 0;
    let mut commitments = vec![0; 256];
    OsRng.fill_bytes(&mut commitments);

    let mut tx = Transaction::DkgCommitments {
      attempt,
      commitments: vec![commitments],
      signed: Transaction::empty_signed(),
    };
    tx.sign(&mut OsRng, spec.genesis(), key);
    txs.push(tx);
  }

  let block_before_tx = tributaries[0].1.tip().await;

  // Publish all commitments but one
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }

  // Wait until these are included
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  let expected_commitments: HashMap<_, _> = txs
    .iter()
    .enumerate()
    .map(|(i, tx)| {
      if let Transaction::DkgCommitments { commitments, .. } = tx {
        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
      } else {
        panic!("txs had non-commitments");
      }
    })
    .collect();

  async fn new_processors(
    db: &mut MemDb,
    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
    spec: &TributarySpec,
    tributary: &Tributary<MemDb, Transaction, LocalP2p>,
  ) -> MemProcessors {
    let processors = MemProcessors::new();
    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
      db,
      key,
      &|_, _, _, _| async {
        panic!("provided TX caused recognized_id to be called in new_processors")
      },
      &processors,
      &(),
      &|_| async {
        panic!(
          "test tried to publish a new Tributary TX from handle_application_tx in new_processors"
        )
      },
      spec,
      &tributary.reader(),
    )
    .await;
    processors
  }

  // Instantiate a scanner and verify it has nothing to report
  let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
  assert!(processors.0.read().await.is_empty());

  // Publish the last commitment
  let block_before_tx = tributaries[0].1.tip().await;
  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Verify the scanner emits a KeyGen::Commitments message
  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
    &mut dbs[0],
    &keys[0],
    &|_, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after Commitments")
    },
    &processors,
    &(),
    &|_| async {
      panic!(
        "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
      )
    },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }

  // Verify all keys exhibit this scanner behavior
  for (i, key) in keys.iter().enumerate().skip(1) {
    let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert!(msgs.is_empty());
  }

  // Now do shares
  let mut txs = vec![];
  for (k, key) in keys.iter().enumerate() {
    let attempt = 0;

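    // One set of shares, with a share for each other validator (the sender doesn't include one
    // for itself)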
    let mut shares = vec![vec![]];
    for i in 0 .. keys.len() {
      if i != k {
        let mut share = vec![0; 256];
        OsRng.fill_bytes(&mut share);
        shares.last_mut().unwrap().push(share);
      }
    }

    let mut txn = dbs[k].txn();
    let mut tx = Transaction::DkgShares {
      attempt,
      shares,
      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
      signed: Transaction::empty_signed(),
    };
    txn.commit();
    tx.sign(&mut OsRng, spec.genesis(), key);
    txs.push(tx);
  }

  let block_before_tx = tributaries[0].1.tip().await;
  for (i, tx) in txs.iter().enumerate().skip(1) {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }
  for tx in txs.iter().skip(1) {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  // With just 4 sets of shares, nothing should happen yet
  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
    &mut dbs[0],
    &keys[0],
    &|_, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after some shares")
    },
    &processors,
    &(),
    &|_| async {
      panic!(
        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
      )
    },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  assert_eq!(processors.0.read().await.len(), 1);
  assert!(processors.0.read().await[&spec.set().network].is_empty());

  // Publish the final set of shares
  let block_before_tx = tributaries[0].1.tip().await;
  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // Each scanner should emit a distinct shares message
  let shares_for = |i: usize| {
    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
      id: KeyGenId { session: spec.set().session, attempt: 0 },
      shares: vec![txs
        .iter()
        .enumerate()
        .filter_map(|(l, tx)| {
          if let Transaction::DkgShares { shares, .. } = tx {
            if i == l {
              None
            } else {
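              // Shares are sent to everyone but the sender, so recipients after the sender shift
              // down by one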
              let relative_i = i - (if i > l { 1 } else { 0 });
              Some((
                Participant::new((l + 1).try_into().unwrap()).unwrap(),
                shares[0][relative_i].clone(),
              ))
            }
          } else {
            panic!("txs had non-shares");
          }
        })
        .collect::<HashMap<_, _>>()],
    })
  };

  // Any scanner which has handled the prior blocks should only emit the new event
  for (i, key) in keys.iter().enumerate() {
    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
      &mut dbs[i],
      key,
      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
      &processors,
      &(),
      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
      &spec,
      &tributaries[i].1.reader(),
    )
    .await;
    {
      let mut msgs = processors.0.write().await;
      assert_eq!(msgs.len(), 1);
      let msgs = msgs.get_mut(&spec.set().network).unwrap();
      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
      assert!(msgs.is_empty());
    }
  }

  // Yet new scanners should emit all events
  for (i, key) in keys.iter().enumerate() {
    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
    let mut msgs = processors.0.write().await;
    assert_eq!(msgs.len(), 1);
    let msgs = msgs.get_mut(&spec.set().network).unwrap();
    let mut expected_commitments = expected_commitments.clone();
    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
    assert_eq!(
      msgs.pop_front().unwrap(),
      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
        id: KeyGenId { session: spec.set().session, attempt: 0 },
        commitments: expected_commitments
      })
    );
    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
    assert!(msgs.is_empty());
  }

  // Send DkgConfirmed
  let mut substrate_key = [0; 32];
  OsRng.fill_bytes(&mut substrate_key);
  let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
  OsRng.fill_bytes(&mut network_key);
  let key_pair =
    KeyPair(serai_client::Public::from(substrate_key), network_key.try_into().unwrap());

  let mut txs = vec![];
  for (i, key) in keys.iter().enumerate() {
    let attempt = 0;
    let mut txn = dbs[i].txn();
    let share =
      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
    txn.commit();

    let mut tx = Transaction::DkgConfirmed {
      attempt,
      confirmation_share: share,
      signed: Transaction::empty_signed(),
    };
    tx.sign(&mut OsRng, spec.genesis(), key);
    txs.push(tx);
  }
  let block_before_tx = tributaries[0].1.tip().await;
  for (i, tx) in txs.iter().enumerate() {
    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
  }
  for tx in &txs {
    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
  }

  struct CheckPublishSetKeys {
    spec: TributarySpec,
    key_pair: KeyPair,
  }
  #[async_trait::async_trait]
  impl PublishSeraiTransaction for CheckPublishSetKeys {
    async fn publish_set_keys(
      &self,
      _db: &(impl Sync + Get),
      set: ExternalValidatorSet,
      removed: Vec<SeraiAddress>,
      key_pair: KeyPair,
      signature: Signature,
    ) {
      assert_eq!(set, self.spec.set());
      assert!(removed.is_empty());
      assert_eq!(self.key_pair, key_pair);
      assert!(signature.verify(
        &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
        &serai_client::Public::from(
          dkg_musig::musig_key_vartime::<Ristretto>(
            serai_client::validator_sets::primitives::musig_context(set.into()),
            &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
          )
          .unwrap()
          .to_bytes()
        ),
      ));
    }
  }

  // The scanner should successfully try to publish a transaction with a validly signed signature
  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
    &mut dbs[0],
    &keys[0],
    &|_, _, _, _| async {
      panic!("provided TX caused recognized_id to be called after DKG confirmation")
    },
    &processors,
    &CheckPublishSetKeys { spec: spec.clone(), key_pair: key_pair.clone() },
    &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
    &spec,
    &tributaries[0].1.reader(),
  )
  .await;
  {
    assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
  }
}
@@ -1,74 +0,0 @@
use core::time::Duration;
use std::sync::Arc;

use rand_core::OsRng;

use tokio::{
  sync::{mpsc, broadcast},
  time::sleep,
};

use serai_db::MemDb;

use tributary::Tributary;

use crate::{
  tributary::Transaction,
  ActiveTributary, TributaryEvent,
  p2p::handle_p2p_task,
  tests::{
    LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries},
  },
};

#[tokio::test]
async fn handle_p2p_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let mut tributaries = new_tributaries(&keys, &spec)
    .await
    .into_iter()
    .map(|(_, p2p, tributary)| (p2p, tributary))
    .collect::<Vec<_>>();

  let mut tributary_senders = vec![];
  let mut tributary_arcs = vec![];
  for (p2p, tributary) in tributaries.drain(..) {
    let tributary = Arc::new(tributary);
    tributary_arcs.push(tributary.clone());
    let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
    let (cosign_send, _) = mpsc::unbounded_channel();
    tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
    new_tributary_send
      .send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
      .map_err(|_| "failed to send ActiveTributary")
      .unwrap();
    tributary_senders.push(new_tributary_send);
  }
  let tributaries = tributary_arcs;

  // After two blocks of time, we should have a new block
  // We don't wait one block of time as we may have missed the chance for this block
  sleep(Duration::from_secs((2 * Tributary::<MemDb, Transaction, LocalP2p>::block_time()).into()))
    .await;
  let tip = tributaries[0].tip().await;
  assert!(tip != spec.genesis());

  // Sleep one second to make sure this block propagates
  sleep(Duration::from_secs(1)).await;
  // Make sure every tributary has it
  for tributary in &tributaries {
    assert!(tributary.reader().block(&tip).is_some());
  }

  // Then after another block of time, we should have yet another new block
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
  let new_tip = tributaries[0].tip().await;
  assert!(new_tip != tip);
  sleep(Duration::from_secs(1)).await;
  for tributary in tributaries {
    assert!(tributary.reader().block(&new_tip).is_some());
  }
}
@@ -1,294 +0,0 @@
use core::fmt::Debug;

use rand_core::{RngCore, OsRng};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::Group, Ciphersuite};

use scale::{Encode, Decode};
use serai_client::{
  primitives::{SeraiAddress, Signature},
  validator_sets::primitives::{ExternalValidatorSet, KeyPair, MAX_KEY_SHARES_PER_SET},
};
use processor_messages::coordinator::SubstrateSignableId;

use tributary::{ReadWrite, tests::random_signed_with_nonce};

use crate::tributary::{Label, SignData, Transaction, scanner::PublishSeraiTransaction};

mod chain;
pub use chain::*;

mod tx;

mod dkg;
// TODO: Test the other transactions

mod handle_p2p;
mod sync;

#[async_trait::async_trait]
impl PublishSeraiTransaction for () {
  async fn publish_set_keys(
    &self,
    _db: &(impl Sync + serai_db::Get),
    _set: ExternalValidatorSet,
    _removed: Vec<SeraiAddress>,
    _key_pair: KeyPair,
    _signature: Signature,
  ) {
    panic!("publish_set_keys was called in test")
  }
}

fn random_u32<R: RngCore>(rng: &mut R) -> u32 {
  u32::try_from(rng.next_u64() >> 32).unwrap()
}

fn random_vec<R: RngCore>(rng: &mut R, limit: usize) -> Vec<u8> {
  let len = usize::try_from(rng.next_u64() % u64::try_from(limit).unwrap()).unwrap();
  let mut res = vec![0; len];
  rng.fill_bytes(&mut res);
  res
}

fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(
  rng: &mut R,
  plan: Id,
  label: Label,
) -> SignData<Id> {
  SignData {
    plan,
    attempt: random_u32(&mut OsRng),
    label,

    data: {
      let mut res = vec![];
      for _ in 0 ..= (rng.next_u64() % 255) {
        res.push(random_vec(&mut OsRng, 512));
      }
      res
    },

    signed: random_signed_with_nonce(&mut OsRng, label.nonce()),
  }
}

fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {
  assert_eq!(value, &RW::read::<&[u8]>(&mut value.serialize().as_ref()).unwrap());
}

#[test]
fn tx_size_limit() {
  use serai_client::validator_sets::primitives::MAX_KEY_LEN;

  use tributary::TRANSACTION_SIZE_LIMIT;

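  // Assuming a ⌈2n/3⌉ + 1 threshold, a DKG participant commits to one coefficient per threshold
  // share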
  let max_dkg_coefficients = (MAX_KEY_SHARES_PER_SET * 2).div_ceil(3) + 1;
  let max_key_shares_per_individual = MAX_KEY_SHARES_PER_SET - max_dkg_coefficients;
  // Handwave the DKG Commitments size as the size of the commitments to the coefficients and
  // 1024 bytes for all overhead
  let handwaved_dkg_commitments_size = (max_dkg_coefficients * MAX_KEY_LEN) + 1024;
  assert!(
    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
      (handwaved_dkg_commitments_size * max_key_shares_per_individual)
  );

  // Encryption key, PoP (2 elements), message
  let elements_per_share = 4;
  let handwaved_dkg_shares_size =
    (elements_per_share * MAX_KEY_LEN * MAX_KEY_SHARES_PER_SET) + 1024;
  assert!(
    u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() >=
      (handwaved_dkg_shares_size * max_key_shares_per_individual)
  );
}

#[test]
fn serialize_sign_data() {
  fn test_read_write<Id: Clone + PartialEq + Eq + Debug + Encode + Decode>(value: &SignData<Id>) {
    let mut buf = vec![];
    value.write(&mut buf).unwrap();
    assert_eq!(value, &SignData::read(&mut buf.as_slice()).unwrap())
  }

  let mut plan = [0; 3];
  OsRng.fill_bytes(&mut plan);
  test_read_write(&random_sign_data::<_, _>(
    &mut OsRng,
    plan,
    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
  ));
  let mut plan = [0; 5];
  OsRng.fill_bytes(&mut plan);
  test_read_write(&random_sign_data::<_, _>(
    &mut OsRng,
    plan,
    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
  ));
  let mut plan = [0; 8];
  OsRng.fill_bytes(&mut plan);
  test_read_write(&random_sign_data::<_, _>(
    &mut OsRng,
    plan,
    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
  ));
  let mut plan = [0; 24];
  OsRng.fill_bytes(&mut plan);
  test_read_write(&random_sign_data::<_, _>(
    &mut OsRng,
    plan,
    if (OsRng.next_u64() % 2) == 0 { Label::Preprocess } else { Label::Share },
  ));
}

#[test]
fn serialize_transaction() {
  test_read_write(&Transaction::RemoveParticipantDueToDkg {
    participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
    signed: random_signed_with_nonce(&mut OsRng, 0),
  });

  {
    let mut commitments = vec![random_vec(&mut OsRng, 512)];
    for _ in 0 .. (OsRng.next_u64() % 100) {
      let mut temp = commitments[0].clone();
      OsRng.fill_bytes(&mut temp);
      commitments.push(temp);
    }
    test_read_write(&Transaction::DkgCommitments {
      attempt: random_u32(&mut OsRng),
      commitments,
      signed: random_signed_with_nonce(&mut OsRng, 0),
    });
  }

  {
    // This supports a variable share length, and a variable amount of sent shares, yet the share
    // length and the amount of sent shares are expected to be constant among recipients
    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
    // Create a valid vec of shares
    let mut shares = vec![];
    // Create up to 150 participants
    for _ in 0 ..= (OsRng.next_u64() % 150) {
      // Give each sender multiple shares
      let mut sender_shares = vec![];
      for _ in 0 .. amount_of_shares {
        let mut share = vec![0; share_len];
        OsRng.fill_bytes(&mut share);
        sender_shares.push(share);
      }
      shares.push(sender_shares);
    }

    test_read_write(&Transaction::DkgShares {
      attempt: random_u32(&mut OsRng),
      shares,
      confirmation_nonces: {
        let mut nonces = [0; 64];
        OsRng.fill_bytes(&mut nonces);
        nonces
      },
      signed: random_signed_with_nonce(&mut OsRng, 1),
    });
  }

  for i in 0 .. 2 {
    test_read_write(&Transaction::InvalidDkgShare {
      attempt: random_u32(&mut OsRng),
      accuser: frost::Participant::new(
        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
      )
      .unwrap(),
      faulty: frost::Participant::new(
        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
      )
      .unwrap(),
      blame: if i == 0 {
        None
      } else {
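        // Filter out an empty blame, which presumably wouldn't survive the round-trip as a Some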
        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
      },
      signed: random_signed_with_nonce(&mut OsRng, 2),
    });
  }

  test_read_write(&Transaction::DkgConfirmed {
    attempt: random_u32(&mut OsRng),
    confirmation_share: {
      let mut share = [0; 32];
      OsRng.fill_bytes(&mut share);
      share
    },
    signed: random_signed_with_nonce(&mut OsRng, 2),
  });

  {
    let mut block = [0; 32];
    OsRng.fill_bytes(&mut block);
    test_read_write(&Transaction::CosignSubstrateBlock(block));
  }

  {
    let mut block = [0; 32];
    OsRng.fill_bytes(&mut block);
    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
    test_read_write(&Transaction::Batch { block, batch });
  }
  test_read_write(&Transaction::SubstrateBlock(OsRng.next_u64()));

  {
    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
    test_read_write(&Transaction::SubstrateSign(random_sign_data(
      &mut OsRng,
      SubstrateSignableId::Batch(batch),
      Label::Preprocess,
    )));
  }
  {
    let batch = u32::try_from(OsRng.next_u64() >> 32).unwrap();
    test_read_write(&Transaction::SubstrateSign(random_sign_data(
      &mut OsRng,
      SubstrateSignableId::Batch(batch),
      Label::Share,
    )));
  }

  {
    let mut plan = [0; 32];
    OsRng.fill_bytes(&mut plan);
    test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Preprocess)));
  }
  {
    let mut plan = [0; 32];
    OsRng.fill_bytes(&mut plan);
    test_read_write(&Transaction::Sign(random_sign_data(&mut OsRng, plan, Label::Share)));
  }

  {
    let mut plan = [0; 32];
    OsRng.fill_bytes(&mut plan);
    let mut tx_hash = vec![0; (OsRng.next_u64() % 64).try_into().unwrap()];
    OsRng.fill_bytes(&mut tx_hash);
    test_read_write(&Transaction::SignCompleted {
      plan,
      tx_hash,
      first_signer: random_signed_with_nonce(&mut OsRng, 2).signer,
      signature: random_signed_with_nonce(&mut OsRng, 2).signature,
    });
  }

  test_read_write(&Transaction::SlashReport(
    {
      let amount =
        usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();
      let mut points = vec![];
      for _ in 0 .. amount {
        points.push((OsRng.next_u64() >> 32).try_into().unwrap());
      }
      points
    },
    random_signed_with_nonce(&mut OsRng, 0),
  ));
}
@@ -1,166 +0,0 @@
use core::time::Duration;
use std::{sync::Arc, collections::HashSet};

use rand_core::OsRng;

use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};

use tokio::{
  sync::{mpsc, broadcast},
  time::sleep,
};

use serai_db::MemDb;

use tributary::Tributary;

use crate::{
  tributary::Transaction,
  ActiveTributary, TributaryEvent,
  p2p::{heartbeat_tributaries_task, handle_p2p_task},
  tests::{
    LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries},
  },
};

#[tokio::test]
|
|
||||||
async fn sync_test() {
|
|
||||||
let mut keys = new_keys(&mut OsRng);
|
|
||||||
let spec = new_spec(&mut OsRng, &keys);
|
|
||||||
// Ensure this can have a node fail
|
|
||||||
assert!(spec.n(&[]) > spec.t());
|
|
||||||
|
|
||||||
let mut tributaries = new_tributaries(&keys, &spec)
|
|
||||||
.await
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, p2p, tributary)| (p2p, tributary))
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
// Keep a Tributary back, effectively having it offline
|
|
||||||
let syncer_key = keys.pop().unwrap();
|
|
||||||
let (syncer_p2p, syncer_tributary) = tributaries.pop().unwrap();
|
|
||||||
|
|
||||||
// Have the rest form a P2P net
|
|
||||||
let mut tributary_senders = vec![];
|
|
||||||
let mut tributary_arcs = vec![];
|
|
||||||
let mut p2p_threads = vec![];
|
|
||||||
for (p2p, tributary) in tributaries.drain(..) {
|
|
||||||
let tributary = Arc::new(tributary);
|
|
||||||
tributary_arcs.push(tributary.clone());
|
|
||||||
let (new_tributary_send, new_tributary_recv) = broadcast::channel(5);
|
|
||||||
let (cosign_send, _) = mpsc::unbounded_channel();
|
|
||||||
let thread = tokio::spawn(handle_p2p_task(p2p, cosign_send, new_tributary_recv));
|
|
||||||
new_tributary_send
|
|
||||||
.send(TributaryEvent::NewTributary(ActiveTributary { spec: spec.clone(), tributary }))
|
|
||||||
.map_err(|_| "failed to send ActiveTributary")
|
|
||||||
.unwrap();
|
|
||||||
tributary_senders.push(new_tributary_send);
|
|
||||||
p2p_threads.push(thread);
|
|
||||||
}
|
|
||||||
let tributaries = tributary_arcs;
|
|
||||||
|
|
||||||
// After four blocks of time, we should have a new block
|
|
||||||
// We don't wait one block of time as we may have missed the chance for the first block
|
|
||||||
// We don't wait two blocks because we may have missed the chance, and then had a failure to
|
|
||||||
// propose by our 'offline' validator, which would cause the Tendermint round time to increase,
|
|
||||||
// requiring a longer delay
  let block_time = u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time());
  sleep(Duration::from_secs(4 * block_time)).await;
  let tip = tributaries[0].tip().await;
  assert!(tip != spec.genesis());

  // Sleep one second to make sure this block propagates
  sleep(Duration::from_secs(1)).await;
  // Make sure every tributary has it
  for tributary in &tributaries {
    assert!(tributary.reader().block(&tip).is_some());
  }

  // Now that we've confirmed the other tributaries formed a net without issue, drop the syncer's
  // pending P2P messages
  syncer_p2p.1.write().await.1.last_mut().unwrap().clear();

  // Have it join the net
  let syncer_key = Ristretto::generator() * *syncer_key;
  let syncer_tributary = Arc::new(syncer_tributary);
  let (syncer_tributary_send, syncer_tributary_recv) = broadcast::channel(5);
  let (cosign_send, _) = mpsc::unbounded_channel();
  tokio::spawn(handle_p2p_task(syncer_p2p.clone(), cosign_send, syncer_tributary_recv));
  syncer_tributary_send
    .send(TributaryEvent::NewTributary(ActiveTributary {
      spec: spec.clone(),
      tributary: syncer_tributary.clone(),
    }))
    .map_err(|_| "failed to send ActiveTributary to syncer")
    .unwrap();

  // It shouldn't automatically catch up. If it somehow did, our test would be broken
  // Sanity check this
  let tip = tributaries[0].tip().await;
  // Wait until a new block occurs
  sleep(Duration::from_secs(3 * block_time)).await;
  // Make sure a new block actually occurred
  assert!(tributaries[0].tip().await != tip);
  // Make sure the new block alone didn't trigger catching up
  assert_eq!(syncer_tributary.tip().await, spec.genesis());

  // Start the heartbeat protocol
  let (syncer_heartbeat_tributary_send, syncer_heartbeat_tributary_recv) = broadcast::channel(5);
  tokio::spawn(heartbeat_tributaries_task(syncer_p2p, syncer_heartbeat_tributary_recv));
  syncer_heartbeat_tributary_send
    .send(TributaryEvent::NewTributary(ActiveTributary {
      spec: spec.clone(),
      tributary: syncer_tributary.clone(),
    }))
    .map_err(|_| "failed to send ActiveTributary to heartbeat")
    .unwrap();

  // The heartbeat is once every 10 blocks, with some limitations
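  // (The heartbeat task presumably gossips our tip so peers can respond with the blocks we're
  // missing; 20 block times should cover at least one heartbeat plus the resulting transfer.)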
  sleep(Duration::from_secs(20 * block_time)).await;
  assert!(syncer_tributary.tip().await != spec.genesis());

  // Verify it synced to the tip
  let syncer_tip = {
    let tributary = &tributaries[0];

    let tip = tributary.tip().await;
    let syncer_tip = syncer_tributary.tip().await;
    // Allow a one-block tolerance in case of race conditions
    assert!(
      HashSet::from([tip, tributary.reader().block(&tip).unwrap().parent()]).contains(&syncer_tip)
    );
    syncer_tip
  };

  sleep(Duration::from_secs(block_time)).await;

  // Verify it's now keeping up
  assert!(syncer_tributary.tip().await != syncer_tip);

  // Verify it's now participating in consensus
  // Because only `t` validators are used in a commit, take `n - t` nodes offline, leaving only
  // `t` nodes, which should force the syncer to participate in the consensus of the next blocks.
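  // (For example, with n = 5 and t = 4, one spare is aborted and the syncer becomes necessary
  // for any further commit. The exact values depend on what `new_spec` produced.)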
  let spares = usize::from(spec.n(&[]) - spec.t());
  for thread in p2p_threads.iter().take(spares) {
    thread.abort();
  }

  // Wait for a block
  sleep(Duration::from_secs(block_time)).await;

  if syncer_tributary
    .reader()
    .parsed_commit(&syncer_tributary.tip().await)
    .unwrap()
    .validators
    .iter()
    .any(|signer| signer == &syncer_key.to_bytes())
  {
    return;
  }

  panic!("synced tributary didn't start participating in consensus");
}
@@ -1,63 +0,0 @@
use core::time::Duration;

use rand_core::{RngCore, OsRng};

use tokio::time::sleep;

use serai_db::MemDb;

use tributary::{
  transaction::Transaction as TransactionTrait, Transaction as TributaryTransaction, Tributary,
};

use crate::{
  tributary::Transaction,
  tests::{
    LocalP2p,
    tributary::{new_keys, new_spec, new_tributaries, run_tributaries, wait_for_tx_inclusion},
  },
};

#[tokio::test]
async fn tx_test() {
  let keys = new_keys(&mut OsRng);
  let spec = new_spec(&mut OsRng, &keys);

  let tributaries = new_tributaries(&keys, &spec)
    .await
    .into_iter()
    .map(|(_, p2p, tributary)| (p2p, tributary))
    .collect::<Vec<_>>();

  // Run the tributaries in the background
  tokio::spawn(run_tributaries(tributaries.clone()));

  // Send a TX from a random Tributary
  let sender =
    usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
  let key = keys[sender].clone();

  let attempt = 0;
  let mut commitments = vec![0; 256];
  OsRng.fill_bytes(&mut commitments);

  // Create the TX with a null signature so we can get its sig hash
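  // (`Transaction::empty_signed()` fills the signature field with placeholder bytes; the real
  // signature is produced by `tx.sign` below, binding it to the genesis and this content.)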
  let block_before_tx = tributaries[sender].1.tip().await;
  let mut tx = Transaction::DkgCommitments {
    attempt,
    commitments: vec![commitments.clone()],
    signed: Transaction::empty_signed(),
  };
  tx.sign(&mut OsRng, spec.genesis(), &key);

  assert_eq!(tributaries[sender].1.add_transaction(tx.clone()).await, Ok(true));
  let included_in = wait_for_tx_inclusion(&tributaries[sender].1, block_before_tx, tx.hash()).await;
  // Also sleep for the block time to ensure the block is synced around before we run checks on it
  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;

  // All tributaries should have acknowledged this transaction in a block
  for (_, tributary) in tributaries {
    let block = tributary.reader().block(&included_in).unwrap();
    assert_eq!(block.transactions, vec![TributaryTransaction::Application(tx.clone())]);
  }
}

595 coordinator/src/tributary.rs Normal file
@@ -0,0 +1,595 @@
use core::{future::Future, time::Duration};
use std::sync::Arc;

use zeroize::Zeroizing;
use rand_core::OsRng;
use blake2::{digest::typenum::U32, Digest, Blake2s};
use ciphersuite::*;
use dalek_ff_group::Ristretto;

use tokio::sync::mpsc;

use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};

use serai_client::validator_sets::primitives::ExternalValidatorSet;

use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};

use serai_task::{Task, TaskHandle, DoesNotError, ContinuallyRan};

use message_queue::{Service, Metadata, client::MessageQueue};

use serai_cosign::{Faulted, CosignIntent, Cosigning};
use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
use serai_coordinator_tributary::{
  Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
};
use serai_coordinator_p2p::P2p;

use crate::{
  Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
  RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
};

create_db! {
  Coordinator {
    PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction,
  }
}

db_channel! {
  Coordinator {
    PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent,
  }
}
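// (`create_db!` generates a typed key-value accessor, while `db_channel!` generates a typed FIFO
// channel over the database, so `PendingCosigns` persists across reboots until received.)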
/// Provide a Provided Transaction to the Tributary.
///
/// This is not a well-designed function. This is specific to the context in which it's called,
/// within this file. It should only be considered an internal helper for this domain alone.
async fn provide_transaction<TD: DbTrait, P: P2p>(
  set: ExternalValidatorSet,
  tributary: &Tributary<TD, Transaction, P>,
  tx: Transaction,
) {
  match tributary.provide_transaction(tx.clone()).await {
    // The Tributary uses its own DB, so we may provide this multiple times if we reboot before
    // committing the txn which provoked this
    Ok(()) | Err(ProvidedError::AlreadyProvided) => {}
    Err(ProvidedError::NotProvided) => {
      panic!("providing a Transaction which wasn't a Provided transaction: {tx:?}");
    }
    Err(ProvidedError::InvalidProvided(e)) => {
      panic!("providing an invalid Provided transaction, tx: {tx:?}, error: {e:?}")
    }
    // The Tributary's scan task won't advance if we don't have the Provided transactions
    // present on-chain, and this enters an infinite loop to block the calling task from
    // advancing
    Err(ProvidedError::LocalMismatchesOnChain) => loop {
      log::error!(
        "Tributary {set:?} was supposed to provide {tx:?} but peers disagree, halting Tributary",
      );
      // Print this every five minutes as this does need to be handled
      tokio::time::sleep(Duration::from_secs(5 * 60)).await;
    },
  }
}

/// Provides Cosign/Cosigned Transactions onto the Tributary.
pub(crate) struct ProvideCosignCosignedTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  set: NewSetInformation,
  tributary: Tributary<TD, Transaction, P>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
  for ProvideCosignCosignedTransactionsTask<CD, TD, P>
{
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Check if we produced any cosigns we were supposed to
      let mut pending_notable_cosign = false;
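      // (A cosign flagged notable must complete before any further cosign intents are provided;
      // this flag tracks whether such a cosign is still outstanding.)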
      loop {
        let mut txn = self.db.txn();

        // Fetch the next cosign this tributary should handle
        let Some(cosign) = PendingCosigns::try_recv(&mut txn, self.set.set) else { break };
        pending_notable_cosign = cosign.notable;

        // If we (Serai) haven't cosigned this block, break as this is still pending
        let latest = match Cosigning::<CD>::latest_cosigned_block_number(&txn) {
          Ok(latest) => latest,
          Err(Faulted) => {
            log::error!("cosigning faulted");
            Err("cosigning faulted")?
          }
        };
        if latest < cosign.block_number {
          break;
        }

        // Because we've cosigned it, provide the TX for that
        {
          let mut txn = self.tributary_db.txn();
          CosignIntents::provide(&mut txn, self.set.set, &cosign);
          txn.commit();
        }
        provide_transaction(
          self.set.set,
          &self.tributary,
          Transaction::Cosigned { substrate_block_hash: cosign.block_hash },
        )
        .await;
        // Clear pending_notable_cosign since this cosign isn't pending
        pending_notable_cosign = false;

        // Commit the txn to clear this from PendingCosigns
        txn.commit();
        made_progress = true;
      }

      // If we don't have any notable cosigns pending, provide the next set of cosign intents
      if !pending_notable_cosign {
        let mut txn = self.db.txn();
        // intended_cosigns will only yield up to and including the next notable cosign
        for cosign in Cosigning::<CD>::intended_cosigns(&mut txn, self.set.set) {
          // Flag this cosign as pending
          PendingCosigns::send(&mut txn, self.set.set, &cosign);
          // Provide the transaction to queue it for work
          provide_transaction(
            self.set.set,
            &self.tributary,
            Transaction::Cosign { substrate_block_hash: cosign.block_hash },
          )
          .await;
        }
        txn.commit();
        made_progress = true;
      }

      Ok(made_progress)
    }
  }
}

#[must_use]
async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
  tributary: &Tributary<TD, Transaction, P>,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  mut tx: Transaction,
) -> bool {
  // If this is a signed transaction, sign it
  if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
    tx.sign(&mut OsRng, tributary.genesis(), key);
  }

  let res = tributary.add_transaction(tx.clone()).await;
  match &res {
    // Fresh publication, already published
    Ok(true | false) => {}
    Err(
      TransactionError::TooLargeTransaction |
        TransactionError::InvalidSigner |
        TransactionError::InvalidSignature |
        TransactionError::InvalidContent,
    ) => {
      panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
    }
    // InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
    // on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
    // issue is this transaction was already included on-chain
    Err(TransactionError::InvalidNonce) => {
      let TransactionKind::Signed(order, signed) = tx.kind() else {
        panic!("non-Signed transaction had InvalidNonce");
      };
      let next_nonce = tributary
        .next_nonce(&signed.signer, &order)
        .await
        .expect("signer who is a present validator didn't have a nonce");
      assert!(next_nonce != signed.nonce);
      // We're publishing an old transaction
      if next_nonce > signed.nonce {
        return true;
      }
      panic!("nonce in transaction wasn't contiguous with nonce on-chain");
    }
    // We've published too many transactions recently
    Err(TransactionError::TooManyInMempool) => {
      return false;
    }
    // This isn't a Provided transaction so this should never be hit
    Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
  }

  true
}
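
/// Add a transaction to the Tributary.
///
/// Provided transactions are provided immediately. Signed/Unsigned transactions whose topic
/// still requires recognition are stashed in `PublishOnRecognition` and published once the
/// topic is recognized.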
async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
  set: ExternalValidatorSet,
  tributary_db: &mut TD,
  tributary: &Tributary<TD, Transaction, P>,
  key: &Zeroizing<<Ristretto as WrappedGroup>::F>,
  tx: Transaction,
) -> bool {
  let kind = tx.kind();
  match kind {
    TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
    TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
      // If this is a transaction with signing data, check the topic is recognized before
      // publishing
      let topic = tx.topic();
      let still_requires_recognition = if let Some(topic) = topic {
        (topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
          .then_some(topic)
      } else {
        None
      };
      if let Some(topic) = still_requires_recognition {
        // Queue the transaction until the topic is recognized
        // We use the Tributary DB for this so it's cleaned up when the Tributary DB is dropped
        let mut tributary_txn = tributary_db.txn();
        PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
        tributary_txn.commit();
      } else {
        // Actually add the transaction
        if !add_signed_unsigned_transaction(tributary, key, tx).await {
          return false;
        }
      }
    }
  }
  true
}

/// Adds all of the transactions sent via `TributaryTransactionsFromDkgConfirmation` and
/// `TributaryTransactionsFromProcessorMessages`, along with any participant removals.
pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: NewSetInformation,
  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

      // Provide/add all transactions sent our way
      loop {
        let mut txn = self.db.txn();
        let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
        else {
          break;
        };

        if !add_with_recognition_check(
          self.set.set,
          &mut self.tributary_db,
          &self.tributary,
          &self.key,
          tx,
        )
        .await
        {
          break;
        }

        made_progress = true;
        txn.commit();
      }

      loop {
        let mut txn = self.db.txn();
        let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
        else {
          break;
        };

        if !add_with_recognition_check(
          self.set.set,
          &mut self.tributary_db,
          &self.tributary,
          &self.key,
          tx,
        )
        .await
        {
          break;
        }

        made_progress = true;
        txn.commit();
      }

      // Provide/add all transactions due to newly recognized topics
      loop {
        let mut tributary_txn = self.tributary_db.txn();
        let Some(topic) =
          RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
        else {
          break;
        };
        if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
          if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
            break;
          }
        }

        made_progress = true;
        tributary_txn.commit();
      }

      // Publish any participant removals
      loop {
        let mut txn = self.db.txn();
        let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
        let tx = Transaction::RemoveParticipant {
          participant: self.set.participant_indexes_reverse_lookup[&participant],
          signed: Default::default(),
        };
        if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
          break;
        }
        made_progress = true;
        txn.commit();
      }

      Ok(made_progress)
    }
  }
}

/// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
  tributary_db: TD,
  set: ExternalValidatorSet,
  message_queue: Arc<MessageQueue>,
}
impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
  type Error = String; // TODO

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
      loop {
        let mut txn = self.tributary_db.txn();
        let Some(msg) = ProcessorMessages::try_recv(&mut txn, self.set) else { break };
        let metadata = Metadata {
          from: Service::Coordinator,
          to: Service::Processor(self.set.network),
          intent: msg.intent(),
        };
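        // (The intent presumably acts as an idempotency key: if we reboot after queueing but
        // before committing `txn`, re-queueing the same message is deduplicated downstream.)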
        let msg = borsh::to_vec(&msg).unwrap();
        self.message_queue.queue(metadata, msg).await?;
        txn.commit();
        made_progress = true;
      }
      Ok(made_progress)
    }
  }
}

/// Checks for the notification to sign a slash report and does so if present.
pub(crate) struct SignSlashReportTask<CD: DbTrait, TD: DbTrait, P: P2p> {
  db: CD,
  tributary_db: TD,
  tributary: Tributary<TD, Transaction, P>,
  set: NewSetInformation,
  key: Zeroizing<<Ristretto as WrappedGroup>::F>,
}
impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD, TD, P> {
  type Error = DoesNotError;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut txn = self.db.txn();
      let Some(()) = SignSlashReport::try_recv(&mut txn, self.set.set) else { return Ok(false) };

      // Fetch the slash report for this Tributary
      let mut tx =
        serai_coordinator_tributary::slash_report_transaction(&self.tributary_db, &self.set);
      tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);

      let res = self.tributary.add_transaction(tx.clone()).await;
      match &res {
        // Fresh publication, already published
        Ok(true | false) => {}
        Err(
          TransactionError::TooLargeTransaction |
            TransactionError::InvalidSigner |
            TransactionError::InvalidNonce |
            TransactionError::InvalidSignature |
            TransactionError::InvalidContent,
        ) => {
          panic!("created an invalid SlashReport transaction, tx: {tx:?}, err: {res:?}");
        }
        // We've published too many transactions recently
        // Drop this txn to try to publish it again later on a future iteration
        Err(TransactionError::TooManyInMempool) => {
          drop(txn);
          return Ok(false);
        }
        // This isn't a Provided transaction so this should never be hit
        Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
      }

      txn.commit();
      Ok(true)
    }
  }
}

/// Run the scan task whenever the Tributary adds a new block.
async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
  db: CD,
  set: ExternalValidatorSet,
  tributary: Tributary<TD, Transaction, P>,
  scan_tributary_task: TaskHandle,
  tasks_to_keep_alive: Vec<TaskHandle>,
) {
  loop {
    // Break once this Tributary is retired
    if crate::RetiredTributary::get(&db, set.network).map(|session| session.0) >=
      Some(set.session.0)
    {
      drop(tasks_to_keep_alive);
      break;
    }

    // Have the tributary scanner run as soon as there's a new block
    match tributary.next_block_notification().await.await {
      Ok(()) => scan_tributary_task.run_now(),
      // unreachable since this owns the tributary object and doesn't drop it
      Err(_) => panic!("tributary was dropped causing notification to error"),
    }
  }
}

/// Spawn a Tributary.
///
/// This will:
/// - Spawn the Tributary
/// - Inform the P2P network of the Tributary
/// - Spawn the ScanTributaryTask
/// - Spawn the ProvideCosignCosignedTransactionsTask
/// - Spawn the TributaryProcessorMessagesTask
/// - Spawn the AddTributaryTransactionsTask
/// - Spawn the ConfirmDkgTask
/// - Spawn the SignSlashReportTask
/// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
pub(crate) async fn spawn_tributary<P: P2p>(
  db: Db,
  message_queue: Arc<MessageQueue>,
  p2p: P,
  p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
  set: NewSetInformation,
  serai_key: Zeroizing<<Ristretto as WrappedGroup>::F>,
) {
  // Don't spawn retired Tributaries
  if crate::db::RetiredTributary::get(&db, set.set.network).map(|session| session.0) >=
    Some(set.set.session.0)
  {
    return;
  }

  let genesis =
    <[u8; 32]>::from(Blake2s::<U32>::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap()));
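  // (The genesis is derived deterministically from the declaring Serai block and the set itself,
  // so every validator in the set computes the same genesis without any communication.)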
  // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
  // be a couple of minutes stale. While the Tributary will still function with a start time in the
  // past, the Tributary will immediately incur round timeouts. We reduce these by adding a
  // constant delay of a couple of minutes.
  const TRIBUTARY_START_TIME_DELAY: u64 = 120;
  let start_time = set.declaration_time + TRIBUTARY_START_TIME_DELAY;

  let mut tributary_validators = Vec::with_capacity(set.validators.len());
  for (validator, weight) in set.validators.iter().copied() {
    let validator_key = <Ristretto as GroupIo>::read_G(&mut validator.0.as_slice())
      .expect("Serai validator had an invalid public key");
    let weight = u64::from(weight);
    tributary_validators.push((validator_key, weight));
  }

  // Spawn the Tributary
  let tributary_db = crate::db::tributary_db(set.set);
  let tributary = Tributary::new(
    tributary_db.clone(),
    genesis,
    start_time,
    serai_key.clone(),
    tributary_validators,
    p2p,
  )
  .await
  .unwrap();
  let reader = tributary.reader();

  // Inform the P2P network
  p2p_add_tributary
    .send((set.set, tributary.clone()))
    .expect("p2p's add_tributary channel was closed?");

  // Spawn the task to provide Cosign/Cosigned transactions onto the Tributary
  let (provide_cosign_cosigned_transactions_task_def, provide_cosign_cosigned_transactions_task) =
    Task::new();
  tokio::spawn(
    (ProvideCosignCosignedTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      set: set.clone(),
      tributary: tributary.clone(),
    })
    .continually_run(provide_cosign_cosigned_transactions_task_def, vec![]),
  );

  // Spawn the task to send all messages from the Tributary scanner to the message-queue
  let (scan_tributary_messages_task_def, scan_tributary_messages_task) = Task::new();
  tokio::spawn(
    (TributaryProcessorMessagesTask {
      tributary_db: tributary_db.clone(),
      set: set.set,
      message_queue,
    })
    .continually_run(scan_tributary_messages_task_def, vec![]),
  );

  // Spawn the scan task
  let (scan_tributary_task_def, scan_tributary_task) = Task::new();
  tokio::spawn(
    ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
      // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
      // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
  );

  // Spawn the add transactions task
  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
  tokio::spawn(
    (AddTributaryTransactionsTask {
      db: db.clone(),
      tributary_db: tributary_db.clone(),
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key.clone(),
    })
    .continually_run(add_tributary_transactions_task_def, vec![]),
  );

  // Spawn the task to confirm the DKG result
  let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
  tokio::spawn(
    ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
      .continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
  );

  // Spawn the sign slash report task
  let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
  tokio::spawn(
    (SignSlashReportTask {
      db: db.clone(),
      tributary_db,
      tributary: tributary.clone(),
      set: set.clone(),
      key: serai_key,
    })
    .continually_run(sign_slash_report_task_def, vec![]),
  );

  // Whenever a new block occurs, immediately run the scan task
  // This function also preserves the ProvideCosignCosignedTransactionsTask, ConfirmDkgTask, and
  // SignSlashReportTask handles until the Tributary is retired, ensuring they aren't dropped
  // prematurely and that the tasks don't run ad infinitum
  tokio::spawn(scan_on_new_block(
    db,
    set.set,
    tributary,
    scan_tributary_task,
    vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
  ));
}

@@ -1,198 +0,0 @@
use std::collections::HashMap;

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use dalek_ff_group::Ristretto;
use ciphersuite::{group::GroupEncoding, Ciphersuite};
use frost::Participant;

use serai_client::validator_sets::primitives::{KeyPair, ExternalValidatorSet};

use processor_messages::coordinator::SubstrateSignableId;

pub use serai_db::*;

use tributary::ReadWrite;

use crate::tributary::{Label, Transaction};

#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
pub enum Topic {
  Dkg,
  DkgConfirmation,
  SubstrateSign(SubstrateSignableId),
  Sign([u8; 32]),
}

// A struct to refer to a piece of data all validators will presumably provide a value for.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode)]
pub struct DataSpecification {
  pub topic: Topic,
  pub label: Label,
  pub attempt: u32,
}

pub enum DataSet {
  Participating(HashMap<Participant, Vec<u8>>),
  NotParticipating,
}

pub enum Accumulation {
  Ready(DataSet),
  NotReady,
}

// TODO: Move from genesis to set for indexing
create_db!(
  Tributary {
    SeraiBlockNumber: (hash: [u8; 32]) -> u64,
    SeraiDkgCompleted: (spec: ExternalValidatorSet) -> [u8; 32],

    TributaryBlockNumber: (block: [u8; 32]) -> u32,
    LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],

    // TODO: Revisit the point of this
    FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
    RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
    OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
    // TODO: Combine these two
    FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
    SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,

    VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),
    VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,

    AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
    ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec<Topic>,
    DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
    DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,

    DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
    ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
    DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
    KeyToDkgAttempt: (key: [u8; 32]) -> u32,
    DkgLocallyCompleted: (genesis: [u8; 32]) -> (),

    PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,

    SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,

    SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
    SlashReported: (genesis: [u8; 32]) -> u16,
    SlashReportCutOff: (genesis: [u8; 32]) -> u64,
    SlashReport: (set: ExternalValidatorSet) -> Vec<([u8; 32], u32)>,
  }
);

impl FatalSlashes {
  pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
    FatalSlashes::get(getter, genesis)
      .unwrap_or(vec![])
      .iter()
      .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
      .collect::<Vec<_>>()
  }
}

impl FatallySlashed {
  pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) {
    Self::set(txn, genesis, account, &());
    let mut existing = FatalSlashes::get(txn, genesis).unwrap_or_default();

    // Don't append if we already have it, which can occur upon multiple faults
    if existing.iter().any(|existing| existing == &account) {
      return;
    }

    existing.push(account);
    FatalSlashes::set(txn, genesis, &existing);
  }
}

impl AttemptDb {
  pub fn recognize_topic(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) {
    Self::set(txn, genesis, &topic, &0u32);
  }

  pub fn start_next_attempt(txn: &mut impl DbTxn, genesis: [u8; 32], topic: Topic) -> u32 {
    let next =
      Self::attempt(txn, genesis, topic).expect("starting next attempt for unknown topic") + 1;
    Self::set(txn, genesis, &topic, &next);
    next
  }

  pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
    let attempt = Self::get(getter, genesis, &topic);
    // Don't require explicit recognition of the Dkg topic as it starts when the chain does
    // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
    // should always happen (eventually)
    if attempt.is_none() &&
      ((topic == Topic::Dkg) ||
        (topic == Topic::DkgConfirmation) ||
        (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
    {
      return Some(0);
    }
    attempt
  }
}

impl ReattemptDb {
  pub fn schedule_reattempt(
    txn: &mut impl DbTxn,
    genesis: [u8; 32],
    current_block_number: u32,
    topic: Topic,
  ) {
    // 5 minutes
    #[cfg(not(feature = "longer-reattempts"))]
    const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;

    // 10 minutes, intended for latent environments like the GitHub CI
    #[cfg(feature = "longer-reattempts")]
    const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;

    // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
    // Assumes no event will take longer than 15 minutes, yet grows the time in case there are
    // network bandwidth issues
    let mut reattempt_delay = BASE_REATTEMPT_DELAY *
      ((AttemptDb::attempt(txn, genesis, topic)
        .expect("scheduling re-attempt for unknown topic") /
        3) +
        1)
      .min(3);
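    // (Worked example: attempt 0 gives (0 / 3) + 1 = 1, so one base delay; attempt 4 gives
    // (4 / 3) + 1 = 2, so two; any attempt >= 6 is capped by the .min(3) at three base delays.)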
    // Allow more time for DKGs since they have an extra round and much more data
    if matches!(topic, Topic::Dkg) {
      reattempt_delay *= 4;
    }
    let upon_block = current_block_number + reattempt_delay;

    let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
    reattempts.push(topic);
    Self::set(txn, genesis, upon_block, &reattempts);
  }

  pub fn take(txn: &mut impl DbTxn, genesis: [u8; 32], block_number: u32) -> Vec<Topic> {
    let res = Self::get(txn, genesis, block_number).unwrap_or(vec![]);
    if !res.is_empty() {
      Self::del(txn, genesis, block_number);
    }
    res
  }
}

impl SignedTransactionDb {
  pub fn take_signed_transaction(
    txn: &mut impl DbTxn,
    order: &[u8],
    nonce: u32,
  ) -> Option<Transaction> {
    let res = SignedTransactionDb::get(txn, order, nonce)
      .map(|bytes| Transaction::read(&mut bytes.as_slice()).unwrap());
    if res.is_some() {
      Self::del(txn, order, nonce);
    }
    res
  }
}
Some files were not shown because too many files have changed in this diff