Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-08 12:19:24 +00:00)

Compare commits: ff-0.14...undroppabl (1 commit, ce3b90541e)
.github/nightly-version (vendored, 2 lines changed)

@@ -1 +1 @@
-nightly-2025-02-01
+nightly-2024-07-01
.github/workflows/monero-tests.yaml (vendored, 5 lines changed)

@@ -39,6 +39,9 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib

       # Doesn't run unit tests with features as the tests workflow will

@@ -62,6 +65,7 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'

       - name: Run Integration Tests
         # Don't run if the the tests workflow also will

@@ -70,3 +74,4 @@ jobs:
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
           GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/networks-tests.yml (vendored, 3 lines changed)

@@ -46,4 +46,7 @@ jobs:
             -p monero-simple-request-rpc \
             -p monero-address \
             -p monero-wallet \
+            -p monero-seed \
+            -p polyseed \
+            -p monero-wallet-util \
             -p monero-serai-verify-chain
.github/workflows/pages.yml (vendored, 37 lines changed)

@@ -1,7 +1,6 @@
 # MIT License
 #
 # Copyright (c) 2022 just-the-docs
-# Copyright (c) 2022-2024 Luke Parker
 #
 # Permission is hereby granted, free of charge, to any person obtaining a copy
 # of this software and associated documentation files (the "Software"), to deal

@@ -21,21 +20,31 @@
 # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 # SOFTWARE.

-name: Deploy Rust docs and Jekyll site to Pages
+# This workflow uses actions that are not certified by GitHub.
+# They are provided by a third-party and are governed by
+# separate terms of service, privacy policy, and support
+# documentation.
+
+# Sample workflow for building and deploying a Jekyll site to GitHub Pages
+name: Deploy Jekyll site to Pages

 on:
   push:
     branches:
       - "develop"
+    paths:
+      - "docs/**"

+  # Allows you to run this workflow manually from the Actions tab
   workflow_dispatch:

+# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
 permissions:
   contents: read
   pages: write
   id-token: write

-# Only allow one concurrent deployment
+# Allow one concurrent deployment
 concurrency:
   group: "pages"
   cancel-in-progress: true

@@ -44,6 +53,9 @@ jobs:
   # Build job
   build:
     runs-on: ubuntu-latest
+    defaults:
+      run:
+        working-directory: docs
     steps:
       - name: Checkout
        uses: actions/checkout@v3

@@ -57,24 +69,11 @@ jobs:
         id: pages
         uses: actions/configure-pages@v3
       - name: Build with Jekyll
-        run: cd ${{ github.workspace }}/docs && bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
+        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
         env:
           JEKYLL_ENV: production

-      - name: Get nightly version to use
-        id: nightly
-        shell: bash
-        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
-      - name: Buld Rust docs
-        run: |
-          rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c rust-docs
-          RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
-          mv target/doc docs/_site/rust
-
       - name: Upload artifact
-        uses: actions/upload-pages-artifact@v3
+        uses: actions/upload-pages-artifact@v1
         with:
           path: "docs/_site/"

@@ -88,4 +87,4 @@ jobs:
     steps:
       - name: Deploy to GitHub Pages
         id: deployment
-        uses: actions/deploy-pages@v4
+        uses: actions/deploy-pages@v2
Cargo.lock (generated, 2581 lines changed)

File diff suppressed because it is too large.
Cargo.toml (14 lines changed)

@@ -64,6 +64,9 @@ members = [
   "networks/monero/rpc/simple-request",
   "networks/monero/wallet/address",
   "networks/monero/wallet",
+  "networks/monero/wallet/seed",
+  "networks/monero/wallet/polyseed",
+  "networks/monero/wallet/util",
   "networks/monero/verify-chain",

   "message-queue",

@@ -141,9 +144,9 @@ members = [

   "tests/docker",
   "tests/message-queue",
-  # TODO "tests/processor",
-  # TODO "tests/coordinator",
-  # TODO "tests/full-stack",
+  "tests/processor",
+  "tests/coordinator",
+  "tests/full-stack",
   "tests/reproducible-runtime",
 ]

@@ -205,10 +208,12 @@ matches = { path = "patches/matches" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }

+# The official pasta_curves repo doesn't support Zeroize
+pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+
 [workspace.lints.clippy]
 unwrap_or_default = "allow"
 map_unwrap_or = "allow"
-needless_continue = "allow"
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"

@@ -239,6 +244,7 @@ manual_string_new = "deny"
 match_bool = "deny"
 match_same_arms = "deny"
 missing_fields_in_debug = "deny"
+needless_continue = "deny"
 needless_pass_by_value = "deny"
 ptr_cast_constness = "deny"
 range_minus_one = "deny"
LICENSE (2 lines changed)

@@ -5,4 +5,4 @@ a full copy of the AGPL-3.0 License is included in the root of this repository
 as a reference text. This copy should be provided with any distribution of a
 crate licensed under the AGPL-3.0, as per its terms.

-The GitHub actions/workflows (`.github`) are licensed under the MIT license.
+The GitHub actions (`.github/actions`) are licensed under the MIT license.
Binary file not shown.
@@ -1,427 +0,0 @@
(Deleted file: the verbatim 427-line text of the Creative Commons Attribution-ShareAlike 4.0 International license. The file name is not shown in this view.)
@@ -1,14 +0,0 @@
-# Trail of Bits Ethereum Contracts Audit, June 2025
-
-This audit included:
-- Our Schnorr contract and associated library (/networks/ethereum/schnorr)
-- Our Ethereum primitives library (/processor/ethereum/primitives)
-- Our Deployer contract and associated library (/processor/ethereum/deployer)
-- Our ERC20 library (/processor/ethereum/erc20)
-- Our Router contract and associated library (/processor/ethereum/router)
-
-It is encompassing up to commit 4e0c58464fc4673623938335f06e2e9ea96ca8dd.
-
-Please see
-https://github.com/trailofbits/publications/blob/30c4fa3ebf39ff8e4d23ba9567344ec9691697b5/reviews/2025-04-serai-dex-security-review.pdf
-for provenance.
@@ -30,13 +30,53 @@ pub trait Get {
 /// is undefined. The transaction may block, deadlock, panic, overwrite one of the two values
 /// randomly, or any other action, at time of write or at time of commit.
 #[must_use]
-pub trait DbTxn: Send + Get {
+pub trait DbTxn: Sized + Send + Get {
   /// Write a value to this key.
   fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
   /// Delete the value from this key.
   fn del(&mut self, key: impl AsRef<[u8]>);
   /// Commit this transaction.
   fn commit(self);
+  /// Close this transaction.
+  ///
+  /// This is equivalent to `Drop` on transactions which can be dropped. This is explicit and works
+  /// with transactions which can't be dropped.
+  fn close(self) {
+    drop(self);
+  }
+}
+
+// Credit for the idea goes to https://jack.wrenn.fyi/blog/undroppable
+pub struct Undroppable<T>(Option<T>);
+impl<T> Drop for Undroppable<T> {
+  fn drop(&mut self) {
+    // Use an assertion at compile time to prevent this code from compiling if generated
+    #[allow(clippy::assertions_on_constants)]
+    const {
+      assert!(false, "Undroppable DbTxn was dropped. Ensure all code paths call commit or close");
+    }
+  }
+}
+impl<T: DbTxn> Get for Undroppable<T> {
+  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
+    self.0.as_ref().unwrap().get(key)
+  }
+}
+impl<T: DbTxn> DbTxn for Undroppable<T> {
+  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().put(key, value);
+  }
+  fn del(&mut self, key: impl AsRef<[u8]>) {
+    self.0.as_mut().unwrap().del(key);
+  }
+  fn commit(mut self) {
+    self.0.take().unwrap().commit();
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
+  fn close(mut self) {
+    drop(self.0.take().unwrap());
+    let _ = core::mem::ManuallyDrop::new(self);
+  }
 }

 /// A database supporting atomic transaction.

@@ -51,6 +91,10 @@ pub trait Db: 'static + Send + Sync + Clone + Get {
     let dst_len = u8::try_from(item_dst.len()).unwrap();
     [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
   }
-  /// Open a new transaction.
-  fn txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which may be dropped.
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_>;
+  /// Open a new transaction which must be committed or closed.
+  fn txn(&mut self) -> Undroppable<Self::Transaction<'_>> {
+    Undroppable(Some(self.unsafe_txn()))
+  }
 }
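The effect on call sites: `Db::txn` now returns `Undroppable<Self::Transaction<'_>>`, whose `Drop` impl contains a failing `const` assertion. Per the referenced blog post, that assertion only errors if codegen ever instantiates the drop glue, so code compiles only when every path consumes the transaction via `commit` or `close`. A minimal call-site sketch, assuming the `serai-db` traits exactly as shown in this diff (the helper function and key name are illustrative):

```rust
use serai_db::{Db, DbTxn, Get};

// Hypothetical helper: bump a little-endian u64 counter atomically.
fn increment_counter(db: &mut impl Db) {
    let mut txn = db.txn(); // Undroppable: silently dropping it won't compile
    let current = txn
        .get(b"counter")
        .map(|bytes| u64::from_le_bytes(bytes.try_into().unwrap()))
        .unwrap_or(0);
    txn.put(b"counter", (current + 1).to_le_bytes());
    // Every path must end in commit() (persist) or close() (discard);
    // db.unsafe_txn() remains available where a droppable transaction is needed.
    txn.commit();
}
```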
@@ -74,7 +74,7 @@ impl Get for MemDb {
 }
 impl Db for MemDb {
   type Transaction<'a> = MemDbTxn<'a>;
-  fn txn(&mut self) -> MemDbTxn<'_> {
+  fn unsafe_txn(&mut self) -> MemDbTxn<'_> {
     MemDbTxn(self, HashMap::new(), HashSet::new())
   }
 }
@@ -37,7 +37,7 @@ impl Get for Arc<ParityDb> {
 }
 impl Db for Arc<ParityDb> {
   type Transaction<'a> = Transaction<'a>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     Transaction(self, vec![])
   }
 }
@@ -39,7 +39,7 @@ impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
 }
 impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
   type Transaction<'a> = Transaction<'a, T>;
-  fn txn(&mut self) -> Self::Transaction<'_> {
+  fn unsafe_txn(&mut self) -> Self::Transaction<'_> {
     let mut opts = WriteOptions::default();
     opts.set_sync(true);
     Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
@@ -11,7 +11,7 @@ use crate::{Client, Error};
 #[allow(dead_code)]
 #[derive(Debug)]
 pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
-impl Response<'_> {
+impl<'a> Response<'a> {
   pub fn status(&self) -> StatusCode {
     self.0.status()
   }
@@ -25,13 +25,12 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 blake2 = { version = "0.10", default-features = false, features = ["std"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }

-ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }
-dkg = { path = "../crypto/dkg", default-features = false, features = ["std"] }
+ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
+schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
+frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }

-hex = { version = "0.4", default-features = false, features = ["std"] }
 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

 zalloc = { path = "../common/zalloc" }
 serai-db = { path = "../common/db" }

@@ -44,6 +43,9 @@ tributary-sdk = { path = "./tributary-sdk" }

 serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

+hex = { version = "0.4", default-features = false, features = ["std"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
@@ -24,6 +24,15 @@ pub(crate) struct CosignDelayTask<D: Db> {
   pub(crate) db: D,
 }

+struct AwaitUndroppable<T: DbTxn>(Option<core::mem::ManuallyDrop<Undroppable<T>>>);
+impl<T: DbTxn> Drop for AwaitUndroppable<T> {
+  fn drop(&mut self) {
+    if let Some(mut txn) = self.0.take() {
+      (unsafe { core::mem::ManuallyDrop::take(&mut txn) }).close();
+    }
+  }
+}
+
 impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
   type Error = DoesNotError;

@@ -35,14 +44,18 @@ impl<D: Db> ContinuallyRan for CosignDelayTask<D> {
       // Receive the next block to mark as cosigned
       let Some((block_number, time_evaluated)) = CosignedBlocks::try_recv(&mut txn) else {
+        txn.close();
         break;
       };

       // Calculate when we should mark it as valid
       let time_valid =
         SystemTime::UNIX_EPOCH + Duration::from_secs(time_evaluated) + ACKNOWLEDGEMENT_DELAY;
       // Sleep until then
+      let mut txn = AwaitUndroppable(Some(core::mem::ManuallyDrop::new(txn)));
       tokio::time::sleep(SystemTime::now().duration_since(time_valid).unwrap_or(Duration::ZERO))
         .await;
+      let mut txn = core::mem::ManuallyDrop::into_inner(txn.0.take().unwrap());

       // Set the cosigned block
       LatestCosignedBlockNumber::set(&mut txn, &block_number);
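The `AwaitUndroppable` wrapper above exists because an `Undroppable` transaction cannot be a plain local across an `.await`: if the future were cancelled there, the local would be dropped, and `Undroppable`'s `Drop` must never be instantiated. Stashing the transaction in `ManuallyDrop` keeps its drop glue out of the future's cancellation path, while the wrapper's own `Drop` closes the transaction explicitly. A commented restatement of the pattern from this diff (not new API; the `serai_db` import path is assumed):

```rust
use core::mem::ManuallyDrop;
use serai_db::{DbTxn, Undroppable};

// Cancellation-safe holder for an Undroppable transaction across an .await
struct AwaitUndroppable<T: DbTxn>(Option<ManuallyDrop<Undroppable<T>>>);

impl<T: DbTxn> Drop for AwaitUndroppable<T> {
    fn drop(&mut self) {
        // Only reached if the future is dropped mid-await: extract the
        // transaction and close() it explicitly, so Undroppable's Drop (a
        // compile-time error) is never instantiated
        if let Some(mut txn) = self.0.take() {
            // SAFETY: txn was just taken out of the Option and is never touched again
            (unsafe { ManuallyDrop::take(&mut txn) }).close();
        }
    }
}
```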
@@ -1,5 +1,5 @@
 use core::future::Future;
-use std::time::{Duration, Instant, SystemTime};
+use std::time::{Duration, SystemTime};

 use serai_db::*;
 use serai_task::ContinuallyRan;

@@ -77,27 +77,17 @@ pub(crate) fn currently_evaluated_global_session(getter: &impl Get) -> Option<[u
 pub(crate) struct CosignEvaluatorTask<D: Db, R: RequestNotableCosigns> {
   pub(crate) db: D,
   pub(crate) request: R,
-  pub(crate) last_request_for_cosigns: Instant,
 }

 impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D, R> {
   type Error = String;

   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
-    let should_request_cosigns = |last_request_for_cosigns: &mut Instant| {
-      const REQUEST_COSIGNS_SPACING: Duration = Duration::from_secs(60);
-      if Instant::now() < (*last_request_for_cosigns + REQUEST_COSIGNS_SPACING) {
-        return false;
-      }
-      *last_request_for_cosigns = Instant::now();
-      true
-    };
-
     async move {
       let mut known_cosign = None;
       let mut made_progress = false;
       loop {
-        let mut txn = self.db.txn();
+        let mut txn = self.db.unsafe_txn();
         let Some(BlockEventData { block_number, has_events }) = BlockEvents::try_recv(&mut txn)
         else {
           break;

@@ -128,13 +118,12 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
           // Check if the sum weight doesn't cross the required threshold
           if weight_cosigned < (((global_session_info.total_stake * 83) / 100) + 1) {
             // Request the necessary cosigns over the network
-            if should_request_cosigns(&mut self.last_request_for_cosigns) {
+            // TODO: Add a timer to ensure this isn't called too often
             self
               .request
               .request_notable_cosigns(global_session)
               .await
               .map_err(|e| format!("{e:?}"))?;
-            }
             // We return an error so the delay before this task is run again increases
             return Err(format!(
               "notable block (#{block_number}) wasn't yet cosigned. this should resolve shortly",

@@ -191,13 +180,11 @@ impl<D: Db, R: RequestNotableCosigns> ContinuallyRan for CosignEvaluatorTask<D,
           // If this session hasn't yet produced notable cosigns, then we presume we'll see
           // the desired non-notable cosigns as part of normal operations, without needing to
           // explicitly request them
-          if should_request_cosigns(&mut self.last_request_for_cosigns) {
-            self
-              .request
-              .request_notable_cosigns(global_session)
-              .await
-              .map_err(|e| format!("{e:?}"))?;
-          }
+          self
+            .request
+            .request_notable_cosigns(global_session)
+            .await
+            .map_err(|e| format!("{e:?}"))?;
           // We return an error so the delay before this task is run again increases
           return Err(format!(
             "block (#{block_number}) wasn't yet cosigned. this should resolve shortly",
@@ -3,7 +3,7 @@ use std::{sync::Arc, collections::HashMap};

 use serai_client::{
   primitives::{SeraiAddress, Amount},
-  validator_sets::primitives::ExternalValidatorSet,
+  validator_sets::primitives::ValidatorSet,
   Serai,
 };

@@ -28,7 +28,7 @@ db_channel! {
   CosignIntendChannels {
     GlobalSessionsChannel: () -> ([u8; 32], GlobalSession),
     BlockEvents: () -> BlockEventData,
-    IntendedCosigns: (set: ExternalValidatorSet) -> CosignIntent,
+    IntendedCosigns: (set: ValidatorSet) -> CosignIntent,
   }
 }

@@ -70,7 +70,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
       self.serai.latest_finalized_block().await.map_err(|e| format!("{e:?}"))?.number();

       for block_number in start_block_number ..= latest_block_number {
-        let mut txn = self.db.txn();
+        let mut txn = self.db.unsafe_txn();

         let (block, mut has_events) =
           block_has_events_justifying_a_cosign(&self.serai, block_number)

@@ -110,7 +110,7 @@ impl<D: Db> ContinuallyRan for CosignIntendTask<D> {
         keys.insert(set.network, SeraiAddress::from(*key));
         let stake = serai
           .validator_sets()
-          .total_allocated_stake(set.network.into())
+          .total_allocated_stake(set.network)
           .await
           .map_err(|e| format!("{e:?}"))?
           .unwrap_or(Amount(0))
@@ -3,7 +3,7 @@
 #![deny(missing_docs)]

 use core::{fmt::Debug, future::Future};
-use std::{sync::Arc, collections::HashMap, time::Instant};
+use std::{sync::Arc, collections::HashMap};

 use blake2::{Digest, Blake2s256};

@@ -11,8 +11,8 @@ use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};

 use serai_client::{
-  primitives::{ExternalNetworkId, SeraiAddress},
-  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
+  primitives::{NetworkId, SeraiAddress},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
   Public, Block, Serai, TemporalSerai,
 };

@@ -52,13 +52,13 @@ pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
 #[derive(Debug, BorshSerialize, BorshDeserialize)]
 pub(crate) struct GlobalSession {
   pub(crate) start_block_number: u64,
-  pub(crate) sets: Vec<ExternalValidatorSet>,
-  pub(crate) keys: HashMap<ExternalNetworkId, SeraiAddress>,
-  pub(crate) stakes: HashMap<ExternalNetworkId, u64>,
+  pub(crate) sets: Vec<ValidatorSet>,
+  pub(crate) keys: HashMap<NetworkId, SeraiAddress>,
+  pub(crate) stakes: HashMap<NetworkId, u64>,
   pub(crate) total_stake: u64,
 }
 impl GlobalSession {
-  fn id(mut cosigners: Vec<ExternalValidatorSet>) -> [u8; 32] {
+  fn id(mut cosigners: Vec<ValidatorSet>) -> [u8; 32] {
     cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
     Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
   }
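For clarity, `GlobalSession::id` derives an order-independent identifier: cosigners are sorted by their canonical Borsh encoding before the whole list is hashed, so any collection order yields the same ID. A self-contained sketch of the same construction (the `Set` struct here is a hypothetical stand-in for `ValidatorSet`):

```rust
use blake2::{Blake2s256, Digest};
use borsh::BorshSerialize;

// Hypothetical stand-in for ValidatorSet, to keep the sketch self-contained
#[derive(BorshSerialize)]
struct Set {
    network: u8,
    session: u32,
}

// Mirrors GlobalSession::id: sort by canonical encoding, then hash the list
fn id(mut cosigners: Vec<Set>) -> [u8; 32] {
    cosigners.sort_by_key(|a| borsh::to_vec(a).unwrap());
    Blake2s256::digest(borsh::to_vec(&cosigners).unwrap()).into()
}

fn main() {
    let a = id(vec![Set { network: 1, session: 2 }, Set { network: 3, session: 0 }]);
    let b = id(vec![Set { network: 3, session: 0 }, Set { network: 1, session: 2 }]);
    assert_eq!(a, b); // the identifier is independent of collection order
}
```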
@@ -101,25 +101,7 @@ pub struct Cosign {
   /// The hash of the block to cosign.
   pub block_hash: [u8; 32],
   /// The actual cosigner.
-  pub cosigner: ExternalNetworkId,
+  pub cosigner: NetworkId,
 }
-
-impl CosignIntent {
-  /// Convert this into a `Cosign`.
-  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
-    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
-    Cosign { global_session, block_number, block_hash, cosigner }
-  }
-}
-
-impl Cosign {
-  /// The message to sign to sign this cosign.
-  ///
-  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
-  pub fn signature_message(&self) -> Vec<u8> {
-    // We use a schnorrkel context to domain-separate this
-    self.encode()
-  }
-}

 /// A signed cosign.

@@ -136,7 +118,7 @@ impl SignedCosign {
     let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
     let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };

-    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
+    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.encode(), &signature).is_ok()
   }
 }
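The verification above uses schnorrkel's byte-context API against the SCALE encoding of the cosign. A minimal round trip with the same calls, assuming the schnorrkel 0.11 crate and `rand_core`'s `OsRng` as used elsewhere in this workspace (the message bytes are a stand-in for `cosign.encode()`):

```rust
use rand_core::OsRng;
use schnorrkel::Keypair;

const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";

fn main() {
    let keypair = Keypair::generate_with(&mut OsRng);
    // Stand-in for the SCALE-encoded Cosign being signed
    let message = b"scale-encoded cosign";

    // Sign with the same byte context the coordinator uses
    let signature = keypair.sign_simple(COSIGN_CONTEXT, message);

    // Mirrors SignedCosign::verify: same context, same message bytes
    assert!(keypair.public.verify_simple(COSIGN_CONTEXT, message, &signature).is_ok());
}
```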
@@ -166,10 +148,7 @@ create_db! {
|
|||||||
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
// one notable block. All validator sets will explicitly produce a cosign for their notable
|
||||||
// block, causing the latest cosigned block for a global session to either be the global
|
// block, causing the latest cosigned block for a global session to either be the global
|
||||||
// session's notable cosigns or the network's latest cosigns.
|
// session's notable cosigns or the network's latest cosigns.
|
||||||
NetworksLatestCosignedBlock: (
|
NetworksLatestCosignedBlock: (global_session: [u8; 32], network: NetworkId) -> SignedCosign,
|
||||||
global_session: [u8; 32],
|
|
||||||
network: ExternalNetworkId
|
|
||||||
) -> SignedCosign,
|
|
||||||
// Cosigns received for blocks not locally recognized as finalized.
|
// Cosigns received for blocks not locally recognized as finalized.
|
||||||
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
Faults: (global_session: [u8; 32]) -> Vec<SignedCosign>,
|
||||||
// The global session which faulted.
|
// The global session which faulted.
|
||||||
@@ -180,10 +159,15 @@ create_db! {
 /// Fetch the keys used for cosigning by a specific network.
 async fn keys_for_network(
   serai: &TemporalSerai<'_>,
-  network: ExternalNetworkId,
+  network: NetworkId,
 ) -> Result<Option<(Session, KeyPair)>, String> {
+  // The Serai network never cosigns so it has no keys for cosigning
+  if network == NetworkId::Serai {
+    return Ok(None);
+  }
+
   let Some(latest_session) =
-    serai.validator_sets().session(network.into()).await.map_err(|e| format!("{e:?}"))?
+    serai.validator_sets().session(network).await.map_err(|e| format!("{e:?}"))?
   else {
     // If this network hasn't had a session declared, move on
     return Ok(None);
@@ -192,7 +176,7 @@ async fn keys_for_network(
   // Get the keys for the latest session
   if let Some(keys) = serai
     .validator_sets()
-    .keys(ExternalValidatorSet { network, session: latest_session })
+    .keys(ValidatorSet { network, session: latest_session })
     .await
     .map_err(|e| format!("{e:?}"))?
   {
@@ -203,7 +187,7 @@ async fn keys_for_network(
   if let Some(prior_session) = latest_session.0.checked_sub(1).map(Session) {
     if let Some(keys) = serai
       .validator_sets()
-      .keys(ExternalValidatorSet { network, session: prior_session })
+      .keys(ValidatorSet { network, session: prior_session })
       .await
       .map_err(|e| format!("{e:?}"))?
     {
@@ -214,19 +198,16 @@ async fn keys_for_network(
   Ok(None)
 }

-/// Fetch the `ExternalValidatorSet`s, and their associated keys, used for cosigning as of this
-/// block.
-async fn cosigning_sets(
-  serai: &TemporalSerai<'_>,
-) -> Result<Vec<(ExternalValidatorSet, Public)>, String> {
-  let mut sets = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-  for network in serai_client::primitives::EXTERNAL_NETWORKS {
+/// Fetch the `ValidatorSet`s, and their associated keys, used for cosigning as of this block.
+async fn cosigning_sets(serai: &TemporalSerai<'_>) -> Result<Vec<(ValidatorSet, Public)>, String> {
+  let mut sets = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+  for network in serai_client::primitives::NETWORKS {
     let Some((session, keys)) = keys_for_network(serai, network).await? else {
       // If this network doesn't have usable keys, move on
       continue;
     };

-    sets.push((ExternalValidatorSet { network, session }, keys.0));
+    sets.push((ValidatorSet { network, session }, keys.0));
   }
   Ok(sets)
 }
@@ -307,12 +288,8 @@ impl<D: Db> Cosigning<D> {
         .continually_run(intend_task, vec![evaluator_task_handle]),
     );
     tokio::spawn(
-      (evaluator::CosignEvaluatorTask {
-        db: db.clone(),
-        request,
-        last_request_for_cosigns: Instant::now(),
-      })
-      .continually_run(evaluator_task, vec![delay_task_handle]),
+      (evaluator::CosignEvaluatorTask { db: db.clone(), request })
+        .continually_run(evaluator_task, vec![delay_task_handle]),
     );
     tokio::spawn(
       (delay::CosignDelayTask { db: db.clone() })
@@ -346,8 +323,8 @@ impl<D: Db> Cosigning<D> {
   /// If this global session hasn't produced any notable cosigns, this will return the latest
   /// cosigns for this session.
   pub fn notable_cosigns(getter: &impl Get, global_session: [u8; 32]) -> Vec<SignedCosign> {
-    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(getter, global_session, network) {
         cosigns.push(cosign);
       }
@@ -364,7 +341,7 @@ impl<D: Db> Cosigning<D> {
     let mut cosigns = Faults::get(&self.db, faulted).expect("faulted with no faults");
     // Also include all of our recognized-as-honest cosigns in an attempt to induce fault
     // identification in those who see the faulty cosigns as honest
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, faulted, network) {
         if cosign.cosign.global_session == faulted {
           cosigns.push(cosign);
@@ -376,8 +353,8 @@ impl<D: Db> Cosigning<D> {
     let Some(global_session) = evaluator::currently_evaluated_global_session(&self.db) else {
       return vec![];
     };
-    let mut cosigns = Vec::with_capacity(serai_client::primitives::EXTERNAL_NETWORKS.len());
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    let mut cosigns = Vec::with_capacity(serai_client::primitives::NETWORKS.len());
+    for network in serai_client::primitives::NETWORKS {
       if let Some(cosign) = NetworksLatestCosignedBlock::get(&self.db, global_session, network) {
         cosigns.push(cosign);
       }
@@ -447,7 +424,7 @@ impl<D: Db> Cosigning<D> {
     // Since we verified this cosign's signature, and have a chain sufficiently long, handle the
     // cosign

-    let mut txn = self.db.txn();
+    let mut txn = self.db.unsafe_txn();

     if !faulty {
       // If this is for a future global session, we don't acknowledge this cosign at this time
@@ -488,12 +465,12 @@ impl<D: Db> Cosigning<D> {
     Ok(())
   }

-  /// Receive intended cosigns to produce for this ExternalValidatorSet.
+  /// Receive intended cosigns to produce for this ValidatorSet.
   ///
   /// All cosigns intended, up to and including the next notable cosign, are returned.
   ///
   /// This will drain the internal channel and not re-yield these intentions again.
-  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Vec<CosignIntent> {
+  pub fn intended_cosigns(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<CosignIntent> {
     let mut res: Vec<CosignIntent> = vec![];
     // While we have yet to find a notable cosign...
     while !res.last().map(|cosign| cosign.notable).unwrap_or(false) {
@@ -503,3 +480,30 @@ impl<D: Db> Cosigning<D> {
     res
   }
 }
+
+mod tests {
+  use super::*;
+
+  struct RNC;
+  impl RequestNotableCosigns for RNC {
+    /// The error type which may be encountered when requesting notable cosigns.
+    type Error = ();
+
+    /// Request the notable cosigns for this global session.
+    fn request_notable_cosigns(
+      &self,
+      global_session: [u8; 32],
+    ) -> impl Send + Future<Output = Result<(), Self::Error>> {
+      async move { Ok(()) }
+    }
+  }
+  #[tokio::test]
+  async fn test() {
+    let db: serai_db::MemDb = serai_db::MemDb::new();
+    let serai = unsafe { core::mem::transmute(0u64) };
+    let request = RNC;
+    let tasks = vec![];
+    let _ = Cosigning::spawn(db, serai, request, tasks);
+    core::future::pending().await
+  }
+}
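The stub added above leans on a trait method returning `impl Send + Future`, stable since Rust 1.75. A self-contained sketch of that pattern (names hypothetical, not the crate's API):

```rust
use core::future::Future;

trait RequestNotable {
  type Error;
  // Return-position impl Trait in trait: each impl picks its own future type
  fn request(&self, session: [u8; 32]) -> impl Send + Future<Output = Result<(), Self::Error>>;
}

struct NoOp;
impl RequestNotable for NoOp {
  type Error = ();
  fn request(&self, _session: [u8; 32]) -> impl Send + Future<Output = Result<(), Self::Error>> {
    // An async block is the simplest way to produce the future
    async move { Ok(()) }
  }
}
```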
@@ -35,7 +35,7 @@ tributary-sdk = { path = "../../tributary-sdk" }

 futures-util = { version = "0.3", default-features = false, features = ["std"] }
 tokio = { version = "1", default-features = false, features = ["sync"] }
-libp2p = { version = "0.54", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }
+libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "ping", "request-response", "gossipsub", "macros"] }

 log = { version = "0.4", default-features = false, features = ["std"] }
 serai-task = { path = "../../../common/task", version = "0.1" }
@@ -11,7 +11,8 @@ use serai_client::primitives::PublicKey as Public;

 use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::{
-  core::upgrade::{UpgradeInfo, InboundConnectionUpgrade, OutboundConnectionUpgrade},
+  core::UpgradeInfo,
+  InboundUpgrade, OutboundUpgrade,
   identity::{self, PeerId},
   noise,
 };
@@ -118,18 +119,12 @@ impl UpgradeInfo for OnlyValidators {
   }
 }

-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrade<S>
-  for OnlyValidators
-{
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundUpgrade<S> for OnlyValidators {
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

-  fn upgrade_inbound(
-    self,
-    socket: S,
-    info: <Self as UpgradeInfo>::Info,
-  ) -> <Self as InboundConnectionUpgrade<S>>::Future {
+  fn upgrade_inbound(self, socket: S, info: Self::Info) -> Self::Future {
     Box::pin(async move {
       let (dialer_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()
@@ -152,18 +147,12 @@ impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> InboundConnectionUpgrad
   }
 }

-impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundConnectionUpgrade<S>
-  for OnlyValidators
-{
+impl<S: 'static + Send + Unpin + AsyncRead + AsyncWrite> OutboundUpgrade<S> for OnlyValidators {
   type Output = (PeerId, noise::Output<S>);
   type Error = io::Error;
   type Future = Pin<Box<dyn Send + Future<Output = Result<Self::Output, Self::Error>>>>;

-  fn upgrade_outbound(
-    self,
-    socket: S,
-    info: <Self as UpgradeInfo>::Info,
-  ) -> <Self as OutboundConnectionUpgrade<S>>::Future {
+  fn upgrade_outbound(self, socket: S, info: Self::Info) -> Self::Future {
     Box::pin(async move {
       let (listener_noise_peer_id, mut socket) = noise::Config::new(&self.noise_keypair)
         .unwrap()
@@ -14,8 +14,8 @@ use zeroize::Zeroizing;
 use schnorrkel::Keypair;

 use serai_client::{
-  primitives::{ExternalNetworkId, PublicKey},
-  validator_sets::primitives::ExternalValidatorSet,
+  primitives::{NetworkId, PublicKey},
+  validator_sets::primitives::ValidatorSet,
   Serai,
 };

@@ -50,7 +50,7 @@ mod ping;

 /// The request-response messages and behavior
 mod reqres;
-use reqres::{InboundRequestId, Request, Response};
+use reqres::{RequestId, Request, Response};

 /// The gossip messages and behavior
 mod gossip;
@@ -66,6 +66,14 @@ use dial::DialTask;

 const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')

+// usize::max, manually implemented, as max isn't a const fn
+const MAX_LIBP2P_MESSAGE_SIZE: usize =
+  if gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE {
+    gossip::MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
+  } else {
+    reqres::MAX_LIBP2P_REQRES_MESSAGE_SIZE
+  };
+
 fn peer_id_from_public(public: PublicKey) -> PeerId {
   // 0 represents the identity Multihash, that no hash was performed
   // It's an internal constant so we can't refer to the constant inside libp2p
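The branching in the added constant is the standard workaround for `max` not being a `const fn`. Factored into a helper, the same computation reads as follows (sketch; helper name and values are illustrative):

```rust
// std::cmp::max isn't a const fn, but an if/else inside a const fn is
const fn const_max(a: usize, b: usize) -> usize {
  if a > b { a } else { b }
}

const GOSSIP_MAX: usize = 1 << 20; // illustrative values
const REQRES_MAX: usize = 1 << 22;
const MESSAGE_MAX: usize = const_max(GOSSIP_MAX, REQRES_MAX);
```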
@@ -104,7 +112,7 @@ impl serai_coordinator_p2p::Peer<'_> for Peer<'_> {

 #[derive(Clone)]
 struct Peers {
-  peers: Arc<RwLock<HashMap<ExternalNetworkId, HashSet<PeerId>>>>,
+  peers: Arc<RwLock<HashMap<NetworkId, HashSet<PeerId>>>>,
 }

 // Consider adding identify/kad/autonat/rendevous/(relay + dcutr). While we currently use the Serai
@@ -135,10 +143,9 @@ struct Libp2pInner {
   signed_cosigns: Mutex<mpsc::UnboundedReceiver<SignedCosign>>,
   signed_cosigns_send: mpsc::UnboundedSender<SignedCosign>,

-  heartbeat_requests:
-    Mutex<mpsc::UnboundedReceiver<(InboundRequestId, ExternalValidatorSet, [u8; 32])>>,
-  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(InboundRequestId, [u8; 32])>>,
-  inbound_request_responses: mpsc::UnboundedSender<(InboundRequestId, Response)>,
+  heartbeat_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, ValidatorSet, [u8; 32])>>,
+  notable_cosign_requests: Mutex<mpsc::UnboundedReceiver<(RequestId, [u8; 32])>>,
+  inbound_request_responses: mpsc::UnboundedSender<(RequestId, Response)>,
 }

 /// The libp2p-backed P2P implementation.
@@ -169,9 +176,19 @@ impl Libp2p {
       Ok(OnlyValidators { serai_key: serai_key.clone(), noise_keypair: noise_keypair.clone() })
     };

+    let new_yamux = || {
+      let mut config = yamux::Config::default();
+      // 1 MiB default + max message size
+      config.set_max_buffer_size((1024 * 1024) + MAX_LIBP2P_MESSAGE_SIZE);
+      // 256 KiB default + max message size
+      config
+        .set_receive_window_size(((256 * 1024) + MAX_LIBP2P_MESSAGE_SIZE).try_into().unwrap());
+      config
+    };
+
     let mut swarm = SwarmBuilder::with_existing_identity(identity::Keypair::generate_ed25519())
       .with_tokio()
-      .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, yamux::Config::default)
+      .with_tcp(TcpConfig::default().nodelay(true), new_only_validators, new_yamux)
       .unwrap()
       .with_behaviour(|_| Behavior {
         allow_list: allow_block_list::Behaviour::default(),
@@ -313,7 +330,7 @@ impl serai_cosign::RequestNotableCosigns for Libp2p {
 impl serai_coordinator_p2p::P2p for Libp2p {
   type Peer<'a> = Peer<'a>;

-  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
+  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>> {
     async move {
       let Some(peer_ids) = self.0.peers.peers.read().await.get(&network).cloned() else {
         return vec![];
@@ -10,7 +10,7 @@ use futures_util::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt};
 use libp2p::request_response::{
   self, Codec as CodecTrait, Event as GenericEvent, Config, Behaviour, ProtocolSupport,
 };
-pub use request_response::{InboundRequestId, Message};
+pub use request_response::{RequestId, Message};

 use serai_cosign::SignedCosign;

@@ -129,6 +129,7 @@ pub(crate) type Event = GenericEvent<Request, Response>;

 pub(crate) type Behavior = Behaviour<Codec>;
 pub(crate) fn new_behavior() -> Behavior {
-  let config = Config::default().with_request_timeout(Duration::from_secs(5));
+  let mut config = Config::default();
+  config.set_request_timeout(Duration::from_secs(5));
   Behavior::new([(PROTOCOL, ProtocolSupport::Full)], config)
 }
@@ -6,7 +6,7 @@ use std::{

 use borsh::BorshDeserialize;

-use serai_client::validator_sets::primitives::ExternalValidatorSet;
+use serai_client::validator_sets::primitives::ValidatorSet;

 use tokio::sync::{mpsc, oneshot, RwLock};

@@ -17,7 +17,7 @@ use serai_cosign::SignedCosign;
 use futures_util::StreamExt;
 use libp2p::{
   identity::PeerId,
-  request_response::{InboundRequestId, OutboundRequestId, ResponseChannel},
+  request_response::{RequestId, ResponseChannel},
   swarm::{dial_opts::DialOpts, SwarmEvent, Swarm},
 };

@@ -65,12 +65,12 @@ pub(crate) struct SwarmTask {
   tributary_gossip: mpsc::UnboundedSender<([u8; 32], Vec<u8>)>,

   outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,
-  outbound_request_responses: HashMap<OutboundRequestId, oneshot::Sender<Response>>,
+  outbound_request_responses: HashMap<RequestId, oneshot::Sender<Response>>,

-  inbound_request_response_channels: HashMap<InboundRequestId, ResponseChannel<Response>>,
-  heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
-  notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
-  inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
+  inbound_request_response_channels: HashMap<RequestId, ResponseChannel<Response>>,
+  heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
+  notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
+  inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
 }

 impl SwarmTask {
@@ -222,21 +222,25 @@ impl SwarmTask {
           }
         }

-        SwarmEvent::Behaviour(event) => {
-          match event {
-            BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event) => {
+        SwarmEvent::Behaviour(
+          BehaviorEvent::AllowList(event) | BehaviorEvent::ConnectionLimits(event)
+        ) => {
           // This *is* an exhaustive match as these events are empty enums
           match event {}
         }
-            BehaviorEvent::Ping(ping::Event { peer: _, connection, result, }) => {
-              if result.is_err() {
-                self.swarm.close_connection(connection);
-              }
-            }
-            BehaviorEvent::Reqres(event) => self.handle_reqres(event),
-            BehaviorEvent::Gossip(event) => self.handle_gossip(event),
+        SwarmEvent::Behaviour(
+          BehaviorEvent::Ping(ping::Event { peer: _, connection, result, })
+        ) => {
+          if result.is_err() {
+            self.swarm.close_connection(connection);
           }
         }
+        SwarmEvent::Behaviour(BehaviorEvent::Reqres(event)) => {
+          self.handle_reqres(event)
+        }
+        SwarmEvent::Behaviour(BehaviorEvent::Gossip(event)) => {
+          self.handle_gossip(event)
+        }

         // We don't handle any of these
         SwarmEvent::IncomingConnection { .. } |
@@ -246,14 +250,7 @@ impl SwarmTask {
         SwarmEvent::ExpiredListenAddr { .. } |
         SwarmEvent::ListenerClosed { .. } |
         SwarmEvent::ListenerError { .. } |
-        SwarmEvent::Dialing { .. } |
-        SwarmEvent::NewExternalAddrCandidate { .. } |
-        SwarmEvent::ExternalAddrConfirmed { .. } |
-        SwarmEvent::ExternalAddrExpired { .. } |
-        SwarmEvent::NewExternalAddrOfPeer { .. } => {}
-
-        // Requires as SwarmEvent is non-exhaustive
-        _ => log::warn!("unhandled SwarmEvent: {event:?}"),
+        SwarmEvent::Dialing { .. } => {}
       }
     }

@@ -324,9 +321,9 @@ impl SwarmTask {

     outbound_requests: mpsc::UnboundedReceiver<(PeerId, Request, oneshot::Sender<Response>)>,

-    heartbeat_requests: mpsc::UnboundedSender<(InboundRequestId, ExternalValidatorSet, [u8; 32])>,
-    notable_cosign_requests: mpsc::UnboundedSender<(InboundRequestId, [u8; 32])>,
-    inbound_request_responses: mpsc::UnboundedReceiver<(InboundRequestId, Response)>,
+    heartbeat_requests: mpsc::UnboundedSender<(RequestId, ValidatorSet, [u8; 32])>,
+    notable_cosign_requests: mpsc::UnboundedSender<(RequestId, [u8; 32])>,
+    inbound_request_responses: mpsc::UnboundedReceiver<(RequestId, Response)>,
   ) {
     tokio::spawn(
       SwarmTask {
@@ -4,9 +4,7 @@ use std::{
   collections::{HashSet, HashMap},
 };

-use serai_client::{
-  primitives::ExternalNetworkId, validator_sets::primitives::Session, SeraiError, Serai,
-};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, SeraiError, Serai};

 use serai_task::{Task, ContinuallyRan};

@@ -26,11 +24,11 @@ pub(crate) struct Validators {
   serai: Arc<Serai>,

   // A cache for which session we're populated with the validators of
-  sessions: HashMap<ExternalNetworkId, Session>,
+  sessions: HashMap<NetworkId, Session>,
   // The validators by network
-  by_network: HashMap<ExternalNetworkId, HashSet<PeerId>>,
+  by_network: HashMap<NetworkId, HashSet<PeerId>>,
   // The validators and their networks
-  validators: HashMap<PeerId, HashSet<ExternalNetworkId>>,
+  validators: HashMap<PeerId, HashSet<NetworkId>>,

   // The channel to send the changes down
   changes: mpsc::UnboundedSender<Changes>,
@@ -51,16 +49,8 @@ impl Validators {

   async fn session_changes(
     serai: impl Borrow<Serai>,
-    sessions: impl Borrow<HashMap<ExternalNetworkId, Session>>,
-  ) -> Result<Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>, SeraiError> {
-    /*
-      This uses the latest finalized block, not the latest cosigned block, which should be fine as
-      in the worst case, we'd connect to unexpected validators. They still shouldn't be able to
-      bypass the cosign protocol unless a historical global session was malicious, in which case
-      the cosign protocol already breaks.
-
-      Besides, we can't connect to historical validators, only the current validators.
-    */
+    sessions: impl Borrow<HashMap<NetworkId, Session>>,
+  ) -> Result<Vec<(NetworkId, Session, HashSet<PeerId>)>, SeraiError> {
     let temporal_serai = serai.borrow().as_of_latest_finalized_block().await?;
     let temporal_serai = temporal_serai.validator_sets();

@@ -69,10 +59,13 @@ impl Validators {
     // FuturesUnordered can be bad practice as it'll cause timeouts if infrequently polled, but
     // we poll it till it yields all futures with the most minimal processing possible
     let mut futures = FuturesUnordered::new();
-    for network in serai_client::primitives::EXTERNAL_NETWORKS {
+    for network in serai_client::primitives::NETWORKS {
+      if network == NetworkId::Serai {
+        continue;
+      }
       let sessions = sessions.borrow();
       futures.push(async move {
-        let session = match temporal_serai.session(network.into()).await {
+        let session = match temporal_serai.session(network).await {
           Ok(Some(session)) => session,
           Ok(None) => return Ok(None),
           Err(e) => return Err(e),
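The comment in this hunk flags the usual `FuturesUnordered` caveat: the futures in the set only make progress while the set itself is polled. A minimal sketch of the drain-to-completion usage the loop above depends on (assuming `futures-util`):

```rust
use futures_util::{stream::FuturesUnordered, StreamExt};

async fn drain_all() -> Vec<u32> {
  let mut futures = FuturesUnordered::new();
  for i in 0u32 .. 4 {
    futures.push(async move { i * 2 });
  }
  // Poll until every pushed future has yielded; none is left unpolled
  let mut results = vec![];
  while let Some(result) = futures.next().await {
    results.push(result);
  }
  results
}
```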
@@ -81,7 +74,7 @@ impl Validators {
       if sessions.get(&network) == Some(&session) {
         Ok(None)
       } else {
-        match temporal_serai.active_network_validators(network.into()).await {
+        match temporal_serai.active_network_validators(network).await {
           Ok(validators) => Ok(Some((
             network,
             session,
@@ -104,7 +97,7 @@ impl Validators {

   fn incorporate_session_changes(
     &mut self,
-    session_changes: Vec<(ExternalNetworkId, Session, HashSet<PeerId>)>,
+    session_changes: Vec<(NetworkId, Session, HashSet<PeerId>)>,
   ) {
     let mut removed = HashSet::new();
     let mut added = HashSet::new();
@@ -159,11 +152,11 @@ impl Validators {
     Ok(())
   }

-  pub(crate) fn by_network(&self) -> &HashMap<ExternalNetworkId, HashSet<PeerId>> {
+  pub(crate) fn by_network(&self) -> &HashMap<NetworkId, HashSet<PeerId>> {
     &self.by_network
   }

-  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<ExternalNetworkId>> {
+  pub(crate) fn networks(&self, peer_id: &PeerId) -> Option<&HashSet<NetworkId>> {
     self.validators.get(peer_id)
   }
 }
@@ -1,7 +1,7 @@
 use core::future::Future;
 use std::time::{Duration, SystemTime};

-use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
+use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet};

 use futures_lite::FutureExt;

@@ -38,7 +38,7 @@ pub const BATCH_SIZE_LIMIT: usize = MIN_BLOCKS_PER_BATCH *
 /// If the other validator has more blocks then we do, they're expected to inform us. This forms
 /// the sync protocol for our Tributaries.
 pub(crate) struct HeartbeatTask<TD: Db, Tx: TransactionTrait, P: P2p> {
-  pub(crate) set: ExternalValidatorSet,
+  pub(crate) set: ValidatorSet,
   pub(crate) tributary: Tributary<TD, Tx, P>,
   pub(crate) reader: TributaryReader<TD, Tx>,
   pub(crate) p2p: P,
@@ -7,7 +7,7 @@ use std::collections::HashMap;

 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

 use serai_db::Db;
 use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};
@@ -25,7 +25,7 @@ use crate::heartbeat::HeartbeatTask;
 #[derive(Clone, Copy, BorshSerialize, BorshDeserialize, Debug)]
 pub struct Heartbeat {
   /// The Tributary this is the heartbeat of.
-  pub set: ExternalValidatorSet,
+  pub set: ValidatorSet,
   /// The hash of the latest block added to the Tributary.
   pub latest_block_hash: [u8; 32],
 }
@@ -56,7 +56,7 @@ pub trait P2p:
   type Peer<'a>: Peer<'a>;

   /// Fetch the peers for this network.
-  fn peers(&self, network: ExternalNetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;
+  fn peers(&self, network: NetworkId) -> impl Send + Future<Output = Vec<Self::Peer<'_>>>;

   /// Broadcast a cosign.
   fn publish_cosign(&self, cosign: SignedCosign) -> impl Send + Future<Output = ()>;
@@ -131,13 +131,13 @@ fn handle_heartbeat<D: Db, T: TransactionTrait>(
 pub async fn run<TD: Db, Tx: TransactionTrait, P: P2p>(
   db: impl Db,
   p2p: P,
-  mut add_tributary: mpsc::UnboundedReceiver<(ExternalValidatorSet, Tributary<TD, Tx, P>)>,
-  mut retire_tributary: mpsc::UnboundedReceiver<ExternalValidatorSet>,
+  mut add_tributary: mpsc::UnboundedReceiver<(ValidatorSet, Tributary<TD, Tx, P>)>,
+  mut retire_tributary: mpsc::UnboundedReceiver<ValidatorSet>,
   send_cosigns: mpsc::UnboundedSender<SignedCosign>,
 ) {
-  let mut readers = HashMap::<ExternalValidatorSet, TributaryReader<TD, Tx>>::new();
+  let mut readers = HashMap::<ValidatorSet, TributaryReader<TD, Tx>>::new();
   let mut tributaries = HashMap::<[u8; 32], mpsc::UnboundedSender<Vec<u8>>>::new();
-  let mut heartbeat_tasks = HashMap::<ExternalValidatorSet, _>::new();
+  let mut heartbeat_tasks = HashMap::<ValidatorSet, _>::new();

   loop {
     tokio::select! {
@@ -3,11 +3,9 @@ use std::{path::Path, fs};
 pub(crate) use serai_db::{Get, DbTxn, Db as DbTrait};
 use serai_db::{create_db, db_channel};

-use dkg::Participant;
-
 use serai_client::{
-  primitives::ExternalNetworkId,
-  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair},
+  primitives::NetworkId,
+  validator_sets::primitives::{Session, ValidatorSet},
 };

 use serai_cosign::SignedCosign;
@@ -15,7 +13,7 @@ use serai_coordinator_substrate::NewSetInformation;
 use serai_coordinator_tributary::Transaction;

 #[cfg(all(feature = "parity-db", not(feature = "rocksdb")))]
-pub(crate) type Db = std::sync::Arc<serai_db::ParityDb>;
+pub(crate) type Db = serai_db::ParityDb;
 #[cfg(feature = "rocksdb")]
 pub(crate) type Db = serai_db::RocksDB;

@@ -43,21 +41,22 @@ pub(crate) fn coordinator_db() -> Db {
   db(&format!("{root_path}/coordinator/db"))
 }

-fn tributary_db_folder(set: ExternalValidatorSet) -> String {
+fn tributary_db_folder(set: ValidatorSet) -> String {
   let root_path = serai_env::var("DB_PATH").expect("path to DB wasn't specified");
   let network = match set.network {
-    ExternalNetworkId::Bitcoin => "Bitcoin",
-    ExternalNetworkId::Ethereum => "Ethereum",
-    ExternalNetworkId::Monero => "Monero",
+    NetworkId::Serai => panic!("creating Tributary for the Serai network"),
+    NetworkId::Bitcoin => "Bitcoin",
+    NetworkId::Ethereum => "Ethereum",
+    NetworkId::Monero => "Monero",
   };
   format!("{root_path}/tributary-{network}-{}", set.session.0)
 }

-pub(crate) fn tributary_db(set: ExternalValidatorSet) -> Db {
+pub(crate) fn tributary_db(set: ValidatorSet) -> Db {
   db(&format!("{}/db", tributary_db_folder(set)))
 }

-pub(crate) fn prune_tributary_db(set: ExternalValidatorSet) {
+pub(crate) fn prune_tributary_db(set: ValidatorSet) {
   log::info!("pruning data directory for tributary {set:?}");
   let db = tributary_db_folder(set);
   if fs::exists(&db).expect("couldn't check if tributary DB exists") {
@@ -72,15 +71,11 @@ create_db! {
     // The latest Tributary to have been retired for a network
     // Since Tributaries are retired sequentially, this is informative to if any Tributary has been
     // retired
-    RetiredTributary: (network: ExternalNetworkId) -> Session,
+    RetiredTributary: (network: NetworkId) -> Session,
     // The last handled message from a Processor
-    LastProcessorMessage: (network: ExternalNetworkId) -> u64,
+    LastProcessorMessage: (network: NetworkId) -> u64,
     // Cosigns we produced and tried to intake yet incurred an error while doing so
     ErroneousCosigns: () -> Vec<SignedCosign>,
-    // The keys to confirm and set on the Serai network
-    KeysToConfirm: (set: ExternalValidatorSet) -> KeyPair,
-    // The key was set on the Serai network
-    KeySet: (set: ExternalValidatorSet) -> (),
   }
 }

@@ -89,7 +84,7 @@ db_channel! {
     // Cosigns we produced
     SignedCosigns: () -> SignedCosign,
     // Tributaries to clean up upon reboot
-    TributaryCleanup: () -> ExternalValidatorSet,
+    TributaryCleanup: () -> ValidatorSet,
   }
 }

@@ -98,51 +93,21 @@ mod _internal_db {

   db_channel! {
     Coordinator {
-      // Tributary transactions to publish from the Processor messages
-      TributaryTransactionsFromProcessorMessages: (set: ExternalValidatorSet) -> Transaction,
-      // Tributary transactions to publish from the DKG confirmation task
-      TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
-      // Participants to remove
-      RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
+      // Tributary transactions to publish
+      TributaryTransactions: (set: ValidatorSet) -> Transaction,
     }
   }
 }

-pub(crate) struct TributaryTransactionsFromProcessorMessages;
-impl TributaryTransactionsFromProcessorMessages {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
+pub(crate) struct TributaryTransactions;
+impl TributaryTransactions {
+  pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet, tx: &Transaction) {
     // If this set has yet to be retired, send this transaction
     if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::TributaryTransactionsFromProcessorMessages::send(txn, set, tx);
+      _internal_db::TributaryTransactions::send(txn, set, tx);
     }
   }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
-    _internal_db::TributaryTransactionsFromProcessorMessages::try_recv(txn, set)
-  }
-}
-
-pub(crate) struct TributaryTransactionsFromDkgConfirmation;
-impl TributaryTransactionsFromDkgConfirmation {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, tx: &Transaction) {
-    // If this set has yet to be retired, send this transaction
-    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::TributaryTransactionsFromDkgConfirmation::send(txn, set, tx);
-    }
-  }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Transaction> {
-    _internal_db::TributaryTransactionsFromDkgConfirmation::try_recv(txn, set)
-  }
-}
-
-pub(crate) struct RemoveParticipant;
-impl RemoveParticipant {
-  pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
-    // If this set has yet to be retired, send this transaction
-    if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::RemoveParticipant::send(txn, set, &participant);
-    }
-  }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
-    _internal_db::RemoveParticipant::try_recv(txn, set)
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<Transaction> {
+    _internal_db::TributaryTransactions::try_recv(txn, set)
   }
 }
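The send paths above all share one guard: a transaction is only queued if the set's Tributary hasn't been retired, relying on `Option`'s ordering (`None < Some(_)`). A tiny sketch of that comparison (function name hypothetical):

```rust
// Latest retired session for the network (if any) vs. the target session
fn still_live(retired: Option<u32>, session: u32) -> bool {
  retired < Some(session)
}

fn main() {
  assert!(still_live(None, 0)); // nothing retired yet
  assert!(still_live(Some(1), 2)); // session 2 outlives retired session 1
  assert!(!still_live(Some(2), 2)); // session 2 itself was retired
}
```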
@@ -1,437 +0,0 @@
-use core::{ops::Deref, future::Future};
-use std::{boxed::Box, collections::HashMap};
-
-use zeroize::Zeroizing;
-use rand_core::OsRng;
-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
-use frost_schnorrkel::{
-  frost::{
-    dkg::{Participant, musig::musig},
-    FrostError,
-    sign::*,
-  },
-  Schnorrkel,
-};
-
-use serai_db::{DbTxn, Db as DbTrait};
-
-use serai_client::{
-  primitives::SeraiAddress,
-  validator_sets::primitives::{ExternalValidatorSet, musig_context, set_keys_message},
-};
-
-use serai_task::{DoesNotError, ContinuallyRan};
-
-use serai_coordinator_substrate::{NewSetInformation, Keys};
-use serai_coordinator_tributary::{Transaction, DkgConfirmationMessages};
-
-use crate::{KeysToConfirm, KeySet, TributaryTransactionsFromDkgConfirmation};
-
-fn schnorrkel() -> Schnorrkel {
-  Schnorrkel::new(b"substrate") // TODO: Pull the constant for this
-}
-
-fn our_i(
-  set: &NewSetInformation,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  data: &HashMap<Participant, Vec<u8>>,
-) -> Participant {
-  let public = SeraiAddress((Ristretto::generator() * key.deref()).to_bytes());
-
-  let mut our_i = None;
-  for participant in data.keys() {
-    let validator_index = usize::from(u16::from(*participant) - 1);
-    let (validator, _weight) = set.validators[validator_index];
-    if validator == public {
-      our_i = Some(*participant);
-    }
-  }
-  our_i.unwrap()
-}
-
-// Take a HashMap of participations with non-contiguous Participants and convert them to a
-// contiguous sequence.
-//
-// The input data is expected to not include our own data, which also won't be in the output data.
-//
-// Returns the mapping from the contiguous Participants to the original Participants.
-fn make_contiguous<T>(
-  our_i: Participant,
-  mut data: HashMap<Participant, Vec<u8>>,
-  transform: impl Fn(Vec<u8>) -> std::io::Result<T>,
-) -> Result<HashMap<Participant, T>, Participant> {
-  assert!(!data.contains_key(&our_i));
-
-  let mut ordered_participants = data.keys().copied().collect::<Vec<_>>();
-  ordered_participants.sort_by_key(|participant| u16::from(*participant));
-
-  let mut our_i = Some(our_i);
-  let mut contiguous = HashMap::new();
-  let mut i = 1;
-  for participant in ordered_participants {
-    // If this is the first participant after our own index, increment to account for our index
-    if let Some(our_i_value) = our_i {
-      if u16::from(participant) > u16::from(our_i_value) {
-        i += 1;
-        our_i = None;
-      }
-    }
-
-    let contiguous_index = Participant::new(i).unwrap();
-    let data = match transform(data.remove(&participant).unwrap()) {
-      Ok(data) => data,
-      Err(_) => Err(participant)?,
-    };
-    contiguous.insert(contiguous_index, data);
-    i += 1;
-  }
-  Ok(contiguous)
-}
-
-fn handle_frost_error<T>(result: Result<T, FrostError>) -> Result<T, Participant> {
-  match &result {
-    Ok(_) => Ok(result.unwrap()),
-    Err(FrostError::InvalidPreprocess(participant) | FrostError::InvalidShare(participant)) => {
-      Err(*participant)
-    }
-    // All of these should be unreachable
-    Err(
-      FrostError::InternalError(_) |
-      FrostError::InvalidParticipant(_, _) |
-      FrostError::InvalidSigningSet(_) |
-      FrostError::InvalidParticipantQuantity(_, _) |
-      FrostError::DuplicatedParticipant(_) |
-      FrostError::MissingParticipant(_),
-    ) => {
-      result.unwrap();
-      unreachable!("continued execution after unwrapping Result::Err");
-    }
-  }
-}
-
-#[rustfmt::skip]
-enum Signer {
-  Preprocess { attempt: u32, seed: CachedPreprocess, preprocess: [u8; 64] },
-  Share {
-    attempt: u32,
-    musig_validators: Vec<SeraiAddress>,
-    share: [u8; 32],
-    machine: Box<AlgorithmSignatureMachine<Ristretto, Schnorrkel>>,
-  },
-}
-
-/// Performs the DKG Confirmation protocol.
-pub(crate) struct ConfirmDkgTask<CD: DbTrait, TD: DbTrait> {
-  db: CD,
-
-  set: NewSetInformation,
-  tributary_db: TD,
-
-  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  signer: Option<Signer>,
-}
-
-impl<CD: DbTrait, TD: DbTrait> ConfirmDkgTask<CD, TD> {
-  pub(crate) fn new(
-    db: CD,
-    set: NewSetInformation,
-    tributary_db: TD,
-    key: Zeroizing<<Ristretto as Ciphersuite>::F>,
-  ) -> Self {
-    Self { db, set, tributary_db, key, signer: None }
-  }
-
-  fn slash(db: &mut CD, set: ExternalValidatorSet, validator: SeraiAddress) {
-    let mut txn = db.txn();
-    TributaryTransactionsFromDkgConfirmation::send(
-      &mut txn,
-      set,
-      &Transaction::RemoveParticipant { participant: validator, signed: Default::default() },
-    );
-    txn.commit();
-  }
-
-  fn preprocess(
-    db: &mut CD,
-    set: ExternalValidatorSet,
-    attempt: u32,
-    key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-    signer: &mut Option<Signer>,
-  ) {
-    // Perform the preprocess
-    let (machine, preprocess) = AlgorithmMachine::new(
-      schnorrkel(),
-      // We use a 1-of-1 Musig here as we don't know who will actually be in this Musig yet
-      musig(&musig_context(set.into()), key, &[Ristretto::generator() * key.deref()])
-        .unwrap()
-        .into(),
-    )
-    .preprocess(&mut OsRng);
-    // We take the preprocess so we can use it in a distinct machine with the actual Musig
-    // parameters
-    let seed = machine.cache();
-
-    let mut preprocess_bytes = [0u8; 64];
-    preprocess_bytes.copy_from_slice(&preprocess.serialize());
-    let preprocess = preprocess_bytes;
-
-    let mut txn = db.txn();
-    // If this attempt has already been preprocessed for, the Tributary will de-duplicate it
-    // This may mean the Tributary preprocess is distinct from ours, but we check for that later
-    TributaryTransactionsFromDkgConfirmation::send(
-      &mut txn,
-      set,
-      &Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed: Default::default() },
-    );
-    txn.commit();
-
-    *signer = Some(Signer::Preprocess { attempt, seed, preprocess });
-  }
-}
-
-impl<CD: DbTrait, TD: DbTrait> ContinuallyRan for ConfirmDkgTask<CD, TD> {
-  type Error = DoesNotError;
-
-  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
-    async move {
-      let mut made_progress = false;
-
-      // If we were sent a key to set, create the signer for it
-      if self.signer.is_none() && KeysToConfirm::get(&self.db, self.set.set).is_some() {
-        // Create and publish the initial preprocess
-        Self::preprocess(&mut self.db, self.set.set, 0, &self.key, &mut self.signer);
-
-        made_progress = true;
-      }
-
-      // If we have keys to confirm, handle all messages from the tributary
-      if let Some(key_pair) = KeysToConfirm::get(&self.db, self.set.set) {
-        // Handle all messages from the Tributary
-        loop {
-          let mut tributary_txn = self.tributary_db.txn();
-          let Some(msg) = DkgConfirmationMessages::try_recv(&mut tributary_txn, self.set.set)
-          else {
-            break;
-          };
-
-          match msg {
-            messages::sign::CoordinatorMessage::Reattempt {
-              id: messages::sign::SignId { attempt, .. },
-            } => {
-              // Create and publish the preprocess for the specified attempt
-              Self::preprocess(&mut self.db, self.set.set, attempt, &self.key, &mut self.signer);
-            }
-            messages::sign::CoordinatorMessage::Preprocesses {
-              id: messages::sign::SignId { attempt, .. },
-              mut preprocesses,
-            } => {
-              // Confirm the preprocess we're expected to sign with is the one we locally have
-              // It may be different if we rebooted and made a second preprocess for this attempt
-              let Some(Signer::Preprocess { attempt: our_attempt, seed, preprocess }) =
-                self.signer.take()
-              else {
-                // If this message is not expected, commit the txn to drop it and move on
-                // At some point, we'll get a Reattempt and reset
-                tributary_txn.commit();
-                break;
-              };
-
-              // Determine the MuSig key signed with
-              let musig_validators = {
-                let mut ordered_participants = preprocesses.keys().copied().collect::<Vec<_>>();
-                ordered_participants.sort_by_key(|participant| u16::from(*participant));
-
-                let mut res = vec![];
-                for participant in ordered_participants {
-                  let (validator, _weight) =
-                    self.set.validators[usize::from(u16::from(participant) - 1)];
-                  res.push(validator);
-                }
-                res
-              };
-
-              let musig_public_keys = musig_validators
-                .iter()
-                .map(|key| {
-                  Ristretto::read_G(&mut key.0.as_slice())
-                    .expect("Serai validator had invalid public key")
-                })
-                .collect::<Vec<_>>();
-
-              let keys = musig(&musig_context(self.set.set.into()), &self.key, &musig_public_keys)
-                .unwrap()
-                .into();
-
-              // Rebuild the machine
-              let (machine, preprocess_from_cache) =
-                AlgorithmSignMachine::from_cache(schnorrkel(), keys, seed);
-              assert_eq!(preprocess.as_slice(), preprocess_from_cache.serialize().as_slice());
-
-              // Ensure this is a consistent signing session
-              let our_i = our_i(&self.set, &self.key, &preprocesses);
-              let consistent = (attempt == our_attempt) &&
-                (preprocesses.remove(&our_i).unwrap().as_slice() == preprocess.as_slice());
-              if !consistent {
-                tributary_txn.commit();
-                break;
-              }
-
-              // Reformat the preprocesses into the expected format for Musig
-              let preprocesses = match make_contiguous(our_i, preprocesses, |preprocess| {
-                machine.read_preprocess(&mut preprocess.as_slice())
-              }) {
-                Ok(preprocesses) => preprocesses,
-                // This yields the *original participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              // Calculate our share
-              let (machine, share) = match handle_frost_error(
-                machine.sign(preprocesses, &set_keys_message(&self.set.set, &key_pair)),
-              ) {
-                Ok((machine, share)) => (machine, share),
-                // This yields the *musig participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    musig_validators[usize::from(u16::from(participant) - 1)],
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              // Send our share
-              let share = <[u8; 32]>::try_from(share.serialize()).unwrap();
-              let mut txn = self.db.txn();
-              TributaryTransactionsFromDkgConfirmation::send(
-                &mut txn,
-                self.set.set,
-                &Transaction::DkgConfirmationShare { attempt, share, signed: Default::default() },
-              );
-              txn.commit();
-
-              self.signer = Some(Signer::Share {
-                attempt,
-                musig_validators,
-                share,
-                machine: Box::new(machine),
-              });
-            }
-            messages::sign::CoordinatorMessage::Shares {
-              id: messages::sign::SignId { attempt, .. },
-              mut shares,
-            } => {
-              let Some(Signer::Share { attempt: our_attempt, musig_validators, share, machine }) =
-                self.signer.take()
-              else {
-                tributary_txn.commit();
-                break;
-              };
-
-              // Ensure this is a consistent signing session
-              let our_i = our_i(&self.set, &self.key, &shares);
-              let consistent = (attempt == our_attempt) &&
-                (shares.remove(&our_i).unwrap().as_slice() == share.as_slice());
-              if !consistent {
-                tributary_txn.commit();
-                break;
-              }
-
-              // Reformat the shares into the expected format for Musig
-              let shares = match make_contiguous(our_i, shares, |share| {
-                machine.read_share(&mut share.as_slice())
-              }) {
-                Ok(shares) => shares,
-                // This yields the *original participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    self.set.validators[usize::from(u16::from(participant) - 1)].0,
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              };
-
-              match handle_frost_error(machine.complete(shares)) {
-                Ok(signature) => {
-                  // Create the bitvec of the participants
-                  let mut signature_participants;
-                  {
-                    use bitvec::prelude::*;
-                    signature_participants = bitvec![u8, Lsb0; 0; 0];
-                    let mut i = 0;
-                    for (validator, _) in &self.set.validators {
-                      if Some(validator) == musig_validators.get(i) {
-                        signature_participants.push(true);
-                        i += 1;
-                      } else {
-                        signature_participants.push(false);
-                      }
-                    }
-                  }
-
-                  // This is safe to call multiple times as it'll just change which *valid*
-                  // signature to publish
-                  let mut txn = self.db.txn();
-                  Keys::set(
-                    &mut txn,
-                    self.set.set,
-                    key_pair.clone(),
-                    signature_participants,
-                    signature.into(),
-                  );
-                  txn.commit();
-                }
-                // This yields the *musig participant index*
-                Err(participant) => {
-                  Self::slash(
-                    &mut self.db,
-                    self.set.set,
-                    musig_validators[usize::from(u16::from(participant) - 1)],
-                  );
-                  tributary_txn.commit();
-                  break;
-                }
-              }
-            }
-          }
-
-          // Because we successfully handled this message, note we made proress
-          made_progress = true;
-          tributary_txn.commit();
-        }
-      }
-
-      // Check if the key has been set on Serai
-      if KeysToConfirm::get(&self.db, self.set.set).is_some() &&
-        KeySet::get(&self.db, self.set.set).is_some()
-      {
-        // Take the keys to confirm so we never instantiate the signer again
-        let mut txn = self.db.txn();
KeysToConfirm::take(&mut txn, self.set.set);
|
|
||||||
KeySet::take(&mut txn, self.set.set);
|
|
||||||
txn.commit();
|
|
||||||
|
|
||||||
// Drop our own signer
|
|
||||||
// The task won't die until the Tributary does, but now it'll never do anything again
|
|
||||||
self.signer = None;
|
|
||||||
|
|
||||||
made_progress = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(made_progress)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
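The deleted signing flow above marks, for each validator in set order, whether that validator was one of the MuSig signers, producing a bitvec aligned with the validator list. A minimal standalone sketch of the same pattern follows (hypothetical function and argument names, assuming the `bitvec` crate; not the coordinator's actual API):

use bitvec::prelude::*;

/// Flag which validators (in set order) participated in signing.
/// `signers` must be a subsequence of `validators`, in the same order.
fn participation_bits(validators: &[[u8; 32]], signers: &[[u8; 32]]) -> BitVec<u8, Lsb0> {
  let mut bits = bitvec![u8, Lsb0; 0; 0];
  let mut i = 0;
  for validator in validators {
    if Some(validator) == signers.get(i) {
      bits.push(true);
      i += 1;
    } else {
      bits.push(false);
    }
  }
  bits
}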
@@ -14,8 +14,8 @@ use borsh::BorshDeserialize;
 use tokio::sync::mpsc;

 use serai_client::{
-  primitives::{ExternalNetworkId, PublicKey, SeraiAddress, Signature},
-  validator_sets::primitives::{ExternalValidatorSet, KeyPair},
+  primitives::{NetworkId, PublicKey},
+  validator_sets::primitives::ValidatorSet,
   Serai,
 };
 use message_queue::{Service, client::MessageQueue};

@@ -23,17 +23,13 @@ use message_queue::{Service, client::MessageQueue};
 use serai_task::{Task, TaskHandle, ContinuallyRan};

 use serai_cosign::{Faulted, SignedCosign, Cosigning};
-use serai_coordinator_substrate::{
-  CanonicalEventStream, EphemeralEventStream, SignSlashReport, SetKeysTask, SignedBatches,
-  PublishBatchTask, SlashReports, PublishSlashReportTask,
-};
-use serai_coordinator_tributary::{SigningProtocolRound, Signed, Transaction, SubstrateBlockPlans};
+use serai_coordinator_substrate::{CanonicalEventStream, EphemeralEventStream, SignSlashReport};
+use serai_coordinator_tributary::{Signed, Transaction, SubstrateBlockPlans};

 mod db;
 use db::*;

 mod tributary;
-mod dkg_confirmation;

 mod substrate;
 use substrate::SubstrateTask;
@@ -149,24 +145,11 @@ fn spawn_cosigning<D: serai_db::Db>(
   });
 }

-async fn handle_network(
+async fn handle_processor_messages(
   mut db: impl serai_db::Db,
   message_queue: Arc<MessageQueue>,
-  serai: Arc<Serai>,
-  network: ExternalNetworkId,
+  network: NetworkId,
 ) {
-  // Spawn the task to publish batches for this network
-  {
-    let (publish_batch_task_def, publish_batch_task) = Task::new();
-    tokio::spawn(
-      PublishBatchTask::new(db.clone(), serai.clone(), network)
-        .continually_run(publish_batch_task_def, vec![]),
-    );
-    // Forget its handle so it always runs in the background
-    core::mem::forget(publish_batch_task);
-  }
-
-  // Handle Processor messages
   loop {
     let (msg_id, msg) = {
       let msg = message_queue.next(Service::Processor(network)).await;
@@ -196,8 +179,8 @@ async fn handle_network(
     match msg {
       messages::ProcessorMessage::KeyGen(msg) => match msg {
        messages::key_gen::ProcessorMessage::Participation { session, participation } => {
-          let set = ExternalValidatorSet { network, session };
-          TributaryTransactionsFromProcessorMessages::send(
+          let set = ValidatorSet { network, session };
+          TributaryTransactions::send(
            &mut txn,
            set,
            &Transaction::DkgParticipation { participation, signed: Signed::default() },
@@ -207,84 +190,45 @@ async fn handle_network(
          session,
          substrate_key,
          network_key,
-        } => {
-          KeysToConfirm::set(
-            &mut txn,
-            ExternalValidatorSet { network, session },
-            &KeyPair(
-              PublicKey::from_raw(substrate_key),
-              network_key
-                .try_into()
-                .expect("generated a network key which exceeds the maximum key length"),
-            ),
-          );
-        }
+        } => todo!("TODO Transaction::DkgConfirmationPreprocess"),
        messages::key_gen::ProcessorMessage::Blame { session, participant } => {
-          RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
+          let set = ValidatorSet { network, session };
+          TributaryTransactions::send(
+            &mut txn,
+            set,
+            &Transaction::RemoveParticipant {
+              participant: todo!("TODO"),
+              signed: Signed::default(),
+            },
+          );
        }
      },
      messages::ProcessorMessage::Sign(msg) => match msg {
        messages::sign::ProcessorMessage::InvalidParticipant { session, participant } => {
-          RemoveParticipant::send(&mut txn, ExternalValidatorSet { network, session }, participant);
+          let set = ValidatorSet { network, session };
+          TributaryTransactions::send(
+            &mut txn,
+            set,
+            &Transaction::RemoveParticipant {
+              participant: todo!("TODO"),
+              signed: Signed::default(),
+            },
+          );
        }
        messages::sign::ProcessorMessage::Preprocesses { id, preprocesses } => {
-          let set = ExternalValidatorSet { network, session: id.session };
-          if id.attempt == 0 {
-            // Batches are declared by their intent to be signed
-            if let messages::sign::VariantSignId::Batch(hash) = id.id {
-              TributaryTransactionsFromProcessorMessages::send(
-                &mut txn,
-                set,
-                &Transaction::Batch { hash },
-              );
-            }
-          }
-
-          TributaryTransactionsFromProcessorMessages::send(
-            &mut txn,
-            set,
-            &Transaction::Sign {
-              id: id.id,
-              attempt: id.attempt,
-              round: SigningProtocolRound::Preprocess,
-              data: preprocesses,
-              signed: Signed::default(),
-            },
-          );
-        }
-        messages::sign::ProcessorMessage::Shares { id, shares } => {
-          let set = ExternalValidatorSet { network, session: id.session };
-          TributaryTransactionsFromProcessorMessages::send(
-            &mut txn,
-            set,
-            &Transaction::Sign {
-              id: id.id,
-              attempt: id.attempt,
-              round: SigningProtocolRound::Share,
-              data: shares,
-              signed: Signed::default(),
-            },
-          );
+          todo!("TODO Transaction::Batch + Transaction::Sign")
        }
+        messages::sign::ProcessorMessage::Shares { id, shares } => todo!("TODO Transaction::Sign"),
      },
      messages::ProcessorMessage::Coordinator(msg) => match msg {
        messages::coordinator::ProcessorMessage::CosignedBlock { cosign } => {
          SignedCosigns::send(&mut txn, &cosign);
        }
        messages::coordinator::ProcessorMessage::SignedBatch { batch } => {
-          SignedBatches::send(&mut txn, &batch);
+          todo!("TODO PublishBatchTask")
        }
-        messages::coordinator::ProcessorMessage::SignedSlashReport {
-          session,
-          slash_report,
-          signature,
-        } => {
-          SlashReports::set(
-            &mut txn,
-            ExternalValidatorSet { network, session },
-            slash_report,
-            Signature(signature),
-          );
+        messages::coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
+          todo!("TODO PublishSlashReportTask")
        }
      },
      messages::ProcessorMessage::Substrate(msg) => match msg {
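In the left-hand version above, a batch is declared on the Tributary only on the first signing attempt (attempt 0), while every attempt forwards its round data. A condensed sketch of that routing rule, with hypothetical stand-in types (not the coordinator's actual message enums):

// Hypothetical stand-ins for the coordinator's message and transaction types
enum SignId { Batch([u8; 32]), Other(u64) }
enum Tx {
  Batch { hash: [u8; 32] },
  Sign { attempt: u32, data: Vec<u8> },
}

fn route_preprocess(id: SignId, attempt: u32, data: Vec<u8>, out: &mut Vec<Tx>) {
  // Batches are declared by the intent to sign them, so only attempt 0 declares one
  if attempt == 0 {
    if let SignId::Batch(hash) = id {
      out.push(Tx::Batch { hash });
    }
  }
  // Every attempt publishes its preprocess data
  out.push(Tx::Sign { attempt, data });
}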
@@ -297,9 +241,9 @@ async fn handle_network(
              .push(plan.transaction_plan_id);
          }
          for (session, plans) in by_session {
-            let set = ExternalValidatorSet { network, session };
+            let set = ValidatorSet { network, session };
            SubstrateBlockPlans::set(&mut txn, set, block, &plans);
-            TributaryTransactionsFromProcessorMessages::send(
+            TributaryTransactions::send(
              &mut txn,
              set,
              &Transaction::SubstrateBlock { hash: block },
@@ -365,16 +309,10 @@ async fn main() {
      // Cleanup all historic Tributaries
      while let Some(to_cleanup) = TributaryCleanup::try_recv(&mut txn) {
        prune_tributary_db(to_cleanup);
-        // Remove the keys to confirm for this network
-        KeysToConfirm::take(&mut txn, to_cleanup);
-        KeySet::take(&mut txn, to_cleanup);
        // Drain the cosign intents created for this set
        while !Cosigning::<Db>::intended_cosigns(&mut txn, to_cleanup).is_empty() {}
        // Drain the transactions to publish for this set
-        while TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, to_cleanup).is_some() {}
-        while TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, to_cleanup).is_some() {}
-        // Drain the participants to remove for this set
-        while RemoveParticipant::try_recv(&mut txn, to_cleanup).is_some() {}
+        while TributaryTransactions::try_recv(&mut txn, to_cleanup).is_some() {}
        // Remove the SignSlashReport notification
        SignSlashReport::try_recv(&mut txn, to_cleanup);
      }
@@ -438,7 +376,7 @@ async fn main() {
      EphemeralEventStream::new(
        db.clone(),
        serai.clone(),
-        SeraiAddress((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
+        PublicKey::from_raw((<Ristretto as Ciphersuite>::generator() * serai_key.deref()).to_bytes()),
      )
      .continually_run(substrate_ephemeral_task_def, vec![substrate_task]),
    );
@@ -479,29 +417,12 @@ async fn main() {
    .continually_run(substrate_task_def, vec![]),
  );

-  // Handle each of the networks
-  for network in serai_client::primitives::EXTERNAL_NETWORKS {
-    tokio::spawn(handle_network(db.clone(), message_queue.clone(), serai.clone(), network));
-  }
-
-  // Spawn the task to set keys
-  {
-    let (set_keys_task_def, set_keys_task) = Task::new();
-    tokio::spawn(
-      SetKeysTask::new(db.clone(), serai.clone()).continually_run(set_keys_task_def, vec![]),
-    );
-    // Forget its handle so it always runs in the background
-    core::mem::forget(set_keys_task);
-  }
-
-  // Spawn the task to publish slash reports
-  {
-    let (publish_slash_report_task_def, publish_slash_report_task) = Task::new();
-    tokio::spawn(
-      PublishSlashReportTask::new(db, serai).continually_run(publish_slash_report_task_def, vec![]),
-    );
-    // Always have this run in the background
-    core::mem::forget(publish_slash_report_task);
+  // Handle all of the Processors' messages
+  for network in serai_client::primitives::NETWORKS {
+    if network == NetworkId::Serai {
+      continue;
+    }
+    tokio::spawn(handle_processor_messages(db.clone(), message_queue.clone(), network));
  }

  // Run the spawned tasks ad-infinitum
@@ -9,7 +9,7 @@ use tokio::sync::mpsc;

 use serai_db::{DbTxn, Db as DbTrait};

-use serai_client::validator_sets::primitives::{Session, ExternalValidatorSet};
+use serai_client::validator_sets::primitives::{Session, ValidatorSet};
 use message_queue::{Service, Metadata, client::MessageQueue};

 use tributary_sdk::Tributary;

@@ -19,7 +19,7 @@ use serai_task::ContinuallyRan;
 use serai_coordinator_tributary::Transaction;
 use serai_coordinator_p2p::P2p;

-use crate::{Db, KeySet};
+use crate::Db;

 pub(crate) struct SubstrateTask<P: P2p> {
   pub(crate) serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,

@@ -27,8 +27,8 @@ pub(crate) struct SubstrateTask<P: P2p> {
   pub(crate) message_queue: Arc<MessageQueue>,
   pub(crate) p2p: P,
   pub(crate) p2p_add_tributary:
-    mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
-  pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ExternalValidatorSet>,
+    mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
+  pub(crate) p2p_retire_tributary: mpsc::UnboundedSender<ValidatorSet>,
 }

 impl<P: P2p> ContinuallyRan for SubstrateTask<P> {

@@ -38,7 +38,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
      let mut made_progress = false;

      // Handle the Canonical events
-      for network in serai_client::primitives::EXTERNAL_NETWORKS {
+      for network in serai_client::primitives::NETWORKS {
        loop {
          let mut txn = self.db.txn();
          let Some(msg) = serai_coordinator_substrate::Canonical::try_recv(&mut txn, network)

@@ -47,9 +47,8 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
          };

          match msg {
-            messages::substrate::CoordinatorMessage::SetKeys { session, .. } => {
-              KeySet::set(&mut txn, ExternalValidatorSet { network, session }, &());
-            }
+            // TODO: Stop trying to confirm the DKG
+            messages::substrate::CoordinatorMessage::SetKeys { .. } => todo!("TODO"),
            messages::substrate::CoordinatorMessage::SlashesReported { session } => {
              let prior_retired = crate::db::RetiredTributary::get(&txn, network);
              let next_to_be_retired =

@@ -58,7 +57,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
              crate::db::RetiredTributary::set(&mut txn, network, &session);
              self
                .p2p_retire_tributary
-                .send(ExternalValidatorSet { network, session })
+                .send(ValidatorSet { network, session })
                .expect("p2p retire_tributary channel dropped?");
            }
            messages::substrate::CoordinatorMessage::Block { .. } => {}

@@ -108,10 +107,7 @@ impl<P: P2p> ContinuallyRan for SubstrateTask<P> {
              */
              crate::db::TributaryCleanup::send(
                &mut txn,
-                &ExternalValidatorSet {
-                  network: new_set.set.network,
-                  session: Session(historic_session),
-                },
+                &ValidatorSet { network: new_set.set.network, session: Session(historic_session) },
              );
            }
@@ -11,7 +11,7 @@ use tokio::sync::mpsc;
 use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};

 use scale::Encode;
-use serai_client::validator_sets::primitives::ExternalValidatorSet;
+use serai_client::validator_sets::primitives::ValidatorSet;

 use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};
@@ -21,25 +21,14 @@ use message_queue::{Service, Metadata, client::MessageQueue};

 use serai_cosign::{Faulted, CosignIntent, Cosigning};
 use serai_coordinator_substrate::{NewSetInformation, SignSlashReport};
-use serai_coordinator_tributary::{
-  Topic, Transaction, ProcessorMessages, CosignIntents, RecognizedTopics, ScanTributaryTask,
-};
+use serai_coordinator_tributary::{Transaction, ProcessorMessages, CosignIntents, ScanTributaryTask};
 use serai_coordinator_p2p::P2p;

-use crate::{
-  Db, TributaryTransactionsFromProcessorMessages, TributaryTransactionsFromDkgConfirmation,
-  RemoveParticipant, dkg_confirmation::ConfirmDkgTask,
-};
-
-create_db! {
-  Coordinator {
-    PublishOnRecognition: (set: ExternalValidatorSet, topic: Topic) -> Transaction,
-  }
-}
+use crate::{Db, TributaryTransactions};

 db_channel! {
   Coordinator {
-    PendingCosigns: (set: ExternalValidatorSet) -> CosignIntent,
+    PendingCosigns: (set: ValidatorSet) -> CosignIntent,
   }
 }
@@ -48,7 +37,7 @@ db_channel! {
 /// This is not a well-designed function. This is specific to the context in which it's called,
 /// within this file. It should only be considered an internal helper for this domain alone.
 async fn provide_transaction<TD: DbTrait, P: P2p>(
-  set: ExternalValidatorSet,
+  set: ValidatorSet,
   tributary: &Tributary<TD, Transaction, P>,
   tx: Transaction,
 ) {
@@ -158,101 +147,12 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan
   }
 }

-#[must_use]
-async fn add_signed_unsigned_transaction<TD: DbTrait, P: P2p>(
-  tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  mut tx: Transaction,
-) -> bool {
-  // If this is a signed transaction, sign it
-  if matches!(tx.kind(), TransactionKind::Signed(_, _)) {
-    tx.sign(&mut OsRng, tributary.genesis(), key);
-  }
-
-  let res = tributary.add_transaction(tx.clone()).await;
-  match &res {
-    // Fresh publication, already published
-    Ok(true | false) => {}
-    Err(
-      TransactionError::TooLargeTransaction |
-      TransactionError::InvalidSigner |
-      TransactionError::InvalidSignature |
-      TransactionError::InvalidContent,
-    ) => {
-      panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
-    }
-    // InvalidNonce may be out-of-order TXs, not invalid ones, but we only create nonce #n+1 after
-    // on-chain inclusion of the TX with nonce #n, so it is invalid within our context unless the
-    // issue is this transaction was already included on-chain
-    Err(TransactionError::InvalidNonce) => {
-      let TransactionKind::Signed(order, signed) = tx.kind() else {
-        panic!("non-Signed transaction had InvalidNonce");
-      };
-      let next_nonce = tributary
-        .next_nonce(&signed.signer, &order)
-        .await
-        .expect("signer who is a present validator didn't have a nonce");
-      assert!(next_nonce != signed.nonce);
-      // We're publishing an old transaction
-      if next_nonce > signed.nonce {
-        return true;
-      }
-      panic!("nonce in transaction wasn't contiguous with nonce on-chain");
-    }
-    // We've published too many transactions recently
-    Err(TransactionError::TooManyInMempool) => {
-      return false;
-    }
-    // This isn't a Provided transaction so this should never be hit
-    Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
-  }
-
-  true
-}
-
-async fn add_with_recognition_check<TD: DbTrait, P: P2p>(
-  set: ExternalValidatorSet,
-  tributary_db: &mut TD,
-  tributary: &Tributary<TD, Transaction, P>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  tx: Transaction,
-) -> bool {
-  let kind = tx.kind();
-  match kind {
-    TransactionKind::Provided(_) => provide_transaction(set, tributary, tx).await,
-    TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
-      // If this is a transaction with signing data, check the topic is recognized before
-      // publishing
-      let topic = tx.topic();
-      let still_requires_recognition = if let Some(topic) = topic {
-        (topic.requires_recognition() && (!RecognizedTopics::recognized(tributary_db, set, topic)))
-          .then_some(topic)
-      } else {
-        None
-      };
-      if let Some(topic) = still_requires_recognition {
-        // Queue the transaction until the topic is recognized
-        // We use the Tributary DB for this so it's cleaned up when the Tributary DB is
-        let mut tributary_txn = tributary_db.txn();
-        PublishOnRecognition::set(&mut tributary_txn, set, topic, &tx);
-        tributary_txn.commit();
-      } else {
-        // Actually add the transaction
-        if !add_signed_unsigned_transaction(tributary, key, tx).await {
-          return false;
-        }
-      }
-    }
-  }
-  true
-}
-
-/// Adds all of the transactions sent via `TributaryTransactionsFromProcessorMessages`.
+/// Adds all of the transactions sent via `TributaryTransactions`.
 pub(crate) struct AddTributaryTransactionsTask<CD: DbTrait, TD: DbTrait, P: P2p> {
   db: CD,
   tributary_db: TD,
   tributary: Tributary<TD, Transaction, P>,
-  set: NewSetInformation,
+  set: ValidatorSet,
   key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 }
 impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactionsTask<CD, TD, P> {
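The removed `add_signed_unsigned_transaction` helper treats `InvalidNonce` as benign only when the chain's next nonce has already advanced past the transaction's nonce, meaning the transaction was already included; a full mempool is the one retryable case. A reduced sketch of that decision table (hypothetical error enum; not the `tributary_sdk` API):

// Hypothetical error type standing in for the SDK's TransactionError
enum AddError { InvalidNonce, TooManyInMempool }

/// Ok(true): published or already on-chain. Ok(false): retry on a later iteration.
fn interpret(res: Result<(), AddError>, tx_nonce: u32, next_nonce: u32) -> Result<bool, String> {
  match res {
    Ok(()) => Ok(true),
    Err(AddError::InvalidNonce) => {
      if next_nonce > tx_nonce {
        // The transaction with this nonce was already included on-chain
        Ok(true)
      } else {
        Err("nonce in transaction wasn't contiguous with nonce on-chain".to_string())
      }
    }
    // The mempool is full; drop the write and retry on a future iteration
    Err(AddError::TooManyInMempool) => Ok(false),
  }
}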
@@ -261,87 +161,49 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;

-      // Provide/add all transactions sent our way
      loop {
        let mut txn = self.db.txn();
-        let Some(tx) = TributaryTransactionsFromDkgConfirmation::try_recv(&mut txn, self.set.set)
-        else {
-          break;
-        };
-
-        if !add_with_recognition_check(
-          self.set.set,
-          &mut self.tributary_db,
-          &self.tributary,
-          &self.key,
-          tx,
-        )
-        .await
-        {
-          break;
-        }
-
-        made_progress = true;
-        txn.commit();
-      }
-
-      loop {
-        let mut txn = self.db.txn();
-        let Some(tx) = TributaryTransactionsFromProcessorMessages::try_recv(&mut txn, self.set.set)
-        else {
-          break;
-        };
-
-        if !add_with_recognition_check(
-          self.set.set,
-          &mut self.tributary_db,
-          &self.tributary,
-          &self.key,
-          tx,
-        )
-        .await
-        {
-          break;
-        }
-
-        made_progress = true;
-        txn.commit();
-      }
-
-      // Provide/add all transactions due to newly recognized topics
-      loop {
-        let mut tributary_txn = self.tributary_db.txn();
-        let Some(topic) =
-          RecognizedTopics::try_recv_topic_requiring_recognition(&mut tributary_txn, self.set.set)
-        else {
-          break;
-        };
-        if let Some(tx) = PublishOnRecognition::take(&mut tributary_txn, self.set.set, topic) {
-          if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
-            break;
+        let Some(mut tx) = TributaryTransactions::try_recv(&mut txn, self.set) else { break };
+
+        let kind = tx.kind();
+        match kind {
+          TransactionKind::Provided(_) => provide_transaction(self.set, &self.tributary, tx).await,
+          TransactionKind::Unsigned | TransactionKind::Signed(_, _) => {
+            // If this is a signed transaction, sign it
+            if matches!(kind, TransactionKind::Signed(_, _)) {
+              tx.sign(&mut OsRng, self.tributary.genesis(), &self.key);
+            }
+
+            // Actually add the transaction
+            // TODO: If this is a preprocess, make sure the topic has been recognized
+            let res = self.tributary.add_transaction(tx.clone()).await;
+            match &res {
+              // Fresh publication, already published
+              Ok(true | false) => {}
+              Err(
+                TransactionError::TooLargeTransaction |
+                TransactionError::InvalidSigner |
+                TransactionError::InvalidNonce |
+                TransactionError::InvalidSignature |
+                TransactionError::InvalidContent,
+              ) => {
+                panic!("created an invalid transaction, tx: {tx:?}, err: {res:?}");
+              }
+              // We've published too many transactions recently
+              // Drop this txn to try to publish it again later on a future iteration
+              Err(TransactionError::TooManyInMempool) => {
+                drop(txn);
+                break;
+              }
+              // This isn't a Provided transaction so this should never be hit
+              Err(TransactionError::ProvidedAddedToMempool) => unreachable!(),
+            }
          }
        }
-
-        made_progress = true;
-        tributary_txn.commit();
-      }
-
-      // Publish any participant removals
-      loop {
-        let mut txn = self.db.txn();
-        let Some(participant) = RemoveParticipant::try_recv(&mut txn, self.set.set) else { break };
-        let tx = Transaction::RemoveParticipant {
-          participant: self.set.participant_indexes_reverse_lookup[&participant],
-          signed: Default::default(),
-        };
-        if !add_signed_unsigned_transaction(&self.tributary, &self.key, tx).await {
-          break;
-        }

        made_progress = true;
        txn.commit();
      }

      Ok(made_progress)
    }
  }
@@ -350,7 +212,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for AddTributaryTransactio
 /// Takes the messages from ScanTributaryTask and publishes them to the message-queue.
 pub(crate) struct TributaryProcessorMessagesTask<TD: DbTrait> {
   tributary_db: TD,
-  set: ExternalValidatorSet,
+  set: ValidatorSet,
   message_queue: Arc<MessageQueue>,
 }
 impl<TD: DbTrait> ContinuallyRan for TributaryProcessorMessagesTask<TD> {
@@ -430,7 +292,7 @@ impl<CD: DbTrait, TD: DbTrait, P: P2p> ContinuallyRan for SignSlashReportTask<CD
 /// Run the scan task whenever the Tributary adds a new block.
 async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
   db: CD,
-  set: ExternalValidatorSet,
+  set: ValidatorSet,
   tributary: Tributary<TD, Transaction, P>,
   scan_tributary_task: TaskHandle,
   tasks_to_keep_alive: Vec<TaskHandle>,
@@ -461,15 +323,13 @@ async fn scan_on_new_block<CD: DbTrait, TD: DbTrait, P: P2p>(
 /// - Spawn the ScanTributaryTask
 /// - Spawn the ProvideCosignCosignedTransactionsTask
 /// - Spawn the TributaryProcessorMessagesTask
-/// - Spawn the AddTributaryTransactionsTask
-/// - Spawn the ConfirmDkgTask
 /// - Spawn the SignSlashReportTask
 /// - Iterate the scan task whenever a new block occurs (not just on the standard interval)
 pub(crate) async fn spawn_tributary<P: P2p>(
   db: Db,
   message_queue: Arc<MessageQueue>,
   p2p: P,
-  p2p_add_tributary: &mpsc::UnboundedSender<(ExternalValidatorSet, Tributary<Db, Transaction, P>)>,
+  p2p_add_tributary: &mpsc::UnboundedSender<(ValidatorSet, Tributary<Db, Transaction, P>)>,
   set: NewSetInformation,
   serai_key: Zeroizing<<Ristretto as Ciphersuite>::F>,
 ) {
@@ -543,45 +403,38 @@ pub(crate) async fn spawn_tributary<P: P2p>(
  // Spawn the scan task
  let (scan_tributary_task_def, scan_tributary_task) = Task::new();
  tokio::spawn(
-    ScanTributaryTask::<_, P>::new(tributary_db.clone(), set.clone(), reader)
+    ScanTributaryTask::<_, P>::new(tributary_db.clone(), &set, reader)
      // This is the only handle for this TributaryProcessorMessagesTask, so when this task is
      // dropped, it will be too
      .continually_run(scan_tributary_task_def, vec![scan_tributary_messages_task]),
  );

-  // Spawn the add transactions task
-  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
-  tokio::spawn(
-    (AddTributaryTransactionsTask {
-      db: db.clone(),
-      tributary_db: tributary_db.clone(),
-      tributary: tributary.clone(),
-      set: set.clone(),
-      key: serai_key.clone(),
-    })
-    .continually_run(add_tributary_transactions_task_def, vec![]),
-  );
-
-  // Spawn the task to confirm the DKG result
-  let (confirm_dkg_task_def, confirm_dkg_task) = Task::new();
-  tokio::spawn(
-    ConfirmDkgTask::new(db.clone(), set.clone(), tributary_db.clone(), serai_key.clone())
-      .continually_run(confirm_dkg_task_def, vec![add_tributary_transactions_task]),
-  );
-
  // Spawn the sign slash report task
  let (sign_slash_report_task_def, sign_slash_report_task) = Task::new();
  tokio::spawn(
    (SignSlashReportTask {
      db: db.clone(),
-      tributary_db,
+      tributary_db: tributary_db.clone(),
      tributary: tributary.clone(),
      set: set.clone(),
-      key: serai_key,
+      key: serai_key.clone(),
    })
    .continually_run(sign_slash_report_task_def, vec![]),
  );

+  // Spawn the add transactions task
+  let (add_tributary_transactions_task_def, add_tributary_transactions_task) = Task::new();
+  tokio::spawn(
+    (AddTributaryTransactionsTask {
+      db: db.clone(),
+      tributary_db,
+      tributary: tributary.clone(),
+      set: set.set,
+      key: serai_key,
+    })
+    .continually_run(add_tributary_transactions_task_def, vec![]),
+  );
+
  // Whenever a new block occurs, immediately run the scan task
  // This function also preserves the ProvideCosignCosignedTransactionsTask handle until the
  // Tributary is retired, ensuring it isn't dropped prematurely and that the tasks don't run ad
@@ -591,6 +444,10 @@ pub(crate) async fn spawn_tributary<P: P2p>(
    set.set,
    tributary,
    scan_tributary_task,
-    vec![provide_cosign_cosigned_transactions_task, confirm_dkg_task, sign_slash_report_task],
+    vec![
+      provide_cosign_cosigned_transactions_task,
+      sign_slash_report_task,
+      add_tributary_transactions_task,
+    ],
  ));
 }
@@ -22,9 +22,6 @@ bitvec = { version = "1", default-features = false, features = ["std"] }

 scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

-dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
-
 serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }

 log = { version = "0.4", default-features = false, features = ["std"] }
@@ -3,7 +3,7 @@ use std::sync::Arc;

 use futures::stream::{StreamExt, FuturesOrdered};

-use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
+use serai_client::Serai;

 use messages::substrate::{InInstructionResult, ExecutedBatch, CoordinatorMessage};

@@ -152,7 +152,6 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
        else {
          panic!("SetRetired event wasn't a SetRetired event: {set_retired:?}");
        };
-        let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
        crate::Canonical::send(
          &mut txn,
          set.network,

@@ -160,7 +159,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
        );
      }

-      for network in serai_client::primitives::EXTERNAL_NETWORKS {
+      for network in serai_client::primitives::NETWORKS {
        let mut batch = None;
        for this_batch in &block.batch_events {
          let serai_client::in_instructions::InInstructionsEvent::Batch {

@@ -181,7 +180,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
          batch = Some(ExecutedBatch {
            id: *id,
            publisher: *publishing_session,
-            external_network_block_hash: external_network_block_hash.0,
+            external_network_block_hash: *external_network_block_hash,
            in_instructions_hash: *in_instructions_hash,
            in_instruction_results: in_instruction_results
              .iter()

@@ -202,7 +201,7 @@ impl<D: Db> ContinuallyRan for CanonicalEventStream<D> {
          let serai_client::coins::CoinsEvent::BurnWithInstruction { from: _, instruction } =
            &burn
          else {
-            panic!("BurnWithInstruction event wasn't a BurnWithInstruction event: {burn:?}");
+            panic!("Burn event wasn't a Burn.in event: {burn:?}");
          };
          if instruction.balance.coin.network() == network {
            burns.push(instruction.clone());
@@ -4,8 +4,8 @@ use std::sync::Arc;
 use futures::stream::{StreamExt, FuturesOrdered};

 use serai_client::{
-  primitives::{SeraiAddress, EmbeddedEllipticCurve},
-  validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet},
+  primitives::{PublicKey, NetworkId, EmbeddedEllipticCurve},
+  validator_sets::primitives::MAX_KEY_SHARES_PER_SET,
   Serai,
 };

@@ -26,14 +26,14 @@ create_db!(
 pub struct EphemeralEventStream<D: Db> {
   db: D,
   serai: Arc<Serai>,
-  validator: SeraiAddress,
+  validator: PublicKey,
 }

 impl<D: Db> EphemeralEventStream<D> {
   /// Create a new ephemeral event stream.
   ///
   /// Only one of these may exist over the provided database.
-  pub fn new(db: D, serai: Arc<Serai>, validator: SeraiAddress) -> Self {
+  pub fn new(db: D, serai: Arc<Serai>, validator: PublicKey) -> Self {
     Self { db, serai, validator }
   }
 }
@@ -130,22 +130,21 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
      let serai_client::validator_sets::ValidatorSetsEvent::NewSet { set } = &new_set else {
        panic!("NewSet event wasn't a NewSet event: {new_set:?}");
      };

      // We only coordinate over external networks
-      let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
+      if set.network == NetworkId::Serai {
+        continue;
+      }

      let serai = self.serai.as_of(block.block_hash);
      let serai = serai.validator_sets();
      let Some(validators) =
-        serai.participants(set.network.into()).await.map_err(|e| format!("{e:?}"))?
+        serai.participants(set.network).await.map_err(|e| format!("{e:?}"))?
      else {
        Err(format!(
          "block #{block_number} declared a new set but didn't have the participants"
        ))?
      };
-      let validators = validators
-        .into_iter()
-        .map(|(validator, weight)| (SeraiAddress::from(validator), weight))
-        .collect::<Vec<_>>();
      let in_set = validators.iter().any(|(validator, _)| *validator == self.validator);
      if in_set {
        if u16::try_from(validators.len()).is_err() {
@@ -178,16 +177,14 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
          embedded_elliptic_curve_keys.push_back(async move {
            tokio::try_join!(
              // One future to fetch the substrate embedded key
-              serai.embedded_elliptic_curve_key(
-                validator.into(),
-                EmbeddedEllipticCurve::Embedwards25519
-              ),
+              serai
+                .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519),
              // One future to fetch the external embedded key, if there is a distinct curve
              async {
                // `embedded_elliptic_curves` is documented to have the second entry be the
                // network-specific curve (if it exists and is distinct from Embedwards25519)
                if let Some(curve) = set.network.embedded_elliptic_curves().get(1) {
-                  serai.embedded_elliptic_curve_key(validator.into(), *curve).await.map(Some)
+                  serai.embedded_elliptic_curve_key(validator, *curve).await.map(Some)
                } else {
                  Ok(None)
                }
@@ -218,22 +215,19 @@ impl<D: Db> ContinuallyRan for EphemeralEventStream<D> {
          }
        }

-        let mut new_set = NewSetInformation {
-          set,
-          serai_block: block.block_hash,
-          declaration_time: block.time,
-          // TODO: This should be inlined into the Processor's key gen code
-          // It's legacy from when we removed participants from the key gen
-          threshold: ((total_weight * 2) / 3) + 1,
-          validators,
-          evrf_public_keys,
-          participant_indexes: Default::default(),
-          participant_indexes_reverse_lookup: Default::default(),
-        };
-        // These aren't serialized, and we immediately serialize and drop this, so this isn't
-        // necessary. It's just good practice not to have this be dirty
-        new_set.init_participant_indexes();
-        crate::NewSet::send(&mut txn, &new_set);
+        crate::NewSet::send(
+          &mut txn,
+          &NewSetInformation {
+            set: *set,
+            serai_block: block.block_hash,
+            declaration_time: block.time,
+            // TODO: Why do we have this as an explicit field here?
+            // Shouldn't this be inlined into the Processor's key gen code, where it's used?
+            threshold: ((total_weight * 2) / 3) + 1,
+            validators,
+            evrf_public_keys,
+          },
+        );
      }
    }
|
|||||||
else {
|
else {
|
||||||
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
panic!("AcceptedHandover event wasn't a AcceptedHandover event: {accepted_handover:?}");
|
||||||
};
|
};
|
||||||
let Ok(set) = ExternalValidatorSet::try_from(*set) else { continue };
|
crate::SignSlashReport::send(&mut txn, *set);
|
||||||
crate::SignSlashReport::send(&mut txn, set);
|
|
||||||
}
|
}
|
||||||
|
|
||||||
txn.commit();
|
txn.commit();
|
||||||
|
|||||||
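Both sides of the diff compute the signing threshold from the total key-share weight as ((total_weight * 2) / 3) + 1, the smallest share count strictly greater than two-thirds under integer division. A worked check:

fn threshold(total_weight: u16) -> u16 {
  ((total_weight * 2) / 3) + 1
}

fn main() {
  assert_eq!(threshold(3), 3);  // (6 / 3) + 1; more than 2-of-3
  assert_eq!(threshold(4), 3);  // (8 / 3) + 1; more than 2.67-of-4
  assert_eq!(threshold(9), 7);  // (18 / 3) + 1; more than 6-of-9
  assert_eq!(threshold(10), 7); // (20 / 3) + 1; more than 6.67-of-10
}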
@@ -2,16 +2,12 @@
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]

-use std::collections::HashMap;
-
 use scale::{Encode, Decode};
-use borsh::{BorshSerialize, BorshDeserialize};
+use borsh::{io, BorshSerialize, BorshDeserialize};

-use dkg::Participant;
-
 use serai_client::{
-  primitives::{ExternalNetworkId, SeraiAddress, Signature},
-  validator_sets::primitives::{Session, ExternalValidatorSet, KeyPair, SlashReport},
+  primitives::{NetworkId, PublicKey, Signature, SeraiAddress},
+  validator_sets::primitives::{Session, ValidatorSet, KeyPair},
   in_instructions::primitives::SignedBatch,
   Transaction,
 };
@@ -30,12 +26,25 @@ pub use publish_batch::PublishBatchTask;
 mod publish_slash_report;
 pub use publish_slash_report::PublishSlashReportTask;

+fn borsh_serialize_validators<W: io::Write>(
+  validators: &Vec<(PublicKey, u16)>,
+  writer: &mut W,
+) -> Result<(), io::Error> {
+  // This doesn't use `encode_to` as `encode_to` panics if the writer returns an error
+  writer.write_all(&validators.encode())
+}
+
+fn borsh_deserialize_validators<R: io::Read>(
+  reader: &mut R,
+) -> Result<Vec<(PublicKey, u16)>, io::Error> {
+  Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
+}
+
 /// The information for a new set.
 #[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-#[borsh(init = init_participant_indexes)]
 pub struct NewSetInformation {
   /// The set.
-  pub set: ExternalValidatorSet,
+  pub set: ValidatorSet,
   /// The Serai block which declared it.
   pub serai_block: [u8; 32],
   /// The time of the block which declared it, in seconds.
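The borsh_serialize_validators/borsh_deserialize_validators helpers added on the right bridge a SCALE-encoded field through borsh's field-level hooks. A toy version of the same bridging, assuming the borsh and parity-scale-codec crates (hypothetical struct and function names):

use borsh::{io, BorshSerialize, BorshDeserialize};
use scale::{Encode, Decode};

fn ser_weights<W: io::Write>(v: &Vec<(u32, u16)>, writer: &mut W) -> Result<(), io::Error> {
  // encode() then write_all, as encode_to panics if the writer errors
  writer.write_all(&v.encode())
}

fn de_weights<R: io::Read>(reader: &mut R) -> Result<Vec<(u32, u16)>, io::Error> {
  Decode::decode(&mut scale::IoReader(reader)).map_err(io::Error::other)
}

#[derive(BorshSerialize, BorshDeserialize)]
struct Record {
  #[borsh(serialize_with = "ser_weights", deserialize_with = "de_weights")]
  weights: Vec<(u32, u16)>,
}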
@@ -43,37 +52,13 @@ pub struct NewSetInformation {
   /// The threshold to use.
   pub threshold: u16,
   /// The validators, with the amount of key shares they have.
-  pub validators: Vec<(SeraiAddress, u16)>,
+  #[borsh(
+    serialize_with = "borsh_serialize_validators",
+    deserialize_with = "borsh_deserialize_validators"
+  )]
+  pub validators: Vec<(PublicKey, u16)>,
   /// The eVRF public keys.
-  ///
-  /// This will have the necessary copies of the keys proper for each validator's weight,
-  /// accordingly syncing up with `participant_indexes`.
   pub evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
-  /// The participant indexes, indexed by their validator.
-  #[borsh(skip)]
-  pub participant_indexes: HashMap<SeraiAddress, Vec<Participant>>,
-  /// The validators, indexed by their participant indexes.
-  #[borsh(skip)]
-  pub participant_indexes_reverse_lookup: HashMap<Participant, SeraiAddress>,
-}
-
-impl NewSetInformation {
-  fn init_participant_indexes(&mut self) {
-    let mut next_i = 1;
-    self.participant_indexes = HashMap::with_capacity(self.validators.len());
-    self.participant_indexes_reverse_lookup = HashMap::with_capacity(self.validators.len());
-    for (validator, weight) in &self.validators {
-      let mut these_is = Vec::with_capacity((*weight).into());
-      for _ in 0 .. *weight {
-        let this_i = Participant::new(next_i).unwrap();
-        next_i += 1;
-
-        these_is.push(this_i);
-        self.participant_indexes_reverse_lookup.insert(this_i, *validator);
-      }
-      self.participant_indexes.insert(*validator, these_is);
-    }
-  }
 }

 mod _public_db {
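The removed init_participant_indexes assigned each validator one contiguous Participant index per key share, plus a reverse lookup from index to validator. A standalone sketch of that numbering, with Participant simplified here to a u16 wrapper (hypothetical types; not the dkg crate's API):

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct Participant(u16);

type Validator = [u8; 32];

fn index_participants(
  validators: &[(Validator, u16)],
) -> (HashMap<Validator, Vec<Participant>>, HashMap<Participant, Validator>) {
  let mut forward = HashMap::with_capacity(validators.len());
  let mut reverse = HashMap::with_capacity(validators.len());
  // Indexes are assigned contiguously, starting from 1, one per key share
  let mut next_i: u16 = 1;
  for (validator, weight) in validators {
    let mut these = Vec::with_capacity(usize::from(*weight));
    for _ in 0 .. *weight {
      let i = Participant(next_i);
      next_i += 1;
      these.push(i);
      reverse.insert(i, *validator);
    }
    forward.insert(*validator, these);
  }
  (forward, reverse)
}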
@@ -82,24 +67,24 @@ mod _public_db {
  db_channel!(
    CoordinatorSubstrate {
      // Canonical messages to send to the processor
-      Canonical: (network: ExternalNetworkId) -> messages::substrate::CoordinatorMessage,
+      Canonical: (network: NetworkId) -> messages::substrate::CoordinatorMessage,

      // Relevant new set, from an ephemeral event stream
      NewSet: () -> NewSetInformation,
      // Potentially relevant sign slash report, from an ephemeral event stream
-      SignSlashReport: (set: ExternalValidatorSet) -> (),
+      SignSlashReport: (set: ValidatorSet) -> (),

      // Signed batches to publish onto the Serai network
-      SignedBatches: (network: ExternalNetworkId) -> SignedBatch,
+      SignedBatches: (network: NetworkId) -> SignedBatch,
    }
  );

  create_db!(
    CoordinatorSubstrate {
      // Keys to set on the Serai network
-      Keys: (network: ExternalNetworkId) -> (Session, Vec<u8>),
+      Keys: (network: NetworkId) -> (Session, Vec<u8>),
      // Slash reports to publish onto the Serai network
-      SlashReports: (network: ExternalNetworkId) -> (Session, Vec<u8>),
+      SlashReports: (network: NetworkId) -> (Session, Vec<u8>),
    }
  );
 }
@@ -109,7 +94,7 @@ pub struct Canonical;
|
|||||||
impl Canonical {
|
impl Canonical {
|
||||||
pub(crate) fn send(
|
pub(crate) fn send(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
network: ExternalNetworkId,
|
network: NetworkId,
|
||||||
msg: &messages::substrate::CoordinatorMessage,
|
msg: &messages::substrate::CoordinatorMessage,
|
||||||
) {
|
) {
|
||||||
_public_db::Canonical::send(txn, network, msg);
|
_public_db::Canonical::send(txn, network, msg);
|
||||||
@@ -117,7 +102,7 @@ impl Canonical {
|
|||||||
/// Try to receive a canonical event, returning `None` if there is none to receive.
|
/// Try to receive a canonical event, returning `None` if there is none to receive.
|
||||||
pub fn try_recv(
|
pub fn try_recv(
|
||||||
txn: &mut impl DbTxn,
|
txn: &mut impl DbTxn,
|
||||||
network: ExternalNetworkId,
|
network: NetworkId,
|
||||||
) -> Option<messages::substrate::CoordinatorMessage> {
|
) -> Option<messages::substrate::CoordinatorMessage> {
|
||||||
_public_db::Canonical::try_recv(txn, network)
|
_public_db::Canonical::try_recv(txn, network)
|
||||||
}
|
}
|
||||||
@@ -141,12 +126,12 @@ impl NewSet {
|
|||||||
/// notifications for all relevant validator sets will be included.
|
/// notifications for all relevant validator sets will be included.
|
||||||
pub struct SignSlashReport;
|
pub struct SignSlashReport;
|
||||||
impl SignSlashReport {
|
impl SignSlashReport {
|
||||||
pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
|
pub(crate) fn send(txn: &mut impl DbTxn, set: ValidatorSet) {
|
||||||
_public_db::SignSlashReport::send(txn, set, &());
|
_public_db::SignSlashReport::send(txn, set, &());
|
||||||
}
|
}
|
||||||
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
/// Try to receive a notification to sign a slash report, returning `None` if there is none to
|
||||||
/// receive.
|
/// receive.
|
||||||
pub fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<()> {
|
pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<()> {
|
||||||
_public_db::SignSlashReport::try_recv(txn, set)
|
_public_db::SignSlashReport::try_recv(txn, set)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -160,7 +145,7 @@ impl Keys {
  /// reported at once.
  pub fn set(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    key_pair: KeyPair,
    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
    signature: Signature,
@@ -180,10 +165,7 @@ impl Keys {
    );
    _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
  }
-  pub(crate) fn take(
-    txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
-  ) -> Option<(Session, Transaction)> {
+  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
    let (session, tx) = _public_db::Keys::take(txn, network)?;
    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
  }
@@ -193,14 +175,20 @@ impl Keys {
pub struct SignedBatches;
impl SignedBatches {
  /// Send a `SignedBatch` to publish onto Serai.
+  ///
+  /// These will be published sequentially. Out-of-order sending risks hanging the task.
  pub fn send(txn: &mut impl DbTxn, batch: &SignedBatch) {
    _public_db::SignedBatches::send(txn, batch.batch.network, batch);
  }
-  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: ExternalNetworkId) -> Option<SignedBatch> {
+  pub(crate) fn try_recv(txn: &mut impl DbTxn, network: NetworkId) -> Option<SignedBatch> {
    _public_db::SignedBatches::try_recv(txn, network)
  }
}

+/// The slash report was invalid.
+#[derive(Debug)]
+pub struct InvalidSlashReport;
+
/// The slash reports to publish onto Serai.
pub struct SlashReports;
impl SlashReports {
@@ -208,30 +196,32 @@ impl SlashReports {
  ///
  /// This only saves the most recent slashes as only a single session is eligible to have its
  /// slashes reported at once.
+  ///
+  /// Returns Err if the slashes are invalid. Returns Ok if the slashes weren't detected as
+  /// invalid. Slashes may be considered invalid by the Serai blockchain later even if not detected
+  /// as invalid here.
  pub fn set(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-    slash_report: SlashReport,
+    set: ValidatorSet,
+    slashes: Vec<(SeraiAddress, u32)>,
    signature: Signature,
-  ) {
+  ) -> Result<(), InvalidSlashReport> {
    // If we have a more recent slash report, don't write this historic one
    if let Some((existing_session, _)) = _public_db::SlashReports::get(txn, set.network) {
      if existing_session.0 >= set.session.0 {
-        return;
+        return Ok(());
      }
    }

    let tx = serai_client::validator_sets::SeraiValidatorSets::report_slashes(
      set.network,
-      slash_report,
+      slashes.try_into().map_err(|_| InvalidSlashReport)?,
      signature,
    );
    _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
+    Ok(())
  }
-  pub(crate) fn take(
-    txn: &mut impl DbTxn,
-    network: ExternalNetworkId,
-  ) -> Option<(Session, Transaction)> {
+  pub(crate) fn take(txn: &mut impl DbTxn, network: NetworkId) -> Option<(Session, Transaction)> {
    let (session, tx) = _public_db::SlashReports::take(txn, network)?;
    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
  }
}
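SlashReports::set guards against regressions by refusing to overwrite a newer session's report with an older one, so at most one report per network is ever pending. A toy model of that guard, with hypothetical Network/Session aliases standing in for the real DB types:

use std::collections::HashMap;

// Hypothetical, simplified stand-ins for the DB entry keyed by network.
type Network = u8;
type Session = u32;

// Mirrors the guard in SlashReports::set: a report for an older (or equal)
// session is silently dropped.
fn set_slash_report(
  db: &mut HashMap<Network, (Session, Vec<u8>)>,
  network: Network,
  session: Session,
  tx: Vec<u8>,
) {
  if let Some((existing, _)) = db.get(&network) {
    if *existing >= session {
      return;
    }
  }
  db.insert(network, (session, tx));
}

fn main() {
  let mut db = HashMap::new();
  set_slash_report(&mut db, 0, 2, vec![2]);
  set_slash_report(&mut db, 0, 1, vec![1]); // stale; ignored
  assert_eq!(db[&0], (2, vec![2]));
}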
@@ -1,32 +1,31 @@
use core::future::Future;
use std::sync::Arc;

-#[rustfmt::skip]
-use serai_client::{primitives::ExternalNetworkId, in_instructions::primitives::SignedBatch, SeraiError, Serai};
+use serai_db::{DbTxn, Db};
+use serai_client::{primitives::NetworkId, SeraiError, Serai};

-use serai_db::{Get, DbTxn, Db, create_db};
use serai_task::ContinuallyRan;

use crate::SignedBatches;

-create_db!(
-  CoordinatorSubstrate {
-    LastPublishedBatch: (network: ExternalNetworkId) -> u32,
-    BatchesToPublish: (network: ExternalNetworkId, batch: u32) -> SignedBatch,
-  }
-);
-
/// Publish `SignedBatch`s from `SignedBatches` onto Serai.
pub struct PublishBatchTask<D: Db> {
  db: D,
  serai: Arc<Serai>,
-  network: ExternalNetworkId,
+  network: NetworkId,
}

impl<D: Db> PublishBatchTask<D> {
  /// Create a task to publish `SignedBatch`s onto Serai.
-  pub fn new(db: D, serai: Arc<Serai>, network: ExternalNetworkId) -> Self {
-    Self { db, serai, network }
+  ///
+  /// Returns None if `network == NetworkId::Serai`.
+  // TODO: ExternalNetworkId
+  pub fn new(db: D, serai: Arc<Serai>, network: NetworkId) -> Option<Self> {
+    if network == NetworkId::Serai {
+      None?
+    };
+    Some(Self { db, serai, network })
  }
}

@@ -35,52 +34,32 @@ impl<D: Db> ContinuallyRan for PublishBatchTask<D> {

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
-      // Read from SignedBatches, which is sequential, into our own mapping
+      let mut made_progress = false;

      loop {
        let mut txn = self.db.txn();
        let Some(batch) = SignedBatches::try_recv(&mut txn, self.network) else {
+          // No batch to publish at this time
          break;
        };

-        // If this is a Batch not yet published, save it into our unordered mapping
-        if LastPublishedBatch::get(&txn, self.network) < Some(batch.batch.id) {
-          BatchesToPublish::set(&mut txn, self.network, batch.batch.id, &batch);
-        }
-
-        txn.commit();
-      }
-
-      // Synchronize our last published batch with the Serai network's
-      let next_to_publish = {
-        // This uses the latest finalized block, not the latest cosigned block, which should be
-        // fine as in the worst case, the only impact is no longer attempting TX publication
+        // Publish this Batch if it hasn't already been published
        let serai = self.serai.as_of_latest_finalized_block().await?;
        let last_batch = serai.in_instructions().last_batch_for_network(self.network).await?;
-        let mut txn = self.db.txn();
-        let mut our_last_batch = LastPublishedBatch::get(&txn, self.network);
-        while our_last_batch < last_batch {
-          let next_batch = our_last_batch.map(|batch| batch + 1).unwrap_or(0);
-          // Clean up the Batch to publish since it's already been published
-          BatchesToPublish::take(&mut txn, self.network, next_batch);
-          our_last_batch = Some(next_batch);
-        }
-        if let Some(last_batch) = our_last_batch {
-          LastPublishedBatch::set(&mut txn, self.network, &last_batch);
-        }
-        last_batch.map(|batch| batch + 1).unwrap_or(0)
-      };
-
-      let made_progress =
-        if let Some(batch) = BatchesToPublish::get(&self.db, self.network, next_to_publish) {
+        if last_batch < Some(batch.batch.id) {
+          // This stream of Batches *should* be sequential within the larger context of the Serai
+          // coordinator. In this library, we use a more relaxed definition and don't assert
+          // sequence. This does risk hanging the task, if Batch #n+1 is sent before Batch #n, but
+          // that is a documented fault of the `SignedBatches` API.
          self
            .serai
            .publish(&serai_client::in_instructions::SeraiInInstructions::execute_batch(batch))
            .await?;
-          true
-        } else {
-          false
-        };
+        }

+        txn.commit();
+        made_progress = true;
+      }
      Ok(made_progress)
    }
  }
}
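The two sides of this file implement different delivery contracts: the removed code parked out-of-order batches in BatchesToPublish and drained them in ID order, while the added code trusts the channel to already be sequential (at the documented risk of hanging). A sketch of the removed side's reorder-buffer idea, under simplified, hypothetical types:

use std::collections::HashMap;

// Batches may arrive out of order, are parked by ID, and are published
// strictly in order once every predecessor is available.
struct ReorderBuffer {
  next_to_publish: u32,
  pending: HashMap<u32, Vec<u8>>, // batch ID -> serialized batch
}

impl ReorderBuffer {
  fn receive(&mut self, id: u32, batch: Vec<u8>) {
    // Already-published IDs are dropped rather than re-queued
    if id >= self.next_to_publish {
      self.pending.insert(id, batch);
    }
  }
  // Returns the batches which are now publishable, in order.
  fn drain_ready(&mut self) -> Vec<Vec<u8>> {
    let mut ready = vec![];
    while let Some(batch) = self.pending.remove(&self.next_to_publish) {
      ready.push(batch);
      self.next_to_publish += 1;
    }
    ready
  }
}

fn main() {
  let mut buf = ReorderBuffer { next_to_publish: 0, pending: HashMap::new() };
  buf.receive(1, vec![1]);
  assert!(buf.drain_ready().is_empty()); // still waiting on batch 0
  buf.receive(0, vec![0]);
  assert_eq!(buf.drain_ready().len(), 2); // 0 then 1, in order
}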
@@ -3,7 +3,7 @@ use std::sync::Arc;

use serai_db::{DbTxn, Db};

-use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::Session, Serai};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::Session, Serai};

use serai_task::ContinuallyRan;

@@ -22,78 +22,66 @@ impl<D: Db> PublishSlashReportTask<D> {
  }
}

-impl<D: Db> PublishSlashReportTask<D> {
-  // Returns if a slash report was successfully published
-  async fn publish(&mut self, network: ExternalNetworkId) -> Result<bool, String> {
-    let mut txn = self.db.txn();
-    let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
-      // No slash report to publish
-      return Ok(false);
-    };
-
-    // This uses the latest finalized block, not the latest cosigned block, which should be
-    // fine as in the worst case, the only impact is no longer attempting TX publication
-    let serai = self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
-    let serai = serai.validator_sets();
-    let session_after_slash_report = Session(session.0 + 1);
-    let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
-    let current_session = current_session.map(|session| session.0);
-    // Only attempt to publish the slash report for session #n while session #n+1 is still
-    // active
-    let session_after_slash_report_retired = current_session > Some(session_after_slash_report.0);
-    if session_after_slash_report_retired {
-      // Commit the txn to drain this slash report from the database and not try it again later
-      txn.commit();
-      return Ok(false);
-    }
-
-    if Some(session_after_slash_report.0) != current_session {
-      // We already checked the current session wasn't greater, and they're not equal
-      assert!(current_session < Some(session_after_slash_report.0));
-      // This would mean the Serai node is resyncing and is behind where it previously was
-      Err("have a slash report for a session Serai has yet to retire".to_string())?;
-    }
-
-    // If this session which should publish a slash report already has, move on
-    let key_pending_slash_report =
-      serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
-    if key_pending_slash_report.is_none() {
-      txn.commit();
-      return Ok(false);
-    };
-
-    match self.serai.publish(&slash_report).await {
-      Ok(()) => {
-        txn.commit();
-        Ok(true)
-      }
-      // This could be specific to this TX (such as an already-in-mempool error) and it may be
-      // worthwhile to continue iteration with the other pending slash reports. We assume this
-      // error is ephemeral and that the latency incurred for this ephemeral error to resolve is
-      // minuscule compared to the window available to publish the slash report. That makes
-      // this a non-issue.
-      Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}")),
-    }
-  }
-}
-
impl<D: Db> ContinuallyRan for PublishSlashReportTask<D> {
  type Error = String;

  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
-      let mut error = None;
-      for network in serai_client::primitives::EXTERNAL_NETWORKS {
-        let network_res = self.publish(network).await;
-        // We made progress if any network successfully published their slash report
-        made_progress |= network_res == Ok(true);
-        // We want to yield the first error *after* attempting for every network
-        error = error.or(network_res.err());
-      }
-      // Yield the error
-      if let Some(error) = error {
-        Err(error)?
+      for network in serai_client::primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        };
+        let mut txn = self.db.txn();
+        let Some((session, slash_report)) = SlashReports::take(&mut txn, network) else {
+          // No slash report to publish
+          continue;
+        };
+        let serai =
+          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
+        let serai = serai.validator_sets();
+        let session_after_slash_report = Session(session.0 + 1);
+        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
+        let current_session = current_session.map(|session| session.0);
+        // Only attempt to publish the slash report for session #n while session #n+1 is still
+        // active
+        let session_after_slash_report_retired =
+          current_session > Some(session_after_slash_report.0);
+        if session_after_slash_report_retired {
+          // Commit the txn to drain this slash report from the database and not try it again later
+          txn.commit();
+          continue;
+        }
+
+        if Some(session_after_slash_report.0) != current_session {
+          // We already checked the current session wasn't greater, and they're not equal
+          assert!(current_session < Some(session_after_slash_report.0));
+          // This would mean the Serai node is resyncing and is behind where it previously was
+          Err("have a slash report for a session Serai has yet to retire".to_string())?;
+        }
+
+        // If this session which should publish a slash report already has, move on
+        let key_pending_slash_report =
+          serai.key_pending_slash_report(network).await.map_err(|e| format!("{e:?}"))?;
+        if key_pending_slash_report.is_none() {
+          txn.commit();
+          continue;
+        };
+
+        match self.serai.publish(&slash_report).await {
+          Ok(()) => {
+            txn.commit();
+            made_progress = true;
+          }
+          // This could be specific to this TX (such as an already-in-mempool error) and it may be
+          // worthwhile to continue iteration with the other pending slash reports. We assume this
+          // error is ephemeral and that the latency incurred for this ephemeral error to resolve is
+          // minuscule compared to the window available to publish the slash report. That makes
+          // this a non-issue.
+          Err(e) => Err(format!("couldn't publish slash report transaction: {e:?}"))?,
+        }
      }
      Ok(made_progress)
    }
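Both versions enforce the same publication window: the report for session n is only publishable while session n + 1 is active; once a later session begins it is dropped, and a current session before n + 1 indicates a resyncing node. A compact restatement of that decision, assuming plain u32 session numbers:

// A sketch of the publication-window logic under simplified types.
enum Disposition {
  Publish,    // session n + 1 is active: try to publish
  Drop,       // session n + 1 already retired: too late
  NodeBehind, // current session is before n + 1: the node is resyncing
}

fn disposition(report_session: u32, current_session: Option<u32>) -> Disposition {
  let session_after = report_session + 1;
  if current_session > Some(session_after) {
    Disposition::Drop
  } else if current_session == Some(session_after) {
    Disposition::Publish
  } else {
    // Covers both None and sessions at or before the report's own
    Disposition::NodeBehind
  }
}

fn main() {
  assert!(matches!(disposition(5, Some(6)), Disposition::Publish));
  assert!(matches!(disposition(5, Some(7)), Disposition::Drop));
  assert!(matches!(disposition(5, Some(5)), Disposition::NodeBehind));
  assert!(matches!(disposition(5, None), Disposition::NodeBehind));
}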
@@ -3,7 +3,7 @@ use std::sync::Arc;

use serai_db::{DbTxn, Db};

-use serai_client::{validator_sets::primitives::ExternalValidatorSet, Serai};
+use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

use serai_task::ContinuallyRan;

@@ -28,19 +28,21 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {
  fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
    async move {
      let mut made_progress = false;
-      for network in serai_client::primitives::EXTERNAL_NETWORKS {
+      for network in serai_client::primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        };
+
        let mut txn = self.db.txn();
        let Some((session, keys)) = Keys::take(&mut txn, network) else {
          // No keys to set
          continue;
        };

-        // This uses the latest finalized block, not the latest cosigned block, which should be
-        // fine as in the worst case, the only impact is no longer attempting TX publication
        let serai =
          self.serai.as_of_latest_finalized_block().await.map_err(|e| format!("{e:?}"))?;
        let serai = serai.validator_sets();
-        let current_session = serai.session(network.into()).await.map_err(|e| format!("{e:?}"))?;
+        let current_session = serai.session(network).await.map_err(|e| format!("{e:?}"))?;
        let current_session = current_session.map(|session| session.0);
        // Only attempt to set these keys if this isn't a retired session
        if Some(session.0) < current_session {
@@ -58,7 +60,7 @@ impl<D: Db> ContinuallyRan for SetKeysTask<D> {

        // If this session already has had its keys set, move on
        if serai
-          .keys(ExternalValidatorSet { network, session })
+          .keys(ValidatorSet { network, session })
          .await
          .map_err(|e| format!("{e:?}"))?
          .is_some()
@@ -9,7 +9,7 @@ pub(crate) fn merkle(hash_args: &[[u8; 32]]) -> [u8; 32] {
  let zero = [0; 32];
  let mut interim;
  while hashes.len() > 1 {
-    interim = Vec::with_capacity(hashes.len().div_ceil(2));
+    interim = Vec::with_capacity((hashes.len() + 1) / 2);

    let mut i = 0;
    while i < hashes.len() {
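The two capacity expressions are equivalent: hashes.len().div_ceil(2) and (hashes.len() + 1) / 2 both compute the ceiling of len / 2, which is the number of nodes in the next layer of a binary merkle tree when an odd trailing node is carried up. A quick check of the equivalence:

// Both spellings compute ceil(len / 2), the size of the next merkle layer.
fn next_layer_len(len: usize) -> usize {
  (len + 1) / 2
}

fn main() {
  for len in 1 .. 100 {
    assert_eq!(next_layer_len(len), len.div_ceil(2));
  }
  assert_eq!(next_layer_len(5), 3); // two pairs plus one carried node
}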
@@ -21,14 +21,13 @@ workspace = true
zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }

-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
-borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-
blake2 = { version = "0.10", default-features = false, features = ["std"] }
ciphersuite = { path = "../../crypto/ciphersuite", default-features = false, features = ["std"] }
-dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }

+scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }

serai-db = { path = "../../common/db" }
@@ -3,7 +3,7 @@ use std::collections::HashMap;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ExternalValidatorSet};
+use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ValidatorSet};

use messages::sign::{VariantSignId, SignId};

@@ -15,35 +15,20 @@ use crate::transaction::SigningProtocolRound;

/// A topic within the database which the group participates in
#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
-pub enum Topic {
+pub(crate) enum Topic {
  /// Vote to remove a participant
-  RemoveParticipant {
-    /// The participant to remove
-    participant: SeraiAddress,
-  },
+  RemoveParticipant { participant: SeraiAddress },

  // DkgParticipation isn't represented here as participations are immediately sent to the
  // processor, not accumulated within this database
  /// Participation in the signing protocol to confirm the DKG results on Substrate
-  DkgConfirmation {
-    /// The attempt number this is for
-    attempt: u32,
-    /// The round of the signing protocol
-    round: SigningProtocolRound,
-  },
+  DkgConfirmation { attempt: u32, round: SigningProtocolRound },

  /// The local view of the SlashReport, to be aggregated into the final SlashReport
  SlashReport,

  /// Participation in a signing protocol
-  Sign {
-    /// The ID of the signing protocol
-    id: VariantSignId,
-    /// The attempt number this is for
-    attempt: u32,
-    /// The round of the signing protocol
-    round: SigningProtocolRound,
-  },
+  Sign { id: VariantSignId, attempt: u32, round: SigningProtocolRound },
}

enum Participating {
@@ -61,7 +46,7 @@ impl Topic {
        attempt: attempt + 1,
        round: SigningProtocolRound::Preprocess,
      }),
-      Topic::SlashReport => None,
+      Topic::SlashReport { .. } => None,
      Topic::Sign { id, attempt, round: _ } => {
        Some(Topic::Sign { id, attempt: attempt + 1, round: SigningProtocolRound::Preprocess })
      }
@@ -83,7 +68,7 @@ impl Topic {
        }
        SigningProtocolRound::Share => None,
      },
-      Topic::SlashReport => None,
+      Topic::SlashReport { .. } => None,
      Topic::Sign { id, attempt, round } => match round {
        SigningProtocolRound::Preprocess => {
          let attempt = attempt + 1;
@@ -94,46 +79,19 @@ impl Topic {
    }
  }

-  /// The SignId for this topic
-  ///
-  /// Returns None if Topic isn't Topic::Sign
-  pub(crate) fn sign_id(self, set: ExternalValidatorSet) -> Option<messages::sign::SignId> {
+  // The SignId for this topic
+  //
+  // Returns None if Topic isn't Topic::Sign
+  pub(crate) fn sign_id(self, set: ValidatorSet) -> Option<messages::sign::SignId> {
    #[allow(clippy::match_same_arms)]
    match self {
      Topic::RemoveParticipant { .. } => None,
      Topic::DkgConfirmation { .. } => None,
-      Topic::SlashReport => None,
+      Topic::SlashReport { .. } => None,
      Topic::Sign { id, attempt, round: _ } => Some(SignId { session: set.session, id, attempt }),
    }
  }

-  /// The SignId for this DKG Confirmation.
-  ///
-  /// This is undefined except for being consistent to the DKG Confirmation signing protocol and
-  /// unique across sets.
-  ///
-  /// Returns None if Topic isn't Topic::DkgConfirmation.
-  pub(crate) fn dkg_confirmation_sign_id(
-    self,
-    set: ExternalValidatorSet,
-  ) -> Option<messages::sign::SignId> {
-    #[allow(clippy::match_same_arms)]
-    match self {
-      Topic::RemoveParticipant { .. } => None,
-      Topic::DkgConfirmation { attempt, round: _ } => Some({
-        let id = {
-          let mut id = [0; 32];
-          let encoded_set = set.encode();
-          id[.. encoded_set.len()].copy_from_slice(&encoded_set);
-          VariantSignId::Batch(id)
-        };
-        SignId { session: set.session, id, attempt }
-      }),
-      Topic::SlashReport => None,
-      Topic::Sign { .. } => None,
-    }
-  }
-
  /// The topic which precedes this topic as a prerequisite
  ///
  /// The preceding topic must define this topic as succeeding
@@ -147,7 +105,7 @@ impl Topic {
          Some(Topic::DkgConfirmation { attempt, round: SigningProtocolRound::Preprocess })
        }
      },
-      Topic::SlashReport => None,
+      Topic::SlashReport { .. } => None,
      Topic::Sign { id, attempt, round } => match round {
        SigningProtocolRound::Preprocess => None,
        SigningProtocolRound::Share => {
@@ -170,7 +128,7 @@ impl Topic {
        }
        SigningProtocolRound::Share => None,
      },
-      Topic::SlashReport => None,
+      Topic::SlashReport { .. } => None,
      Topic::Sign { id, attempt, round } => match round {
        SigningProtocolRound::Preprocess => {
          Some(Topic::Sign { id, attempt, round: SigningProtocolRound::Share })
@@ -180,22 +138,21 @@ impl Topic {
    }
  }

-  /// If this topic requires recognition before entries are permitted for it.
-  pub fn requires_recognition(&self) -> bool {
+  fn requires_whitelisting(&self) -> bool {
    #[allow(clippy::match_same_arms)]
    match self {
-      // We don't require recognition to remove a participant
+      // We don't require whitelisting to remove a participant
      Topic::RemoveParticipant { .. } => false,
-      // We don't require recognition for the first attempt, solely the re-attempts
+      // We don't require whitelisting for the first attempt, solely the re-attempts
      Topic::DkgConfirmation { attempt, .. } => *attempt != 0,
-      // We don't require recognition for the slash report
-      Topic::SlashReport => false,
+      // We don't require whitelisting for the slash report
+      Topic::SlashReport { .. } => false,
-      // We do require recognition for every sign protocol
+      // We do require whitelisting for every sign protocol
      Topic::Sign { .. } => true,
    }
  }

-  fn required_participation(&self, n: u16) -> u16 {
+  fn required_participation(&self, n: u64) -> u64 {
    let _ = self;
    // All of our topics require 2/3rds participation
    ((2 * n) / 3) + 1
@@ -206,7 +163,7 @@ impl Topic {
    match self {
      Topic::RemoveParticipant { .. } => Participating::Everyone,
      Topic::DkgConfirmation { .. } => Participating::Participated,
-      Topic::SlashReport => Participating::Everyone,
+      Topic::SlashReport { .. } => Participating::Everyone,
      Topic::Sign { .. } => Participating::Participated,
    }
  }
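required_participation's ((2 * n) / 3) + 1 is the smallest weight strictly greater than two-thirds of the total n, the usual BFT-style participation bound over key shares. A small verification of that property:

// required_participation yields the least w with 3 * w > 2 * n.
fn required_participation(n: u64) -> u64 {
  ((2 * n) / 3) + 1
}

fn main() {
  assert_eq!(required_participation(3), 3);
  assert_eq!(required_participation(4), 3);
  assert_eq!(required_participation(6), 5);
  assert_eq!(required_participation(100), 67);
  for n in 1_u64 .. 1000 {
    let w = required_participation(n);
    assert!((3 * w) > (2 * n)); // strictly above two-thirds
    assert!((3 * (w - 1)) <= (2 * n)); // and minimal
  }
}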
@@ -227,48 +184,36 @@ pub(crate) enum DataSet<D: Borshy> {
create_db!(
  CoordinatorTributary {
    // The last handled tributary block's (number, hash)
-    LastHandledTributaryBlock: (set: ExternalValidatorSet) -> (u64, [u8; 32]),
+    LastHandledTributaryBlock: (set: ValidatorSet) -> (u64, [u8; 32]),

    // The slash points a validator has accrued, with u32::MAX representing a fatal slash.
-    SlashPoints: (set: ExternalValidatorSet, validator: SeraiAddress) -> u32,
+    SlashPoints: (set: ValidatorSet, validator: SeraiAddress) -> u32,

    // The cosign intent for a Substrate block
-    CosignIntents: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
+    CosignIntents: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> CosignIntent,
    // The latest Substrate block to cosign.
-    LatestSubstrateBlockToCosign: (set: ExternalValidatorSet) -> [u8; 32],
+    LatestSubstrateBlockToCosign: (set: ValidatorSet) -> [u8; 32],
    // The hash of the block we're actively cosigning.
-    ActivelyCosigning: (set: ExternalValidatorSet) -> [u8; 32],
+    ActivelyCosigning: (set: ValidatorSet) -> [u8; 32],
    // If this block has already been cosigned.
-    Cosigned: (set: ExternalValidatorSet, substrate_block_hash: [u8; 32]) -> (),
+    Cosigned: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> (),

-    // The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
-    SubstrateBlockPlans: (
-      set: ExternalValidatorSet,
-      substrate_block_hash: [u8; 32]
-    ) -> Vec<[u8; 32]>,
+    // The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
+    SubstrateBlockPlans: (set: ValidatorSet, substrate_block_hash: [u8; 32]) -> Vec<[u8; 32]>,

    // The weight accumulated for a topic.
-    AccumulatedWeight: (set: ExternalValidatorSet, topic: Topic) -> u16,
+    AccumulatedWeight: (set: ValidatorSet, topic: Topic) -> u64,
    // The entries accumulated for a topic, by validator.
-    Accumulated: <D: Borshy>(
-      set: ExternalValidatorSet,
-      topic: Topic,
-      validator: SeraiAddress
-    ) -> D,
+    Accumulated: <D: Borshy>(set: ValidatorSet, topic: Topic, validator: SeraiAddress) -> D,

    // Topics to be recognized as of a certain block number due to the reattempt protocol.
-    Reattempt: (set: ExternalValidatorSet, block_number: u64) -> Vec<Topic>,
+    Reattempt: (set: ValidatorSet, block_number: u64) -> Vec<Topic>,
  }
);

db_channel!(
  CoordinatorTributary {
-    // Messages to send to the processor
-    ProcessorMessages: (set: ExternalValidatorSet) -> messages::CoordinatorMessage,
-    // Messages for the DKG confirmation
-    DkgConfirmationMessages: (set: ExternalValidatorSet) -> messages::sign::CoordinatorMessage,
-    // Topics which have been explicitly recognized
-    RecognizedTopics: (set: ExternalValidatorSet) -> Topic,
+    ProcessorMessages: (set: ValidatorSet) -> messages::CoordinatorMessage,
  }
);

@@ -276,13 +221,13 @@ pub(crate) struct TributaryDb;
impl TributaryDb {
  pub(crate) fn last_handled_tributary_block(
    getter: &impl Get,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
  ) -> Option<(u64, [u8; 32])> {
    LastHandledTributaryBlock::get(getter, set)
  }
  pub(crate) fn set_last_handled_tributary_block(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    block_number: u64,
    block_hash: [u8; 32],
  ) {
@@ -291,26 +236,23 @@ impl TributaryDb {

  pub(crate) fn latest_substrate_block_to_cosign(
    getter: &impl Get,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
  ) -> Option<[u8; 32]> {
    LatestSubstrateBlockToCosign::get(getter, set)
  }
  pub(crate) fn set_latest_substrate_block_to_cosign(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
  ) {
    LatestSubstrateBlockToCosign::set(txn, set, &substrate_block_hash);
  }
-  pub(crate) fn actively_cosigning(
-    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-  ) -> Option<[u8; 32]> {
+  pub(crate) fn actively_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<[u8; 32]> {
    ActivelyCosigning::get(txn, set)
  }
  pub(crate) fn start_cosigning(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
    substrate_block_number: u64,
  ) {
@@ -320,7 +262,7 @@ impl TributaryDb {
    );
    ActivelyCosigning::set(txn, set, &substrate_block_hash);

-    Self::recognize_topic(
+    TributaryDb::recognize_topic(
      txn,
      set,
      Topic::Sign {
@@ -330,33 +272,29 @@ impl TributaryDb {
      },
    );
  }
-  pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ExternalValidatorSet) {
+  pub(crate) fn finish_cosigning(txn: &mut impl DbTxn, set: ValidatorSet) {
    assert!(ActivelyCosigning::take(txn, set).is_some(), "finished cosigning but not cosigning");
  }
  pub(crate) fn mark_cosigned(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
  ) {
    Cosigned::set(txn, set, substrate_block_hash, &());
  }
  pub(crate) fn cosigned(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
  ) -> bool {
    Cosigned::get(txn, set, substrate_block_hash).is_some()
  }

-  pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ExternalValidatorSet, topic: Topic) {
+  pub(crate) fn recognize_topic(txn: &mut impl DbTxn, set: ValidatorSet, topic: Topic) {
    AccumulatedWeight::set(txn, set, topic, &0);
-    RecognizedTopics::send(txn, set, &topic);
-  }
-  pub(crate) fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool {
-    AccumulatedWeight::get(getter, set, topic).is_some()
  }

-  pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ExternalValidatorSet, block_number: u64) {
+  pub(crate) fn start_of_block(txn: &mut impl DbTxn, set: ValidatorSet, block_number: u64) {
    for topic in Reattempt::take(txn, set, block_number).unwrap_or(vec![]) {
      /*
        TODO: Slash all people who preprocessed but didn't share, and add a delay to their
@@ -374,19 +312,13 @@ impl TributaryDb {
      Self::recognize_topic(txn, set, topic);
      if let Some(id) = topic.sign_id(set) {
        Self::send_message(txn, set, messages::sign::CoordinatorMessage::Reattempt { id });
-      } else if let Some(id) = topic.dkg_confirmation_sign_id(set) {
-        DkgConfirmationMessages::send(
-          txn,
-          set,
-          &messages::sign::CoordinatorMessage::Reattempt { id },
-        );
      }
    }
  }

  pub(crate) fn fatal_slash(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    validator: SeraiAddress,
    reason: &str,
  ) {
@@ -396,7 +328,7 @@ impl TributaryDb {

  pub(crate) fn is_fatally_slashed(
    getter: &impl Get,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    validator: SeraiAddress,
  ) -> bool {
    SlashPoints::get(getter, set, validator).unwrap_or(0) == u32::MAX
@@ -405,26 +337,21 @@ impl TributaryDb {
  #[allow(clippy::too_many_arguments)]
  pub(crate) fn accumulate<D: Borshy>(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    validators: &[SeraiAddress],
-    total_weight: u16,
+    total_weight: u64,
    block_number: u64,
    topic: Topic,
    validator: SeraiAddress,
-    validator_weight: u16,
+    validator_weight: u64,
    data: &D,
  ) -> DataSet<D> {
    // This function will only be called once for a (validator, topic) tuple due to how we handle
    // nonces on transactions (deterministically to the topic)

    let accumulated_weight = AccumulatedWeight::get(txn, set, topic);
-    if topic.requires_recognition() && accumulated_weight.is_none() {
-      Self::fatal_slash(
-        txn,
-        set,
-        validator,
-        "participated in unrecognized topic which requires recognition",
-      );
+    if topic.requires_whitelisting() && accumulated_weight.is_none() {
+      Self::fatal_slash(txn, set, validator, "participated in unrecognized topic");
      return DataSet::None;
    }
    let mut accumulated_weight = accumulated_weight.unwrap_or(0);
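accumulate is the heart of this file: each (validator, topic) pair contributes its key-share weight once, and the collected entries are yielded exactly when the accumulated weight crosses the topic's threshold. The following is a loose, simplified model with plain types, not the real DB-backed signature:

use std::collections::HashMap;

// A loose model of TributaryDb::accumulate for a single topic.
struct Accumulator {
  total_weight: u64,
  accumulated_weight: u64,
  entries: HashMap<&'static str, Vec<u8>>, // validator -> data
}

impl Accumulator {
  fn accumulate(
    &mut self,
    validator: &'static str,
    validator_weight: u64,
    data: Vec<u8>,
  ) -> Option<&HashMap<&'static str, Vec<u8>>> {
    let threshold = ((2 * self.total_weight) / 3) + 1;
    let prior = self.accumulated_weight;
    self.accumulated_weight += validator_weight;
    self.entries.insert(validator, data);
    // Yield solely at the crossing, so callers see the data set exactly once
    ((prior < threshold) && (self.accumulated_weight >= threshold)).then_some(&self.entries)
  }
}

fn main() {
  let mut acc = Accumulator { total_weight: 6, accumulated_weight: 0, entries: HashMap::new() };
  assert!(acc.accumulate("alice", 2, vec![]).is_none());
  assert!(acc.accumulate("bob", 2, vec![]).is_none());
  // The threshold is 5; carol's shares cross it and yield the set once
  assert!(acc.accumulate("carol", 2, vec![]).is_some());
}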
@@ -521,7 +448,7 @@ impl TributaryDb {

  pub(crate) fn send_message(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    message: impl Into<messages::CoordinatorMessage>,
  ) {
    ProcessorMessages::send(txn, set, &message.into());
@@ -6,11 +6,10 @@ use core::{marker::PhantomData, future::Future};
use std::collections::HashMap;

use ciphersuite::group::GroupEncoding;
-use dkg::Participant;

use serai_client::{
  primitives::SeraiAddress,
-  validator_sets::primitives::{ExternalValidatorSet, Slash},
+  validator_sets::primitives::{ValidatorSet, Slash},
};

use serai_db::*;
@@ -28,99 +27,59 @@ use tributary_sdk::{
use serai_cosign::CosignIntent;
use serai_coordinator_substrate::NewSetInformation;

-use messages::sign::{VariantSignId, SignId};
+use messages::sign::VariantSignId;

mod transaction;
pub use transaction::{SigningProtocolRound, Signed, Transaction};

mod db;
use db::*;
-pub use db::Topic;

/// Messages to send to the Processors.
pub struct ProcessorMessages;
impl ProcessorMessages {
  /// Try to receive a message to send to a Processor.
-  pub fn try_recv(
-    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-  ) -> Option<messages::CoordinatorMessage> {
+  pub fn try_recv(txn: &mut impl DbTxn, set: ValidatorSet) -> Option<messages::CoordinatorMessage> {
    db::ProcessorMessages::try_recv(txn, set)
  }
}

-/// Messages for the DKG confirmation.
-pub struct DkgConfirmationMessages;
-impl DkgConfirmationMessages {
-  /// Receive a message for the DKG confirmation.
-  ///
-  /// These messages use the ProcessorMessage API as that's what existing flows are designed
-  /// around, enabling their reuse. The ProcessorMessage includes a VariantSignId which isn't
-  /// applicable to the DKG confirmation (as there's no such variant of the VariantSignId). The
-  /// actual ID is undefined other than it will be consistent to the signing protocol and unique
-  /// across validator sets, with no guarantees of uniqueness across contexts.
-  pub fn try_recv(
-    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-  ) -> Option<messages::sign::CoordinatorMessage> {
-    db::DkgConfirmationMessages::try_recv(txn, set)
-  }
-}
-
/// The cosign intents.
pub struct CosignIntents;
impl CosignIntents {
  /// Provide a CosignIntent for this Tributary.
  ///
  /// This must be done before the associated `Transaction::Cosign` is provided.
-  pub fn provide(txn: &mut impl DbTxn, set: ExternalValidatorSet, intent: &CosignIntent) {
+  pub fn provide(txn: &mut impl DbTxn, set: ValidatorSet, intent: &CosignIntent) {
    db::CosignIntents::set(txn, set, intent.block_hash, intent);
  }
  fn take(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
  ) -> Option<CosignIntent> {
    db::CosignIntents::take(txn, set, substrate_block_hash)
  }
}

-/// An interface to the topics recognized on this Tributary.
-pub struct RecognizedTopics;
-impl RecognizedTopics {
-  /// If this topic has been recognized by this Tributary.
-  ///
-  /// This will either be by explicit recognition or participation.
-  pub fn recognized(getter: &impl Get, set: ExternalValidatorSet, topic: Topic) -> bool {
-    TributaryDb::recognized(getter, set, topic)
-  }
-  /// The next topic requiring recognition which has been recognized by this Tributary.
-  pub fn try_recv_topic_requiring_recognition(
-    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
-  ) -> Option<Topic> {
-    db::RecognizedTopics::try_recv(txn, set)
-  }
-}
-
-/// The plans to recognize upon a `Transaction::SubstrateBlock` being included on-chain.
+/// The plans to whitelist upon a `Transaction::SubstrateBlock` being included on-chain.
pub struct SubstrateBlockPlans;
impl SubstrateBlockPlans {
-  /// Set the plans to recognize upon the associated `Transaction::SubstrateBlock` being included
+  /// Set the plans to whitelist upon the associated `Transaction::SubstrateBlock` being included
  /// on-chain.
  ///
  /// This must be done before the associated `Transaction::Cosign` is provided.
  pub fn set(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
    plans: &Vec<[u8; 32]>,
  ) {
-    db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, plans);
+    db::SubstrateBlockPlans::set(txn, set, substrate_block_hash, &plans);
  }
  fn take(
    txn: &mut impl DbTxn,
-    set: ExternalValidatorSet,
+    set: ValidatorSet,
    substrate_block_hash: [u8; 32],
  ) -> Option<Vec<[u8; 32]>> {
    db::SubstrateBlockPlans::take(txn, set, substrate_block_hash)
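CosignIntents and SubstrateBlockPlans both follow a provide-then-take contract: the value must be written before the Tributary transaction referencing it is scanned, and it is consumed exactly once. A toy illustration of why take panics when that invariant is broken:

use std::collections::HashMap;

// A sketch of the provide-then-take contract, with block hash -> block number.
#[derive(Default)]
struct Intents(HashMap<[u8; 32], u64>);

impl Intents {
  fn provide(&mut self, block_hash: [u8; 32], block_number: u64) {
    self.0.insert(block_hash, block_number);
  }
  // Consumes the intent; panics if provide wasn't called first, mirroring the
  // expect in potentially_start_cosign.
  fn take(&mut self, block_hash: [u8; 32]) -> u64 {
    self.0.remove(&block_hash).expect("Transaction::Cosign provided but intent wasn't populated")
  }
}

fn main() {
  let mut intents = Intents::default();
  intents.provide([0; 32], 100);
  assert_eq!(intents.take([0; 32]), 100);
}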
@@ -131,32 +90,32 @@ struct ScanBlock<'a, TD: Db, TDT: DbTxn, P: P2p> {
  _td: PhantomData<TD>,
  _p2p: PhantomData<P>,
  tributary_txn: &'a mut TDT,
-  set: &'a NewSetInformation,
+  set: ValidatorSet,
  validators: &'a [SeraiAddress],
-  total_weight: u16,
-  validator_weights: &'a HashMap<SeraiAddress, u16>,
+  total_weight: u64,
+  validator_weights: &'a HashMap<SeraiAddress, u64>,
}
-impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
+impl<'a, TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'a, TD, TDT, P> {
  fn potentially_start_cosign(&mut self) {
    // Don't start a new cosigning instance if we're actively running one
-    if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set).is_some() {
+    if TributaryDb::actively_cosigning(self.tributary_txn, self.set).is_some() {
      return;
    }

    // Fetch the latest intended-to-be-cosigned block
    let Some(latest_substrate_block_to_cosign) =
-      TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set.set)
+      TributaryDb::latest_substrate_block_to_cosign(self.tributary_txn, self.set)
    else {
      return;
    };

    // If it was already cosigned, return
-    if TributaryDb::cosigned(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign) {
+    if TributaryDb::cosigned(self.tributary_txn, self.set, latest_substrate_block_to_cosign) {
      return;
    }

    let intent =
-      CosignIntents::take(self.tributary_txn, self.set.set, latest_substrate_block_to_cosign)
+      CosignIntents::take(self.tributary_txn, self.set, latest_substrate_block_to_cosign)
        .expect("Transaction::Cosign locally provided but CosignIntents wasn't populated");
    assert_eq!(
      intent.block_hash, latest_substrate_block_to_cosign,
@@ -166,71 +125,20 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
    // Mark us as actively cosigning
    TributaryDb::start_cosigning(
      self.tributary_txn,
-      self.set.set,
+      self.set,
      latest_substrate_block_to_cosign,
      intent.block_number,
    );
    // Send the message for the processor to start signing
    TributaryDb::send_message(
      self.tributary_txn,
-      self.set.set,
+      self.set,
      messages::coordinator::CoordinatorMessage::CosignSubstrateBlock {
-        session: self.set.set.session,
-        cosign: intent.into_cosign(self.set.set.network),
+        session: self.set.session,
+        intent,
      },
    );
  }

fn accumulate_dkg_confirmation<D: AsRef<[u8]> + Borshy>(
|
|
||||||
&mut self,
|
|
||||||
block_number: u64,
|
|
||||||
topic: Topic,
|
|
||||||
data: &D,
|
|
||||||
signer: SeraiAddress,
|
|
||||||
) -> Option<(SignId, HashMap<Participant, Vec<u8>>)> {
|
|
||||||
match TributaryDb::accumulate::<D>(
|
|
||||||
self.tributary_txn,
|
|
||||||
self.set.set,
|
|
||||||
self.validators,
|
|
||||||
self.total_weight,
|
|
||||||
block_number,
|
|
||||||
topic,
|
|
||||||
signer,
|
|
||||||
self.validator_weights[&signer],
|
|
||||||
data,
|
|
||||||
) {
|
|
||||||
DataSet::None => None,
|
|
||||||
DataSet::Participating(data_set) => {
|
|
||||||
let id = topic.dkg_confirmation_sign_id(self.set.set).unwrap();
|
|
||||||
|
|
||||||
// This will be used in a MuSig protocol, so the Participant indexes are the validator's
|
|
||||||
// position in the list regardless of their weight
|
|
||||||
let flatten_data_set = |data_set: HashMap<_, D>| {
|
|
||||||
let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
|
|
||||||
for (validator, participation) in data_set {
|
|
||||||
let (index, (_validator, _weight)) = &self
|
|
||||||
.set
|
|
||||||
.validators
|
|
||||||
.iter()
|
|
||||||
.enumerate()
|
|
||||||
.find(|(_i, (validator_i, _weight))| validator == *validator_i)
|
|
||||||
.unwrap();
|
|
||||||
// The index is zero-indexed yet participants are one-indexed
|
|
||||||
let index = index + 1;
|
|
||||||
|
|
||||||
entries.insert(
|
|
||||||
Participant::new(u16::try_from(index).unwrap()).unwrap(),
|
|
||||||
participation.as_ref().to_vec(),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
entries
|
|
||||||
};
|
|
||||||
let data_set = flatten_data_set(data_set);
|
|
||||||
Some((id, data_set))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
|
fn handle_application_tx(&mut self, block_number: u64, tx: Transaction) {
|
||||||
let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());
|
let signer = |signed: Signed| SeraiAddress(signed.signer().to_bytes());
|
||||||
|
|
||||||
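// [Editor's sketch, not part of the diff] The removed `accumulate_dkg_confirmation`
// helper flattened accumulated data into MuSig form: each validator maps to a
// one-indexed `Participant` by its position in the validator list, regardless of
// its weight. A minimal standalone version of that index mapping, with `[u8; 32]`
// standing in for `SeraiAddress`:
fn musig_participant_index(validators: &[[u8; 32]], validator: &[u8; 32]) -> Option<u16> {
  validators
    .iter()
    .position(|v| v == validator)
    // Positions are zero-indexed yet participants are one-indexed
    .map(|i| u16::try_from(i + 1).unwrap())
}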
@@ -239,14 +147,13 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
       // TODO: The fact they can publish these TXs makes this a notable spam vector
       if TributaryDb::is_fatally_slashed(
         self.tributary_txn,
-        self.set.set,
+        self.set,
         SeraiAddress(signer.to_bytes()),
       ) {
         return;
       }
     }

-    let topic = tx.topic();
     match tx {
       // Accumulate this vote and fatally slash the participant if past the threshold
       Transaction::RemoveParticipant { participant, signed } => {
@@ -256,7 +163,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
         if !self.validators.iter().any(|validator| *validator == participant) {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "voted to remove non-existent participant",
           );
@@ -265,23 +172,18 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {

         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
-          topic.unwrap(),
+          Topic::RemoveParticipant { participant },
           signer,
           self.validator_weights[&signer],
           &(),
         ) {
           DataSet::None => {}
           DataSet::Participating(_) => {
-            TributaryDb::fatal_slash(
-              self.tributary_txn,
-              self.set.set,
-              participant,
-              "voted to remove",
-            );
+            TributaryDb::fatal_slash(self.tributary_txn, self.set, participant, "voted to remove");
           }
         };
       }
@@ -290,52 +192,28 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
       Transaction::DkgParticipation { participation, signed } => {
         TributaryDb::send_message(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           messages::key_gen::CoordinatorMessage::Participation {
-            session: self.set.set.session,
+            session: self.set.session,
-            participant: self.set.participant_indexes[&signer(signed)][0],
+            participant: todo!("TODO"),
             participation,
           },
         );
       }
-      Transaction::DkgConfirmationPreprocess { attempt: _, preprocess, signed } => {
+      Transaction::DkgConfirmationPreprocess { attempt, preprocess, signed } => {
-        let topic = topic.unwrap();
+        // Accumulate the preprocesses into our own FROST attempt manager
-        let signer = signer(signed);
+        todo!("TODO")
-
-        let Some((id, data_set)) =
-          self.accumulate_dkg_confirmation(block_number, topic, &preprocess, signer)
-        else {
-          return;
-        };
-
-        db::DkgConfirmationMessages::send(
-          self.tributary_txn,
-          self.set.set,
-          &messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set },
-        );
       }
-      Transaction::DkgConfirmationShare { attempt: _, share, signed } => {
+      Transaction::DkgConfirmationShare { attempt, share, signed } => {
-        let topic = topic.unwrap();
+        // Accumulate the shares into our own FROST attempt manager
-        let signer = signer(signed);
+        todo!("TODO: SetKeysTask")
-
-        let Some((id, data_set)) =
-          self.accumulate_dkg_confirmation(block_number, topic, &share, signer)
-        else {
-          return;
-        };
-
-        db::DkgConfirmationMessages::send(
-          self.tributary_txn,
-          self.set.set,
-          &messages::sign::CoordinatorMessage::Shares { id, shares: data_set },
-        );
       }

       Transaction::Cosign { substrate_block_hash } => {
         // Update the latest intended-to-be-cosigned Substrate block
         TributaryDb::set_latest_substrate_block_to_cosign(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           substrate_block_hash,
         );
         // Start a new cosign if we aren't already working on one
@@ -348,32 +226,32 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
           not-yet-Cosigned cosigns, we flag all cosigned blocks as cosigned. Then, when we choose
           the next block to work on, we won't if it's already been cosigned.
         */
-        TributaryDb::mark_cosigned(self.tributary_txn, self.set.set, substrate_block_hash);
+        TributaryDb::mark_cosigned(self.tributary_txn, self.set, substrate_block_hash);

         // If we aren't actively cosigning this block, return
         // This occurs when we have Cosign TXs A, B, C, we received Cosigned for A and start on C,
         // and then receive Cosigned for B
-        if TributaryDb::actively_cosigning(self.tributary_txn, self.set.set) !=
+        if TributaryDb::actively_cosigning(self.tributary_txn, self.set) !=
           Some(substrate_block_hash)
         {
           return;
         }

         // Since this is the block we were cosigning, mark us as having finished cosigning
-        TributaryDb::finish_cosigning(self.tributary_txn, self.set.set);
+        TributaryDb::finish_cosigning(self.tributary_txn, self.set);

         // Start working on the next cosign
         self.potentially_start_cosign();
       }
       Transaction::SubstrateBlock { hash } => {
-        // Recognize all of the IDs this Substrate block causes to be signed
+        // Whitelist all of the IDs this Substrate block causes to be signed
-        let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set.set, hash).expect(
+        let plans = SubstrateBlockPlans::take(self.tributary_txn, self.set, hash).expect(
           "Transaction::SubstrateBlock locally provided but SubstrateBlockPlans wasn't populated",
         );
         for plan in plans {
           TributaryDb::recognize_topic(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             Topic::Sign {
               id: VariantSignId::Transaction(plan),
               attempt: 0,
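// [Editor's sketch, not part of the diff] The `Cosigned` handling above tolerates
// out-of-order acknowledgements by combining a set of already-cosigned hashes with
// the hash currently being worked on. A minimal model of that state machine:
use std::collections::HashSet;

struct CosignState {
  cosigned: HashSet<[u8; 32]>,
  actively_cosigning: Option<[u8; 32]>,
}

impl CosignState {
  // Returns true if this acknowledgement finished the active cosign, in which
  // case the caller should pick the next block to work on
  fn on_cosigned(&mut self, hash: [u8; 32]) -> bool {
    self.cosigned.insert(hash);
    if self.actively_cosigning == Some(hash) {
      self.actively_cosigning = None;
      return true;
    }
    // A `Cosigned` for a block we already moved past is simply recorded
    false
  }
}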
@@ -383,10 +261,10 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
           }
         }
       }
       Transaction::Batch { hash } => {
-        // Recognize the signing of this batch
+        // Whitelist the signing of this batch
         TributaryDb::recognize_topic(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           Topic::Sign {
             id: VariantSignId::Batch(hash),
             attempt: 0,
@@ -401,7 +279,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
         if slash_points.len() != self.validators.len() {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "slash report was for a distinct amount of signers",
           );
@@ -411,11 +289,11 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
         // Accumulate, and if past the threshold, calculate *the* slash report and start signing it
         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
-          topic.unwrap(),
+          Topic::SlashReport,
           signer,
           self.validator_weights[&signer],
           &slash_points,
@@ -429,6 +307,10 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
             have a supermajority agree the slash should be fatal. If there isn't a supermajority,
             but the median believe the slash should be fatal, we need to fallback to a large
             constant.
+
+            Also, TODO, each slash point should probably be considered as
+            `MAX_KEY_SHARES_PER_SET * BLOCK_TIME` seconds of downtime. As this time crosses
+            various thresholds (1 day, 3 days, etc), a multiplier should be attached.
           */
           let mut median_slash_report = Vec::with_capacity(self.validators.len());
           for i in 0 .. self.validators.len() {
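// [Editor's sketch, not part of the diff] The per-validator median described in the
// comment above can be computed by collecting every reporter's value for validator i,
// sorting, and taking the middle element, with a fatal median degrading to a large
// constant absent a supermajority. `FATAL_FALLBACK` is a hypothetical stand-in; the
// actual constant isn't shown in this diff:
fn median_points(mut reports_for_validator: Vec<u32>) -> u32 {
  const FATAL: u32 = u32::MAX;
  const FATAL_FALLBACK: u32 = 1 << 30;
  reports_for_validator.sort_unstable();
  let median = reports_for_validator[reports_for_validator.len() / 2];
  // Without a supermajority agreeing the slash is fatal, a fatal median is
  // replaced with a large-but-finite amount of points
  if median == FATAL { FATAL_FALLBACK } else { median }
}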
@@ -469,7 +351,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {

           // Create the resulting slash report
           let mut slash_report = vec![];
-          for points in amortized_slash_report {
+          for (validator, points) in self.validators.iter().copied().zip(amortized_slash_report) {
             // TODO: Natively store this as a `Slash`
             if points == u32::MAX {
               slash_report.push(Slash::Fatal);
@@ -482,7 +364,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
           // Recognize the topic for signing the slash report
           TributaryDb::recognize_topic(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             Topic::Sign {
               id: VariantSignId::SlashReport,
               attempt: 0,
@@ -492,24 +374,24 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
           // Send the message for the processor to start signing
           TributaryDb::send_message(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             messages::coordinator::CoordinatorMessage::SignSlashReport {
-              session: self.set.set.session,
+              session: self.set.session,
-              slash_report: slash_report.try_into().unwrap(),
+              report: slash_report,
             },
           );
         }
       };
     }

-      Transaction::Sign { id: _, attempt: _, round, data, signed } => {
+      Transaction::Sign { id, attempt, round, data, signed } => {
-        let topic = topic.unwrap();
+        let topic = Topic::Sign { id, attempt, round };
         let signer = signer(signed);

-        if data.len() != usize::from(self.validator_weights[&signer]) {
+        if u64::try_from(data.len()).unwrap() != self.validator_weights[&signer] {
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             signer,
             "signer signed with a distinct amount of key shares than they had key shares",
           );
@@ -518,7 +400,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {

         match TributaryDb::accumulate(
           self.tributary_txn,
-          self.set.set,
+          self.set,
           self.validators,
           self.total_weight,
           block_number,
@@ -529,22 +411,12 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
         ) {
           DataSet::None => {}
           DataSet::Participating(data_set) => {
-            let id = topic.sign_id(self.set.set).expect("Topic::Sign didn't have SignId");
+            let id = topic.sign_id(self.set).expect("Topic::Sign didn't have SignId");
-            let flatten_data_set = |data_set: HashMap<_, Vec<_>>| {
+            let flatten_data_set = |data_set| todo!("TODO");
-              let mut entries = HashMap::with_capacity(usize::from(self.total_weight));
-              for (validator, shares) in data_set {
-                let indexes = &self.set.participant_indexes[&validator];
-                assert_eq!(indexes.len(), shares.len());
-                for (index, share) in indexes.iter().zip(shares) {
-                  entries.insert(*index, share);
-                }
-              }
-              entries
-            };
             let data_set = flatten_data_set(data_set);
             TributaryDb::send_message(
               self.tributary_txn,
-              self.set.set,
+              self.set,
               match round {
                 SigningProtocolRound::Preprocess => {
                   messages::sign::CoordinatorMessage::Preprocesses { id, preprocesses: data_set }
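// [Editor's sketch, not part of the diff] The removed closure expands each
// validator's vector of shares into one entry per key share, keyed by that
// validator's participant indexes. A standalone sketch, with `Participant`
// stood in by u16 and `SeraiAddress` by [u8; 32]:
use std::collections::HashMap;

fn flatten_shares(
  participant_indexes: &HashMap<[u8; 32], Vec<u16>>,
  data_set: HashMap<[u8; 32], Vec<Vec<u8>>>,
) -> HashMap<u16, Vec<u8>> {
  let mut entries = HashMap::new();
  for (validator, shares) in data_set {
    let indexes = &participant_indexes[&validator];
    // One share per key share held by this validator
    assert_eq!(indexes.len(), shares.len());
    for (index, share) in indexes.iter().zip(shares) {
      entries.insert(*index, share);
    }
  }
  entries
}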
@@ -555,13 +427,13 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
               },
             )
           }
-        }
+        };
       }
     }
   }

   fn handle_block(mut self, block_number: u64, block: Block<Transaction>) {
-    TributaryDb::start_of_block(self.tributary_txn, self.set.set, block_number);
+    TributaryDb::start_of_block(self.tributary_txn, self.set, block_number);

     for tx in block.transactions {
       match tx {
@@ -588,7 +460,7 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
           // errors, mark the node as fatally slashed
           TributaryDb::fatal_slash(
             self.tributary_txn,
-            self.set.set,
+            self.set,
             SeraiAddress(msgs.0.msg.sender),
             &format!("invalid tendermint messages: {msgs:?}"),
           );
@@ -604,10 +476,10 @@ impl<TD: Db, TDT: DbTxn, P: P2p> ScanBlock<'_, TD, TDT, P> {
 /// The task to scan the Tributary, populating `ProcessorMessages`.
 pub struct ScanTributaryTask<TD: Db, P: P2p> {
   tributary_db: TD,
-  set: NewSetInformation,
+  set: ValidatorSet,
   validators: Vec<SeraiAddress>,
-  total_weight: u16,
+  total_weight: u64,
-  validator_weights: HashMap<SeraiAddress, u16>,
+  validator_weights: HashMap<SeraiAddress, u64>,
   tributary: TributaryReader<TD, Transaction>,
   _p2p: PhantomData<P>,
 }
@@ -616,13 +488,15 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {
   /// Create a new instance of this task.
   pub fn new(
     tributary_db: TD,
-    set: NewSetInformation,
+    new_set: &NewSetInformation,
     tributary: TributaryReader<TD, Transaction>,
   ) -> Self {
-    let mut validators = Vec::with_capacity(set.validators.len());
+    let mut validators = Vec::with_capacity(new_set.validators.len());
     let mut total_weight = 0;
-    let mut validator_weights = HashMap::with_capacity(set.validators.len());
+    let mut validator_weights = HashMap::with_capacity(new_set.validators.len());
-    for (validator, weight) in set.validators.iter().copied() {
+    for (validator, weight) in new_set.validators.iter().copied() {
+      let validator = SeraiAddress::from(validator);
+      let weight = u64::from(weight);
       validators.push(validator);
       total_weight += weight;
       validator_weights.insert(validator, weight);
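// [Editor's note, not part of the diff] `total_weight` is the sum of all key
// shares; `TributaryDb::accumulate` compares accumulated weight against it to
// detect when a topic crosses its threshold. A sketch of the usual supermajority
// check (the exact threshold used by `accumulate` isn't shown in this diff):
fn meets_supermajority(accumulated_weight: u64, total_weight: u64) -> bool {
  // Strictly more than two-thirds of all key shares
  accumulated_weight * 3 > total_weight * 2
}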
@@ -630,7 +504,7 @@ impl<TD: Db, P: P2p> ScanTributaryTask<TD, P> {

     ScanTributaryTask {
       tributary_db,
-      set,
+      set: new_set.set,
       validators,
       total_weight,
       validator_weights,
@@ -646,7 +520,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
   fn run_iteration(&mut self) -> impl Send + Future<Output = Result<bool, Self::Error>> {
     async move {
       let (mut last_block_number, mut last_block_hash) =
-        TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set.set)
+        TributaryDb::last_handled_tributary_block(&self.tributary_db, self.set)
           .unwrap_or((0, self.tributary.genesis()));

       let mut made_progress = false;
@@ -665,7 +539,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
           if !self.tributary.locally_provided_txs_in_block(&block_hash, order) {
             return Err(format!(
               "didn't have the provided Transactions on-chain for set (ephemeral error): {:?}",
-              self.set.set
+              self.set
             ));
           }
         }
@@ -675,7 +549,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
           _td: PhantomData::<TD>,
           _p2p: PhantomData::<P>,
           tributary_txn: &mut tributary_txn,
-          set: &self.set,
+          set: self.set,
           validators: &self.validators,
           total_weight: self.total_weight,
           validator_weights: &self.validator_weights,
@@ -683,7 +557,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
         .handle_block(block_number, block);
         TributaryDb::set_last_handled_tributary_block(
           &mut tributary_txn,
-          self.set.set,
+          self.set,
           block_number,
           block_hash,
         );
@@ -703,6 +577,7 @@ impl<TD: Db, P: P2p> ContinuallyRan for ScanTributaryTask<TD, P> {
 pub fn slash_report_transaction(getter: &impl Get, set: &NewSetInformation) -> Transaction {
   let mut slash_points = Vec::with_capacity(set.validators.len());
   for (validator, _weight) in set.validators.iter().copied() {
+    let validator = SeraiAddress::from(validator);
     slash_points.push(SlashPoints::get(getter, set.set, validator).unwrap_or(0));
   }
   Transaction::SlashReport { slash_points, signed: Signed::default() }

@@ -25,8 +25,6 @@ use tributary_sdk::{
   },
 };

-use crate::db::Topic;
-
 /// The round this data is for, within a signing protocol.
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
 pub enum SigningProtocolRound {
@@ -182,7 +180,7 @@ pub enum Transaction {
   ///
   /// This is provided after the block has been cosigned.
   ///
-  /// With the acknowledgement of a Substrate block, we can recognize all the `VariantSignId`s
+  /// With the acknowledgement of a Substrate block, we can whitelist all the `VariantSignId`s
   /// resulting from its handling.
   SubstrateBlock {
     /// The hash of the Substrate block
@@ -259,7 +257,9 @@ impl TransactionTrait for Transaction {

       Transaction::Cosign { .. } => TransactionKind::Provided("Cosign"),
       Transaction::Cosigned { .. } => TransactionKind::Provided("Cosigned"),
+      // TODO: Provide this
       Transaction::SubstrateBlock { .. } => TransactionKind::Provided("SubstrateBlock"),
+      // TODO: Provide this
       Transaction::Batch { .. } => TransactionKind::Provided("Batch"),

       Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
@@ -318,36 +318,6 @@ impl TransactionTrait for Transaction {
 }

 impl Transaction {
-  /// The topic in the database for this transaction.
-  pub fn topic(&self) -> Option<Topic> {
-    #[allow(clippy::match_same_arms)] // This doesn't make semantic sense here
-    match self {
-      Transaction::RemoveParticipant { participant, .. } => {
-        Some(Topic::RemoveParticipant { participant: *participant })
-      }
-
-      Transaction::DkgParticipation { .. } => None,
-      Transaction::DkgConfirmationPreprocess { attempt, .. } => {
-        Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Preprocess })
-      }
-      Transaction::DkgConfirmationShare { attempt, .. } => {
-        Some(Topic::DkgConfirmation { attempt: *attempt, round: SigningProtocolRound::Share })
-      }
-
-      // Provided TXs
-      Transaction::Cosign { .. } |
-      Transaction::Cosigned { .. } |
-      Transaction::SubstrateBlock { .. } |
-      Transaction::Batch { .. } => None,
-
-      Transaction::Sign { id, attempt, round, .. } => {
-        Some(Topic::Sign { id: *id, attempt: *attempt, round: *round })
-      }
-
-      Transaction::SlashReport { .. } => Some(Topic::SlashReport),
-    }
-  }
-
   /// Sign a transaction.
   ///
   /// Panics if signing a transaction whose type isn't `TransactionKind::Signed`.
@@ -365,12 +335,10 @@ impl Transaction {
       Transaction::DkgConfirmationPreprocess { ref mut signed, .. } => signed,
       Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,

-      Transaction::Cosign { .. } => panic!("signing Cosign transaction (provided)"),
+      Transaction::Cosign { .. } => panic!("signing CosignSubstrateBlock"),
-      Transaction::Cosigned { .. } => panic!("signing Cosigned transaction (provided)"),
+      Transaction::Cosigned { .. } => panic!("signing Cosigned"),
-      Transaction::SubstrateBlock { .. } => {
+      Transaction::SubstrateBlock { .. } => panic!("signing SubstrateBlock"),
-        panic!("signing SubstrateBlock transaction (provided)")
+      Transaction::Batch { .. } => panic!("signing Batch"),
-      }
-      Transaction::Batch { .. } => panic!("signing Batch transaction (provided)"),

       Transaction::Sign { ref mut signed, .. } => signed,

@@ -28,12 +28,6 @@ macro_rules! dalek_curve {
       $Point::generator()
     }

-    fn reduce_512(mut scalar: [u8; 64]) -> Self::F {
-      let res = Scalar::from_bytes_mod_order_wide(&scalar);
-      scalar.zeroize();
-      res
-    }
-
     fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
       Scalar::from_hash(Sha512::new_with_prefix(&[dst, data].concat()))
     }

@@ -66,12 +66,6 @@ impl Ciphersuite for Ed448 {
     Point::generator()
   }

-  fn reduce_512(mut scalar: [u8; 64]) -> Self::F {
-    let res = Self::hash_to_F(b"Ciphersuite-reduce_512", &scalar);
-    scalar.zeroize();
-    res
-  }
-
   fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
     Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_ref().try_into().unwrap())
   }

@@ -6,7 +6,7 @@ use group::ff::PrimeField;

 use elliptic_curve::{
   generic_array::GenericArray,
-  bigint::{NonZero, CheckedAdd, Encoding, U384, U512},
+  bigint::{NonZero, CheckedAdd, Encoding, U384},
   hash2curve::{Expander, ExpandMsg, ExpandMsgXmd},
 };

@@ -31,22 +31,6 @@ macro_rules! kp_curve {
       $lib::ProjectivePoint::GENERATOR
     }

-    fn reduce_512(scalar: [u8; 64]) -> Self::F {
-      let mut modulus = [0; 64];
-      modulus[32 ..].copy_from_slice(&(Self::F::ZERO - Self::F::ONE).to_bytes());
-      let modulus = U512::from_be_slice(&modulus).checked_add(&U512::ONE).unwrap();
-
-      let mut wide =
-        U512::from_be_bytes(scalar).rem(&NonZero::new(modulus).unwrap()).to_be_bytes();
-
-      let mut array = *GenericArray::from_slice(&wide[32 ..]);
-      let res = $lib::Scalar::from_repr(array).unwrap();
-
-      wide.zeroize();
-      array.zeroize();
-      res
-    }
-
     fn hash_to_F(dst: &[u8], msg: &[u8]) -> Self::F {
       // While one of these two libraries does support directly hashing to the Scalar field, the
       // other doesn't. While that's probably an oversight, this is a universally working method

@@ -62,12 +62,6 @@ pub trait Ciphersuite:
   // While group does provide this in its API, privacy coins may want to use a custom basepoint
   fn generator() -> Self::G;

-  /// Reduce 512 bits into a uniform scalar.
-  ///
-  /// If 512 bits is insufficient to perform a reduction into a uniform scalar, the ciphersuite
-  /// will perform a hash to sample the necessary bits.
-  fn reduce_512(scalar: [u8; 64]) -> Self::F;
-
   /// Hash the provided domain-separation tag and message to a scalar. Ciphersuites MAY naively
   /// prefix the tag to the message, enabling transpotion between the two. Accordingly, this
   /// function should NOT be used in any scheme where one tag is a valid substring of another
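// [Editor's sketch, not part of the diff] The `reduce_512` method removed across
// this diff reduces 64 uniform bytes modulo the group order, leaving the result
// statistically close to uniform (the bias is on the order of 2^-(512 - |order|)).
// For Ed25519, curve25519-dalek exposes this reduction directly:
use curve25519_dalek::scalar::Scalar;

fn uniform_scalar(wide: [u8; 64]) -> Scalar {
  // Interprets the 64 bytes as an integer and reduces it mod l
  Scalar::from_bytes_mod_order_wide(&wide)
}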
@@ -105,9 +99,6 @@ pub trait Ciphersuite:
   }

   /// Read a canonical point from something implementing std::io::Read.
-  ///
-  /// The provided implementation is safe so long as `GroupEncoding::to_bytes` always returns a
-  /// canonical serialization.
   #[cfg(any(feature = "alloc", feature = "std"))]
   #[allow(non_snake_case)]
   fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {

@@ -92,7 +92,7 @@ impl Neg for FieldElement {
   }
 }

-impl Neg for &FieldElement {
+impl<'a> Neg for &'a FieldElement {
   type Output = FieldElement;
   fn neg(self) -> Self::Output {
     (*self).neg()
@@ -244,16 +244,7 @@ impl FieldElement {
             res *= res;
           }
         }
-        let mut scale_by = FieldElement::ONE;
-        #[allow(clippy::needless_range_loop)]
-        for i in 0 .. 16 {
-          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-          {
-            scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-          }
-        }
-        res *= scale_by;
+        res *= table[usize::from(bits)];
         bits = 0;
       }
     }
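// [Editor's sketch, not part of the diff] The removed loop is a constant-time
// 16-entry table lookup for 4-bit windowed exponentiation: every entry is
// scanned and the match selected without a secret-dependent index (the direct
// `table[usize::from(bits)]` indexing that replaces it is variable-time).
// With the subtle crate, the pattern looks like:
use subtle::{ConditionallySelectable, ConstantTimeEq};

fn ct_lookup(table: &[u64; 16], bits: u8) -> u64 {
  let mut selected = 0u64;
  for (i, entry) in table.iter().enumerate() {
    // conditional_select runs for all 16 entries regardless of `bits`
    selected = u64::conditional_select(&selected, entry, bits.ct_eq(&(i as u8)));
  }
  selected
}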
@@ -208,16 +208,7 @@ impl Scalar {
             res *= res;
           }
         }
-        let mut scale_by = Scalar::ONE;
-        #[allow(clippy::needless_range_loop)]
-        for i in 0 .. 16 {
-          #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-          {
-            scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
-          }
-        }
-        res *= scale_by;
+        res *= table[usize::from(bits)];
         bits = 0;
       }
     }

@@ -54,7 +54,7 @@ rand = { version = "0.8", default-features = false, features = ["std"] }
 ciphersuite = { path = "../ciphersuite", default-features = false, features = ["ristretto"] }
 generalized-bulletproofs = { path = "../evrf/generalized-bulletproofs", features = ["tests"] }
 ec-divisors = { path = "../evrf/divisors", features = ["pasta"] }
-pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+pasta_curves = "0.5"

 [features]
 std = [

@@ -85,7 +85,7 @@ use ciphersuite::{
 };
 use multiexp::multiexp_vartime;

-use generalized_bulletproofs::{Generators, arithmetic_circuit_proof::*};
+use generalized_bulletproofs::arithmetic_circuit_proof::*;
 use ec_divisors::DivisorCurve;

 use crate::{Participant, ThresholdParams, Interpolation, ThresholdCore, ThresholdKeys};
@@ -277,7 +277,6 @@ impl<C: EvrfCurve> EvrfDkg<C> {
     if evrf_public_keys.iter().any(|key| bool::from(key.is_identity())) {
       Err(EvrfError::PublicKeyWasIdentity)?;
     };
-    // This also checks the private key is not 0
     let evrf_public_key = <C::EmbeddedCurve as Ciphersuite>::generator() * evrf_private_key.deref();
     if !evrf_public_keys.iter().any(|key| *key == evrf_public_key) {
       Err(EvrfError::NotAParticipant)?;
@@ -360,7 +359,7 @@ impl<C: EvrfCurve> EvrfDkg<C> {

     let transcript = Self::initial_transcript(context, evrf_public_keys, t);

-    let mut evrf_verifier = Generators::batch_verifier();
+    let mut evrf_verifier = generators.0.batch_verifier();
     for (i, participation) in participations {
       let evrf_public_key = evrf_public_keys[usize::from(u16::from(*i)) - 1];

@@ -396,7 +395,7 @@ impl<C: EvrfCurve> EvrfDkg<C> {
       if faulty.contains(i) {
         continue;
       }
-      let mut evrf_verifier = Generators::batch_verifier();
+      let mut evrf_verifier = generators.0.batch_verifier();
       Evrf::<C>::verify(
         rng,
         &generators.0,

@@ -129,11 +129,15 @@ impl<C: EvrfCurve> Evrf<C> {
   /// Read a Variable from a theoretical vector commitment tape
   fn read_one_from_tape(generators_to_use: usize, start: &mut usize) -> Variable {
     // Each commitment has twice as many variables as generators in use
-    let commitment = *start / generators_to_use;
+    let commitment = *start / (2 * generators_to_use);
     // The index will be less than the amount of generators in use, as half are left and half are
     // right
     let index = *start % generators_to_use;
-    let res = Variable::CG { commitment, index };
+    let res = if (*start / generators_to_use) % 2 == 0 {
+      Variable::CG { commitment, index }
+    } else {
+      Variable::CH { commitment, index }
+    };
     *start += 1;
     res
   }
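// [Editor's sketch, not part of the diff] With the g/h split, each commitment now
// holds 2 * generators_to_use tape positions: the first half maps to CG variables,
// the second half to CH. E.g. with generators_to_use = 4, position 13 yields
// commitment 13 / 8 = 1, half (13 / 4) % 2 = 1 (the CH half), index 13 % 4 = 1.
fn tape_position(generators_to_use: usize, start: usize) -> (usize, bool, usize) {
  let commitment = start / (2 * generators_to_use);
  let is_h_half = (start / generators_to_use) % 2 == 1;
  let index = start % generators_to_use;
  (commitment, is_h_half, index)
}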
@@ -198,8 +202,8 @@ impl<C: EvrfCurve> Evrf<C> {
       padded_pow_of_2 <<= 1;
     }
     // This may as small as 16, which would create an excessive amount of vector commitments
-    // We set a floor of 2048 rows for bandwidth reasons
+    // We set a floor of 1024 rows for bandwidth reasons
-    padded_pow_of_2.max(2048)
+    padded_pow_of_2.max(1024)
   };
   (expected_muls, generators_to_use)
 }
@@ -209,7 +213,7 @@ impl<C: EvrfCurve> Evrf<C> {
   evrf_public_key: (C::F, C::F),
   coefficients: usize,
   ecdh_commitments: &[[(C::F, C::F); 2]],
-  generator_tables: &[&GeneratorTable<C::F, C::EmbeddedCurveParameters>],
+  generator_tables: &[GeneratorTable<C::F, C::EmbeddedCurveParameters>],
   circuit: &mut Circuit<C>,
   transcript: &mut impl Transcript,
 ) {
@@ -372,10 +376,8 @@ impl<C: EvrfCurve> Evrf<C> {
     let evrf_public_key;
     let mut actual_coefficients = Vec::with_capacity(coefficients);
     {
-      // This is checked at a higher level
       let dlog =
-        ScalarDecomposition::<<C::EmbeddedCurve as Ciphersuite>::F>::new(**evrf_private_key)
+        ScalarDecomposition::<<C::EmbeddedCurve as Ciphersuite>::F>::new(**evrf_private_key);
-          .expect("eVRF private key was zero");
       let points = Self::transcript_to_points(transcript, coefficients);

       // Start by pushing the discrete logarithm onto the tape
@@ -429,8 +431,7 @@ impl<C: EvrfCurve> Evrf<C> {
         }
       }
       let dlog =
-        ScalarDecomposition::<<C::EmbeddedCurve as Ciphersuite>::F>::new(ecdh_private_key)
+        ScalarDecomposition::<<C::EmbeddedCurve as Ciphersuite>::F>::new(ecdh_private_key);
-          .expect("ECDH private key was zero");
       let ecdh_commitment = <C::EmbeddedCurve as Ciphersuite>::generator() * ecdh_private_key;
       ecdh_commitments.push(ecdh_commitment);
       ecdh_commitments_xy.last_mut().unwrap()[j] =
@@ -470,10 +471,15 @@ impl<C: EvrfCurve> Evrf<C> {
       Self::muls_and_generators_to_use(coefficients, ecdh_public_keys.len());

     let mut vector_commitments =
-      Vec::with_capacity(vector_commitment_tape.len().div_ceil(generators_to_use));
+      Vec::with_capacity(vector_commitment_tape.len().div_ceil(2 * generators_to_use));
-    for chunk in vector_commitment_tape.chunks(generators_to_use) {
+    for chunk in vector_commitment_tape.chunks(2 * generators_to_use) {
       let g_values = chunk[.. generators_to_use.min(chunk.len())].to_vec().into();
-      vector_commitments.push(PedersenVectorCommitment { g_values, mask: C::F::random(&mut *rng) });
+      let h_values = chunk[generators_to_use.min(chunk.len()) ..].to_vec().into();
+      vector_commitments.push(PedersenVectorCommitment {
+        g_values,
+        h_values,
+        mask: C::F::random(&mut *rng),
+      });
     }

     vector_commitment_tape.zeroize();
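// [Editor's sketch, not part of the diff] Each 2n-sized chunk of the tape is split
// into an n-sized g half and the remainder as the h half; a short trailing chunk
// simply yields an empty h half. The split in isolation:
fn split_chunk(chunk: &[u64], n: usize) -> (Vec<u64>, Vec<u64>) {
  let g_len = n.min(chunk.len());
  (chunk[.. g_len].to_vec(), chunk[g_len ..].to_vec())
}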
@@ -493,7 +499,7 @@ impl<C: EvrfCurve> Evrf<C> {
         .iter()
         .map(|commitment| {
           commitment
-            .commit(generators.g_bold_slice(), generators.h())
+            .commit(generators.g_bold_slice(), generators.h_bold_slice(), generators.h())
             .ok_or(AcError::NotEnoughGenerators)
         })
         .collect::<Result<_, _>>()?,
@@ -512,7 +518,7 @@ impl<C: EvrfCurve> Evrf<C> {
       evrf_public_key,
       coefficients,
       &ecdh_commitments_xy,
-      &generator_tables.iter().collect::<Vec<_>>(),
+      &generator_tables,
       &mut circuit,
       &mut transcript,
     );
@@ -537,7 +543,7 @@ impl<C: EvrfCurve> Evrf<C> {
     let mut agg_weights = Vec::with_capacity(commitments.len());
     agg_weights.push(C::F::ONE);
     while agg_weights.len() < commitments.len() {
-      agg_weights.push(transcript.challenge::<C>());
+      agg_weights.push(transcript.challenge::<C::F>());
     }
     let mut x = commitments
       .iter()
@@ -548,7 +554,7 @@ impl<C: EvrfCurve> Evrf<C> {
     // Do a Schnorr PoK for the randomness of the aggregated Pedersen commitment
     let mut r = C::F::random(&mut *rng);
     transcript.push_point(generators.h() * r);
-    let c = transcript.challenge::<C>();
+    let c = transcript.challenge::<C::F>();
     transcript.push_scalar(r + (c * x));
     r.zeroize();
     x.zeroize();
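// [Editor's sketch, not part of the diff] The proof above is a plain Schnorr proof
// of knowledge of x: the prover commits R = H * r, receives challenge c, and sends
// s = r + c * x. The verifier checks H * s == R + (H * x) * c. A sketch with
// Ristretto stand-ins (the actual code is generic over the ciphersuite):
use curve25519_dalek::{ristretto::RistrettoPoint, scalar::Scalar};

fn schnorr_verify(
  h: RistrettoPoint,            // the Pedersen randomness generator
  x_commitment: RistrettoPoint, // H * x, the component being proven
  big_r: RistrettoPoint,        // prover's nonce commitment H * r
  c: Scalar,                    // Fiat-Shamir challenge
  s: Scalar,                    // prover's response r + c * x
) -> bool {
  // H * s = H * r + c * (H * x) = R + c * (H * x)
  h * s == big_r + (x_commitment * c)
}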
@@ -609,7 +615,7 @@ impl<C: EvrfCurve> Evrf<C> {
     let coeffs_vc_variables = dlog_len + ((1 + (2 * coefficients)) * dlog_proof_len);
     let ecdhs_vc_variables = ((2 * ecdh_public_keys.len()) * dlog_len) +
       ((2 * 2 * ecdh_public_keys.len()) * dlog_proof_len);
-    let vcs = (coeffs_vc_variables + ecdhs_vc_variables).div_ceil(generators_to_use);
+    let vcs = (coeffs_vc_variables + ecdhs_vc_variables).div_ceil(2 * generators_to_use);

     let all_commitments =
       transcript.read_commitments(vcs, coefficients + ecdh_public_keys.len()).map_err(|_| ())?;
@@ -636,7 +642,7 @@ impl<C: EvrfCurve> Evrf<C> {
       <C::EmbeddedCurve as Ciphersuite>::G::to_xy(evrf_public_key).ok_or(())?,
       coefficients,
       &ecdh_keys_xy,
-      &generator_tables.iter().collect::<Vec<_>>(),
+      &generator_tables,
       &mut circuit,
       &mut transcript,
     );
@@ -659,7 +665,7 @@ impl<C: EvrfCurve> Evrf<C> {
     let mut agg_weights = Vec::with_capacity(commitments.len());
     agg_weights.push(C::F::ONE);
     while agg_weights.len() < commitments.len() {
-      agg_weights.push(transcript.challenge::<C>());
+      agg_weights.push(transcript.challenge::<C::F>());
     }

     let sum_points =
@@ -671,7 +677,7 @@ impl<C: EvrfCurve> Evrf<C> {

     #[allow(non_snake_case)]
     let R = transcript.read_point::<C>().map_err(|_| ())?;
-    let c = transcript.challenge::<C>();
+    let c = transcript.challenge::<C::F>();
     let s = transcript.read_scalar::<C>().map_err(|_| ())?;

     // Doesn't batch verify this as we can't access the internals of the GBP batch verifier

@@ -15,7 +15,7 @@ use ciphersuite::{
 };
 use pasta_curves::{Ep, Eq, Fp, Fq};

-use generalized_bulletproofs::{Generators, tests::generators};
+use generalized_bulletproofs::tests::generators;
 use generalized_bulletproofs_ec_gadgets::DiscreteLogParameters;

 use crate::evrf::proof::*;
@@ -35,9 +35,6 @@ impl Ciphersuite for Pallas {
     // This is solely test code so it's fine
     Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
   }
-  fn reduce_512(scalar: [u8; 64]) -> Self::F {
-    Self::F::from_uniform_bytes(&scalar)
-  }
 }

 #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
@@ -55,9 +52,6 @@ impl Ciphersuite for Vesta {
     // This is solely test code so it's fine
     Self::F::from_uniform_bytes(&Self::H::digest([dst, msg].concat()).into())
   }
-  fn reduce_512(scalar: [u8; 64]) -> Self::F {
-    Self::F::from_uniform_bytes(&scalar)
-  }
 }

 pub struct VestaParams;
@@ -74,7 +68,7 @@ impl EvrfCurve for Pallas {
 }

 fn evrf_proof_test<C: EvrfCurve>() {
-  let generators = generators(2048);
+  let generators = generators(1024);
   let vesta_private_key = Zeroizing::new(<C::EmbeddedCurve as Ciphersuite>::F::random(&mut OsRng));
   let ecdh_public_keys = [
     <C::EmbeddedCurve as Ciphersuite>::G::random(&mut OsRng),
@@ -87,7 +81,7 @@ fn evrf_proof_test<C: EvrfCurve>() {
   println!("Proving time: {:?}", time.elapsed());

   let time = Instant::now();
-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();
   Evrf::<C>::verify(
     &mut OsRng,
     &generators,

@@ -28,10 +28,6 @@ impl<C: Ciphersuite> Ciphersuite for AltGenerator<C> {
     C::G::generator() * <C as Ciphersuite>::hash_to_F(b"DKG Promotion Test", b"generator")
   }

-  fn reduce_512(scalar: [u8; 64]) -> Self::F {
-    <C as Ciphersuite>::reduce_512(scalar)
-  }
-
   fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
     <C as Ciphersuite>::hash_to_F(dst, data)
   }

@@ -37,11 +37,11 @@ pub(crate) fn challenge<T: Transcript, F: PrimeField>(transcript: &mut T) -> F {
   // Get a wide amount of bytes to safely reduce without bias
   // In most cases, <=1.5x bytes is enough. 2x is still standard and there's some theoretical
   // groups which may technically require more than 1.5x bytes for this to work as intended
-  let target_bytes = usize::try_from(F::NUM_BITS).unwrap().div_ceil(8) * 2;
+  let target_bytes = ((usize::try_from(F::NUM_BITS).unwrap() + 7) / 8) * 2;
   let mut challenge_bytes = transcript.challenge(b"challenge");
   let challenge_bytes_len = challenge_bytes.as_ref().len();
   // If the challenge is 32 bytes, and we need 64, we need two challenges
-  let needed_challenges = target_bytes.div_ceil(challenge_bytes_len);
+  let needed_challenges = (target_bytes + (challenge_bytes_len - 1)) / challenge_bytes_len;

   // The following algorithm should be equivalent to a wide reduction of the challenges,
   // interpreted as concatenated, big-endian byte string
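// [Editor's note, not part of the diff] Both replacements above are the classic
// manual ceiling division: for positive integers a and b,
// (a + b - 1) / b == a.div_ceil(b), so the rewrite (presumably for toolchain
// compatibility) is behavior-preserving. A quick property check:
#[test]
fn manual_ceil_div_matches_div_ceil() {
  for a in 1usize ..= 100 {
    for b in 1usize ..= 10 {
      assert_eq!((a + b - 1) / b, a.div_ceil(b));
    }
  }
}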
|
|||||||
@@ -161,16 +161,7 @@ macro_rules! field {
|
|||||||
res *= res;
|
res *= res;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
res *= table[usize::from(bits)];
|
||||||
let mut scale_by = $FieldName(Residue::ONE);
|
|
||||||
#[allow(clippy::needless_range_loop)]
|
|
||||||
for i in 0 .. 16 {
|
|
||||||
#[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
|
|
||||||
{
|
|
||||||
scale_by = <_>::conditional_select(&scale_by, &table[i], bits.ct_eq(&(i as u8)));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
res *= scale_by;
|
|
||||||
bits = 0;
|
bits = 0;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -242,16 +242,7 @@ impl Mul<Scalar> for Point {
           res = res.double();
         }
       }
-      let mut add_by = Point::identity();
-      #[allow(clippy::needless_range_loop)]
-      for i in 0 .. 16 {
-        #[allow(clippy::cast_possible_truncation)] // Safe since 0 .. 16
-        {
-          add_by = <_>::conditional_select(&add_by, &table[i], bits.ct_eq(&(i as u8)));
-        }
-      }
-      res += add_by;
+      res += table[usize::from(bits)];
       bits = 0;
     }
   }
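Both sides of this hunk share the surrounding structure: a 4-bit fixed-window scalar multiplication that doubles four times per nibble and then adds a precomputed table entry; they differ only in whether the table access is constant time. A hedged sketch of that loop shape over the `group` crate's traits (an illustration, not this crate's actual code; it assumes MSB-first bits with a length divisible by four):

```rust
use group::Group;

/// Multiply `point` by a scalar given as MSB-first bits (length a multiple of 4).
fn windowed_mul<G: Group>(point: G, scalar_bits: &[bool]) -> G {
  assert_eq!(scalar_bits.len() % 4, 0);

  // table[i] = [i] point, for i in 0 .. 16
  let mut table = [G::identity(); 16];
  for i in 1 .. 16 {
    table[i] = table[i - 1] + point;
  }

  let mut res = G::identity();
  let mut bits = 0u8;
  for (pos, bit) in scalar_bits.iter().enumerate() {
    bits = (bits << 1) | u8::from(*bit);
    // Once a full nibble is buffered, shift the accumulator up and add the entry
    if (pos % 4) == 3 {
      for _ in 0 .. 4 {
        res = res.double();
      }
      res += table[usize::from(bits)];
      bits = 0;
    }
  }
  res
}
```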
@@ -3,25 +3,19 @@ name = "generalized-bulletproofs-circuit-abstraction"
 version = "0.1.0"
 description = "An abstraction for arithmetic circuits over Generalized Bulletproofs"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/circuit-abstraction"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/circuit-abstraction"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["bulletproofs", "circuit"]
 edition = "2021"
-rust-version = "1.69"
+rust-version = "1.80"
 
 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
-
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
 
-ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
 
-generalized-bulletproofs = { path = "../generalized-bulletproofs", default-features = false }
-
-[features]
-std = ["std-shims/std", "zeroize/std", "ciphersuite/std", "generalized-bulletproofs/std"]
-default = ["std"]
+generalized-bulletproofs = { path = "../generalized-bulletproofs" }
@@ -1,14 +1,14 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
 #![deny(missing_docs)]
 #![allow(non_snake_case)]
 
-use std_shims::{vec, vec::Vec};
-
 use zeroize::{Zeroize, ZeroizeOnDrop};
 
-use ciphersuite::{group::ff::Field, Ciphersuite};
+use ciphersuite::{
+  group::ff::{Field, PrimeField},
+  Ciphersuite,
+};
 
 use generalized_bulletproofs::{
   ScalarVector, PedersenCommitment, PedersenVectorCommitment, ProofGenerators,
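The pattern being dropped in this crate (here and in the Cargo.toml hunk above) is the base branch's `no_std` support: a `std` feature gates the `#![no_std]` attribute, and collections come through the repo's `std-shims` crate so downstream code is identical either way. A minimal sketch of that pattern for a hypothetical crate, assuming `std-shims` as a dependency:

```rust
// lib.rs of a hypothetical no_std-compatible crate.
#![cfg_attr(not(feature = "std"), no_std)]

// `std_shims` re-exports alloc/std types under one path, so the code below
// compiles with or without the `std` feature enabled.
use std_shims::{vec, vec::Vec};

pub fn doubled(values: &[u64]) -> Vec<u64> {
  let mut res = vec![];
  for v in values {
    res.push(v * 2);
  }
  res
}
```

On the Cargo side this pairs with `std = ["std-shims/std", ...]` and `default = ["std"]`, exactly the feature lines the compared branch deletes.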
@@ -26,28 +26,16 @@ pub trait Transcript {
   ///
   /// It is the caller's responsibility to have properly transcripted all variables prior to
   /// sampling this challenge.
-  fn challenge<C: Ciphersuite>(&mut self) -> C::F;
-
-  /// Sample a challenge as a byte array.
-  ///
-  /// It is the caller's responsibility to have properly transcripted all variables prior to
-  /// sampling this challenge.
-  fn challenge_bytes(&mut self) -> [u8; 64];
+  fn challenge<F: PrimeField>(&mut self) -> F;
 }
 impl Transcript for ProverTranscript {
-  fn challenge<C: Ciphersuite>(&mut self) -> C::F {
-    self.challenge::<C>()
-  }
-  fn challenge_bytes(&mut self) -> [u8; 64] {
-    self.challenge_bytes()
+  fn challenge<F: PrimeField>(&mut self) -> F {
+    self.challenge()
   }
 }
 impl Transcript for VerifierTranscript<'_> {
-  fn challenge<C: Ciphersuite>(&mut self) -> C::F {
-    self.challenge::<C>()
-  }
-  fn challenge_bytes(&mut self) -> [u8; 64] {
-    self.challenge_bytes()
+  fn challenge<F: PrimeField>(&mut self) -> F {
+    self.challenge()
   }
 }
 
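To illustrate the narrowed bound: with `challenge` generic over `PrimeField` rather than a full `Ciphersuite`, helpers only need a field type in scope. A hypothetical caller, assuming the compared branch's `Transcript` trait above is in scope:

```rust
use ciphersuite::group::ff::PrimeField;

// Sampling a pair of field challenges no longer requires threading a Ciphersuite through.
fn challenge_pair<F: PrimeField, T: Transcript>(transcript: &mut T) -> (F, F) {
  (transcript.challenge::<F>(), transcript.challenge::<F>())
}
```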
@@ -76,6 +64,7 @@ impl<C: Ciphersuite> Circuit<C> {
   }
 
   /// Create an instance to prove satisfaction of a circuit with.
+  // TODO: Take the transcript here
   #[allow(clippy::type_complexity)]
   pub fn prove(
     vector_commitments: Vec<PedersenVectorCommitment<C>>,
@@ -89,13 +78,14 @@ impl<C: Ciphersuite> Circuit<C> {
   }
 
   /// Create an instance to verify a proof with.
+  // TODO: Take the transcript here
   pub fn verify() -> Self {
     Self { muls: 0, constraints: vec![], prover: None }
   }
 
   /// Evaluate a linear combination.
   ///
-  /// Yields WL aL + WR aR + WO aO + WCG CG + WV V + c.
+  /// Yields WL aL + WR aR + WO aO + WCG CG + WCH CH + WV V + c.
   ///
   /// May panic if the linear combination references non-existent terms.
   ///
@@ -117,6 +107,11 @@ impl<C: Ciphersuite> Circuit<C> {
         res += C.g_values[*j] * weight;
       }
     }
+    for (WCH, C) in lincomb.WCH().iter().zip(&prover.C) {
+      for (j, weight) in WCH {
+        res += C.h_values[*j] * weight;
+      }
+    }
     for (index, weight) in lincomb.WV() {
       res += prover.V[*index].value * weight;
     }
@@ -181,13 +176,13 @@ impl<C: Ciphersuite> Circuit<C> {
         // We can't deconstruct the witness as it implements Drop (per ZeroizeOnDrop)
         // Accordingly, we take the values within it and move forward with those
         let mut aL = vec![];
-        core::mem::swap(&mut prover.aL, &mut aL);
+        std::mem::swap(&mut prover.aL, &mut aL);
         let mut aR = vec![];
-        core::mem::swap(&mut prover.aR, &mut aR);
+        std::mem::swap(&mut prover.aR, &mut aR);
         let mut C = vec![];
-        core::mem::swap(&mut prover.C, &mut C);
+        std::mem::swap(&mut prover.C, &mut C);
         let mut V = vec![];
-        core::mem::swap(&mut prover.V, &mut V);
+        std::mem::swap(&mut prover.V, &mut V);
         ArithmeticCircuitWitness::new(ScalarVector::from(aL), ScalarVector::from(aR), C, V)
       })
       .transpose()?;
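The swap-out dance above exists because a type with a `Drop` impl cannot be destructured by move. A toy sketch of the same pattern (hypothetical `Witness` type), using `core::mem::take`, which is the swap with a default value:

```rust
struct Witness {
  values: Vec<u64>,
}

impl Drop for Witness {
  fn drop(&mut self) {
    // Stand-in for zeroization on drop
    self.values.iter_mut().for_each(|v| *v = 0);
  }
}

fn take_values(mut witness: Witness) -> Vec<u64> {
  // `take` swaps in `Vec::default()` and hands back the original buffer
  let values = core::mem::take(&mut witness.values);
  // `witness` drops here, zeroizing only the now-empty Vec
  values
}

fn main() {
  let witness = Witness { values: vec![1, 2, 3] };
  assert_eq!(take_values(witness), vec![1, 2, 3]);
}
```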
@@ -3,39 +3,35 @@ name = "ec-divisors"
 version = "0.1.0"
 description = "A library for calculating elliptic curve divisors"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/divisors"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/divisors"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["ciphersuite", "ff", "group"]
 edition = "2021"
-rust-version = "1.69"
+rust-version = "1.71"
 
 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
-
 rand_core = { version = "0.6", default-features = false }
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
 
-subtle = { version = "2", default-features = false }
-ff = { version = "0.13", default-features = false, features = ["bits"] }
+subtle = { version = "2", default-features = false, features = ["std"] }
+ff = { version = "0.13", default-features = false, features = ["std", "bits"] }
 group = { version = "0.13", default-features = false }
 
-hex = { version = "0.4", default-features = false, optional = true }
-dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, optional = true }
-pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"], optional = true }
+hex = { version = "0.4", optional = true }
+dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"], optional = true }
+pasta_curves = { version = "0.5", default-features = false, features = ["bits", "alloc"], optional = true }
 
 [dev-dependencies]
 rand_core = { version = "0.6", features = ["getrandom"] }
 
 hex = "0.4"
 dalek-ff-group = { path = "../../dalek-ff-group", features = ["std"] }
-pasta_curves = { version = "0.5", git = "https://github.com/kayabaNerve/pasta_curves.git", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616", default-features = false, features = ["bits", "alloc"] }
+pasta_curves = { version = "0.5", default-features = false, features = ["bits", "alloc"] }
 
 [features]
-std = ["std-shims/std", "zeroize/std", "subtle/std", "ff/std", "dalek-ff-group?/std"]
-ed25519 = ["hex/alloc", "dalek-ff-group"]
+ed25519 = ["hex", "dalek-ff-group"]
 pasta = ["pasta_curves"]
-default = ["std"]
@@ -1,11 +1,8 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
 #![deny(missing_docs)]
 #![allow(non_snake_case)]
 
-use std_shims::{vec, vec::Vec};
-
 use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable};
 use zeroize::{Zeroize, ZeroizeOnDrop};
 
@@ -21,7 +18,7 @@ pub use poly::Poly;
 mod tests;
 
 /// A curve usable with this library.
-pub trait DivisorCurve: Group + ConstantTimeEq + ConditionallySelectable + Zeroize {
+pub trait DivisorCurve: Group + ConstantTimeEq + ConditionallySelectable {
   /// An element of the field this curve is defined over.
   type FieldElement: Zeroize + PrimeField + ConditionallySelectable;
 
@@ -57,8 +54,6 @@ pub trait DivisorCurve: Group + ConstantTimeEq + ConditionallySelectable + Zeroi
   /// Convert a point to its x and y coordinates.
   ///
   /// Returns None if passed the point at infinity.
-  ///
-  /// This function may run in time variable to if the point is the identity.
   fn to_xy(point: Self) -> Option<(Self::FieldElement, Self::FieldElement)>;
 }
 
@@ -276,16 +271,8 @@ pub struct ScalarDecomposition<F: Zeroize + PrimeFieldBits> {
 }
 
 impl<F: Zeroize + PrimeFieldBits> ScalarDecomposition<F> {
-  /// Decompose a non-zero scalar.
-  ///
-  /// Returns `None` if the scalar is zero.
-  ///
-  /// This function is constant time if the scalar is non-zero.
-  pub fn new(scalar: F) -> Option<Self> {
-    if bool::from(scalar.is_zero()) {
-      None?;
-    }
-
+  /// Decompose a scalar.
+  pub fn new(scalar: F) -> Self {
     /*
       We need the sum of the coefficients to equal F::NUM_BITS. The scalar's bits will be less than
       F::NUM_BITS. Accordingly, we need to increment the sum of the coefficients without
@@ -413,12 +400,7 @@ impl<F: Zeroize + PrimeFieldBits> ScalarDecomposition<F> {
     }
     debug_assert!(bool::from(decomposition.iter().sum::<u64>().ct_eq(&num_bits)));
 
-    Some(ScalarDecomposition { scalar, decomposition })
-  }
-
-  /// The scalar.
-  pub fn scalar(&self) -> &F {
-    &self.scalar
+    ScalarDecomposition { scalar, decomposition }
   }
 
   /// The decomposition of the scalar.
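On the base branch, the `Option` return forces callers to surface the zero-scalar case before any divisor math runs. A sketch of caller-side handling, assuming the crate's `ScalarDecomposition` is in scope:

```rust
fn decompose_or_err<F: zeroize::Zeroize + ff::PrimeFieldBits>(
  scalar: F,
) -> Result<ScalarDecomposition<F>, &'static str> {
  // Zero has no valid decomposition summing to NUM_BITS non-trivially; reject it up front
  ScalarDecomposition::new(scalar).ok_or("cannot decompose the zero scalar")
}
```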
@@ -432,7 +414,7 @@ impl<F: Zeroize + PrimeFieldBits> ScalarDecomposition<F> {
   ///
   /// This function executes in constant time with regards to the scalar.
   ///
-  /// This function MAY panic if the generator is the point at infinity.
+  /// This function MAY panic if this scalar is zero.
   pub fn scalar_mul_divisor<C: Zeroize + DivisorCurve<Scalar = F>>(
     &self,
     mut generator: C,
@@ -448,19 +430,37 @@ impl<F: Zeroize + PrimeFieldBits> ScalarDecomposition<F> {
     divisor_points[0] = -generator * self.scalar;
 
     // Write the decomposition
-    let mut write_above: u64 = 0;
+    let mut write_to: u32 = 1;
     for coefficient in &self.decomposition {
-      // Write the generator to every slot except the slots we have already written to.
-      for i in 1 ..= (<C::Scalar as PrimeField>::NUM_BITS as u64) {
-        divisor_points[i as usize].conditional_assign(&generator, i.ct_gt(&write_above));
-      }
-
-      // Increase the next write start by the coefficient.
-      write_above += coefficient;
+      let mut coefficient = *coefficient;
+      // Iterate over the maximum amount of iters for this value to be constant time regardless of
+      // any branch prediction algorithms
+      for _ in 0 .. <C::Scalar as PrimeField>::NUM_BITS {
+        // Write the generator to the slot we're supposed to
+        /*
+          Without this loop, we'd increment this dependent on the distribution within the
+          decomposition. If the distribution is bottom-heavy, we won't access the tail of
+          `divisor_points` for a while, risking it being ejected out of the cache (causing a cache
+          miss which may not occur with a top-heavy distribution which quickly moves to the tail).
+
+          This is O(log2(NUM_BITS) ** 3) though, as this the third loop, which is horrific.
+        */
+        for i in 1 ..= <C::Scalar as PrimeField>::NUM_BITS {
+          divisor_points[i as usize] =
+            <_>::conditional_select(&divisor_points[i as usize], &generator, i.ct_eq(&write_to));
+        }
+        // If the coefficient isn't zero, increment write_to (so we don't overwrite this generator
+        // when it should be there)
+        let coefficient_not_zero = !coefficient.ct_eq(&0);
+        write_to = <_>::conditional_select(&write_to, &(write_to + 1), coefficient_not_zero);
+        // Subtract one from the coefficient, if it's not zero and won't underflow
+        coefficient =
+          <_>::conditional_select(&coefficient, &coefficient.wrapping_sub(1), coefficient_not_zero);
+      }
       generator = generator.double();
     }
 
-    // Create a divisor out of the points
+    // Create a divisor out of all points except the last point which is solely scratch
     let res = new_divisor(&divisor_points).unwrap();
     divisor_points.zeroize();
     res
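The base branch's replacement collapses the per-coefficient inner scan to a single `ct_gt` pass: slot `i` receives the current generator exactly when `i > write_above`, touching every slot once per coefficient in a fixed order. A toy model of that write pattern over plain integers (illustrative only; the real code works over curve points and replaces the `if` with `conditional_assign`/`ct_gt` so no branch depends on the data):

```rust
fn write_decomposition(decomposition: &[u64], num_bits: u64) -> Vec<u64> {
  let mut slots = vec![0u64; (num_bits as usize) + 1];
  let mut generator = 1u64; // stand-in for the doubling generator
  let mut write_above = 0u64;
  for coefficient in decomposition {
    // Every slot strictly above `write_above` takes the current generator
    for i in 1 ..= num_bits {
      if i > write_above {
        slots[i as usize] = generator;
      }
    }
    write_above += coefficient;
    generator *= 2; // stand-in for generator.double()
  }
  slots
}

fn main() {
  // Coefficients summing to num_bits = 4: later slots keep later doublings
  let slots = write_decomposition(&[2, 1, 1], 4);
  assert_eq!(&slots[1 ..], &[1, 1, 2, 4]);
}
```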
@@ -511,7 +511,6 @@ mod pasta {
 
 #[cfg(any(test, feature = "ed25519"))]
 mod ed25519 {
-  use subtle::{Choice, ConditionallySelectable};
   use group::{
     ff::{Field, PrimeField},
     Group, GroupEncoding,
@@ -559,13 +558,9 @@ mod ed25519 {
         ((D * edwards_y_sq) + Self::FieldElement::ONE).invert().unwrap())
         .sqrt()
         .unwrap();
-
-      // Negate the x coordinate if the sign doesn't match
-      edwards_x = <_>::conditional_select(
-        &edwards_x,
-        &-edwards_x,
-        edwards_x.is_odd() ^ Choice::from(x_is_odd),
-      );
+      if u8::from(bool::from(edwards_x.is_odd())) != x_is_odd {
+        edwards_x = -edwards_x;
+      }
 
       // Calculate the x and y coordinates for Wei25519
       let edwards_y_plus_one = Self::FieldElement::ONE + edwards_y;
@@ -1,5 +1,4 @@
 use core::ops::{Add, Neg, Sub, Mul, Rem};
-use std_shims::{vec, vec::Vec};
 
 use subtle::{Choice, ConstantTimeEq, ConstantTimeGreater, ConditionallySelectable};
 use zeroize::{Zeroize, ZeroizeOnDrop};
@@ -258,7 +257,7 @@ impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
     self.zero_coefficient = F::ZERO;
 
     // Move the x coefficients
-    core::mem::swap(&mut self.yx_coefficients[power_of_y - 1], &mut self.x_coefficients);
+    std::mem::swap(&mut self.yx_coefficients[power_of_y - 1], &mut self.x_coefficients);
     self.x_coefficients = vec![];
 
     self
@@ -565,7 +564,7 @@ impl<F: From<u64> + Zeroize + PrimeField> Poly<F> {
     quotient = conditional_select_poly(
       quotient,
       // If the dividing coefficient was for y**0 x**0, we return the poly scaled by its inverse
-      self * denominator_dividing_coefficient_inv,
+      self.clone() * denominator_dividing_coefficient_inv,
       denominator_dividing_coefficient.ct_eq(&CoefficientIndex { y_pow: 0, x_pow: 0 }),
     );
     remainder = conditional_select_poly(
@@ -3,25 +3,19 @@ name = "generalized-bulletproofs-ec-gadgets"
 version = "0.1.0"
 description = "Gadgets for working with an embedded Elliptic Curve in a Generalized Bulletproofs circuit"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/fcmps/ec-gadgets"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/ec-gadgets"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["bulletproofs", "circuit", "divisors"]
 edition = "2021"
-rust-version = "1.69"
+rust-version = "1.80"
 
 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
-
 generic-array = { version = "1", default-features = false, features = ["alloc"] }
 
-ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
 
-generalized-bulletproofs-circuit-abstraction = { path = "../circuit-abstraction", default-features = false }
-
-[features]
-std = ["std-shims/std", "ciphersuite/std", "generalized-bulletproofs-circuit-abstraction/std"]
-default = ["std"]
+generalized-bulletproofs-circuit-abstraction = { path = "../circuit-abstraction" }
@@ -1,5 +1,4 @@
 use core::fmt;
-use std_shims::{vec, vec::Vec};
 
 use ciphersuite::{
   group::ff::{Field, PrimeField, BatchInverter},
@@ -11,6 +10,11 @@ use generalized_bulletproofs_circuit_abstraction::*;
 use crate::*;
 
 /// Parameters for a discrete logarithm proof.
+///
+/// This isn't required to be implemented by the Field/Group/Ciphersuite, solely a struct, to
+/// enable parameterization of discrete log proofs to the bitlength of the discrete logarithm.
+/// While that may be F::NUM_BITS, a discrete log proof a for a full scalar, it could also be 64,
+/// a discrete log proof for a u64 (such as if opening a Pedersen commitment in-circuit).
 pub trait DiscreteLogParameters {
   /// The amount of bits used to represent a scalar.
   type ScalarBits: ArrayLength;
@@ -26,8 +30,8 @@ pub trait DiscreteLogParameters {
 
   /// The amount of y x**i coefficients in a divisor.
   ///
-  /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) divided
-  /// by two, minus two.
+  /// This is the amount of points in a divisor (the amount of bits in a scalar, plus one) plus
+  /// one, divided by two, minus two.
   type YxCoefficients: ArrayLength;
 }
 
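As a concrete reading of the two wordings (integer division throughout, with illustrative numbers rather than constants from the crate): a 255-bit scalar yields 256 divisor points, and

```latex
\left\lfloor \tfrac{255 + 1}{2} \right\rfloor - 2 = 126,
\qquad
\left\lfloor \tfrac{(255 + 1) + 1}{2} \right\rfloor - 2 = 126
```

For odd bit lengths the extra "plus one" is absorbed by the floor, so both readings agree; for even bit lengths (say 254 bits, 255 points) they give 125 versus 126, i.e. the revised wording rounds the coefficient count up.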
@@ -102,6 +106,8 @@ pub struct Divisor<Parameters: DiscreteLogParameters> {
   /// exceeding trivial complexity.
   pub y: Variable,
   /// The coefficients for the `y**1 x**i` terms of the polynomial.
+  // This subtraction enforces the divisor to have at least 4 points which is acceptable.
+  // TODO: Double check these constants
   pub yx: GenericArray<Variable, Parameters::YxCoefficients>,
   /// The coefficients for the `x**i` terms of the polynomial, skipping x**1.
   ///
@@ -318,7 +324,7 @@ pub trait EcDlogGadgets<C: Ciphersuite> {
     &self,
     transcript: &mut T,
     curve: &CurveSpec<C::F>,
-    generators: &[&GeneratorTable<C::F, Parameters>],
+    generators: &[GeneratorTable<C::F, Parameters>],
   ) -> (DiscreteLogChallenge<C::F, Parameters>, Vec<ChallengedGenerator<C::F, Parameters>>);
 
   /// Prove this point has the specified discrete logarithm over the specified generator.
@@ -349,14 +355,12 @@ impl<C: Ciphersuite> EcDlogGadgets<C> for Circuit<C> {
     &self,
     transcript: &mut T,
     curve: &CurveSpec<C::F>,
-    generators: &[&GeneratorTable<C::F, Parameters>],
+    generators: &[GeneratorTable<C::F, Parameters>],
   ) -> (DiscreteLogChallenge<C::F, Parameters>, Vec<ChallengedGenerator<C::F, Parameters>>) {
     // Get the challenge points
-    let sign_of_points = transcript.challenge_bytes();
-    let sign_of_point_0 = (sign_of_points[0] & 1) == 1;
-    let sign_of_point_1 = ((sign_of_points[0] >> 1) & 1) == 1;
+    // TODO: Implement a proper hash to curve
     let (c0_x, c0_y) = loop {
-      let c0_x = transcript.challenge::<C>();
+      let c0_x: C::F = transcript.challenge();
       let Some(c0_y) =
         Option::<C::F>::from(((c0_x.square() * c0_x) + (curve.a * c0_x) + curve.b).sqrt())
       else {
@@ -364,16 +368,17 @@ impl<C: Ciphersuite> EcDlogGadgets<C> for Circuit<C> {
       };
       // Takes the even y coordinate as to not be dependent on whatever root the above sqrt
       // happens to returns
-      break (c0_x, if bool::from(c0_y.is_odd()) != sign_of_point_0 { -c0_y } else { c0_y });
+      // TODO: Randomly select which to take
+      break (c0_x, if bool::from(c0_y.is_odd()) { -c0_y } else { c0_y });
     };
     let (c1_x, c1_y) = loop {
-      let c1_x = transcript.challenge::<C>();
+      let c1_x: C::F = transcript.challenge();
       let Some(c1_y) =
         Option::<C::F>::from(((c1_x.square() * c1_x) + (curve.a * c1_x) + curve.b).sqrt())
       else {
         continue;
       };
-      break (c1_x, if bool::from(c1_y.is_odd()) != sign_of_point_1 { -c1_y } else { c1_y });
+      break (c1_x, if bool::from(c1_y.is_odd()) { -c1_y } else { c1_y });
     };
 
     // mmadd-1998-cmo
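Both sides sample challenge points by rejection: draw a candidate x coordinate from the transcript, attempt to solve y² = x³ + ax + b, and retry on failure; the base branch additionally derives the y-coordinate's sign from transcript bytes rather than always fixing the even root. A hedged sketch of that rejection loop, with a caller-supplied draw function standing in for the transcript:

```rust
use ciphersuite::group::ff::{Field, PrimeField};

/// Find a point on y^2 = x^3 + ax + b by drawing candidate x coordinates.
fn sample_point<F: PrimeField, D: FnMut() -> F>(a: F, b: F, mut draw: D) -> (F, F) {
  loop {
    let x = draw();
    // `sqrt` only succeeds for quadratic residues; roughly half of candidates pass
    if let Some(y) = Option::<F>::from(((x.square() * x) + (a * x) + b).sqrt()) {
      // Normalize to the even root so the result doesn't depend on sqrt's choice
      return (x, if bool::from(y.is_odd()) { -y } else { y });
    }
  }
}
```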
@@ -478,7 +483,7 @@ impl<C: Ciphersuite> EcDlogGadgets<C> for Circuit<C> {
     let arg_iter = arg_iter.chain(dlog.iter());
     for variable in arg_iter {
       debug_assert!(
-        matches!(variable, Variable::CG { .. } | Variable::V(_)),
+        matches!(variable, Variable::CG { .. } | Variable::CH { .. } | Variable::V(_)),
         "discrete log proofs requires all arguments belong to commitments",
       );
     }
@@ -1,6 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
 #![deny(missing_docs)]
 #![allow(non_snake_case)]
 
@@ -17,22 +17,20 @@ rustdoc-args = ["--cfg", "docsrs"]
 rustversion = "1"
 hex-literal = { version = "0.4", default-features = false }
 
-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, optional = true }
-
-rand_core = { version = "0.6", default-features = false }
-
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
-subtle = { version = "^2.4", default-features = false }
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
+subtle = { version = "^2.4", default-features = false, features = ["std"] }
 
 generic-array = { version = "1", default-features = false }
 crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }
 
 dalek-ff-group = { path = "../../dalek-ff-group", version = "0.4", default-features = false }
 
-blake2 = { version = "0.10", default-features = false }
-ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
-ec-divisors = { path = "../divisors", default-features = false }
-generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets", default-features = false }
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
+ec-divisors = { path = "../divisors" }
+generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets" }
 
 [dev-dependencies]
 hex = "0.4"
@@ -40,8 +38,3 @@ hex = "0.4"
 rand_core = { version = "0.6", features = ["std"] }
 
 ff-group-tests = { path = "../../ff-group-tests" }
-
-[features]
-alloc = ["std-shims", "zeroize/alloc", "ciphersuite/alloc"]
-std = ["std-shims/std", "rand_core/std", "zeroize/std", "subtle/std", "blake2/std", "ciphersuite/std", "ec-divisors/std", "generalized-bulletproofs-ec-gadgets/std"]
-default = ["std"]
@@ -7,7 +7,7 @@ This curve was found via
 for finding curves (specifically, curve cycles), modified to search for curves
 whose field is the Ed25519 scalar field (not the Ed25519 field).
 
-```ignore
+```
 p = 0x1000000000000000000000000000000014def9dea2f79cd65812631a5cf5d3ed
 q = 0x0fffffffffffffffffffffffffffffffe53f4debb78ff96877063f0306eef96b
 D = -420435
@@ -1,9 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
-
-#[cfg(any(feature = "alloc", feature = "std"))]
-use std_shims::io::{self, Read};
 
 use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2};
 use ciphersuite::group::{ff::PrimeField, Group};
@@ -37,29 +33,10 @@ impl ciphersuite::Ciphersuite for Embedwards25519 {
     Point::generator()
   }
 
-  fn reduce_512(scalar: [u8; 64]) -> Self::F {
-    Scalar::wide_reduce(scalar)
-  }
-
   fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
     use blake2::Digest;
     Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap())
   }
-
-  // We override the provided impl, which compares against the reserialization, because
-  // we already require canonicity
-  #[cfg(any(feature = "alloc", feature = "std"))]
-  #[allow(non_snake_case)]
-  fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
-    use ciphersuite::group::GroupEncoding;
-
-    let mut encoding = <Self::G as GroupEncoding>::Repr::default();
-    reader.read_exact(encoding.as_mut())?;
-
-    let point = Option::<Self::G>::from(Self::G::from_bytes(&encoding))
-      .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point"))?;
-    Ok(point)
-  }
 }
 
 impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Embedwards25519 {
@@ -46,8 +46,7 @@ impl ConstantTimeEq for Point {
     let y1 = self.y * other.z;
     let y2 = other.y * self.z;
 
-    // Both identity or equivalent over their denominators
-    (self.z.is_zero() & other.z.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2))
+    (self.x.is_zero() & other.x.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2))
   }
 }
 
@@ -199,7 +198,6 @@ impl Group for Point {
     Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO }
   }
   fn generator() -> Self {
-    // Point with the lowest valid x-coordinate
     Point {
       x: FieldElement::from_repr(hex_literal::hex!(
         "0100000000000000000000000000000000000000000000000000000000000000"
@@ -337,10 +335,8 @@ impl GroupEncoding for Point {
     // If this the identity, set y to 1
     let y =
       CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity);
-    // If this the identity, set y to 1 and z to 0 (instead of 1)
-    let z = <_>::conditional_select(&FieldElement::ONE, &FieldElement::ZERO, is_identity);
     // Create the point if we have a y solution
-    let point = y.map(|y| Point { x, y, z });
+    let point = y.map(|y| Point { x, y, z: FieldElement::ONE });
 
     let not_negative_zero = !(is_identity & sign);
     // Only return the point if it isn't -0
@@ -3,27 +3,25 @@ name = "generalized-bulletproofs"
 version = "0.1.0"
 description = "Generalized Bulletproofs"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/crypto/generalized-bulletproofs"
+repository = "https://github.com/serai-dex/serai/tree/develop/crypto/evrf/generalized-bulletproofs"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["ciphersuite", "ff", "group"]
 edition = "2021"
-rust-version = "1.69"
+rust-version = "1.80"
 
 [package.metadata.docs.rs]
 all-features = true
 rustdoc-args = ["--cfg", "docsrs"]
 
 [dependencies]
-std-shims = { path = "../../../common/std-shims", version = "^0.1.1", default-features = false }
-
-rand_core = { version = "0.6", default-features = false }
-
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
-
-blake2 = { version = "0.10", default-features = false }
-
-multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["batch"] }
-ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
+
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
+
+multiexp = { path = "../../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
 
 [dev-dependencies]
 rand_core = { version = "0.6", features = ["getrandom"] }
@@ -33,6 +31,4 @@ transcript = { package = "flexible-transcript", path = "../../transcript", featu
 ciphersuite = { path = "../../ciphersuite", features = ["ristretto"] }
 
 [features]
-std = ["std-shims/std", "rand_core/std", "zeroize/std", "blake2/std", "multiexp/std", "ciphersuite/std"]
-tests = ["std"]
-default = ["std"]
+tests = []
@@ -1,5 +1,3 @@
-use std_shims::{vec, vec::Vec};
-
 use rand_core::{RngCore, CryptoRng};
 
 use zeroize::{Zeroize, ZeroizeOnDrop};
@@ -22,10 +20,10 @@ pub use crate::lincomb::{Variable, LinComb};
 /// `aL * aR = aO, WL * aL + WR * aR + WO * aO = WV * V + c`.
 ///
 /// Generalized Bulletproofs modifies this to
-/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G = WV * V + c`.
+/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WCH * C_H = WV * V + c`.
 ///
 /// We implement the latter, yet represented (for simplicity) as
-/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WV * V + c = 0`.
+/// `aL * aR = aO, WL * aL + WR * aR + WO * aO + WCG * C_G + WCH * C_H + WV * V + c = 0`.
 #[derive(Clone, Debug)]
 pub struct ArithmeticCircuitStatement<'a, C: Ciphersuite> {
   generators: ProofGenerators<'a, C>,
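Rendered as math, the compared branch's relation (a direct transcription of the doc comment's names, per constraint row) is:

```latex
a_L \circ a_R = a_O,
\qquad
W_L a_L + W_R a_R + W_O a_O + W_{CG} C_G + W_{CH} C_H + W_V V + c = 0
```

The base branch is the same statement without the \(W_{CH} C_H\) term, i.e. with vector commitments carrying values over only one generator vector.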
@@ -35,7 +33,7 @@ pub struct ArithmeticCircuitStatement<'a, C: Ciphersuite> {
   V: PointVector<C>,
 }
 
-impl<C: Ciphersuite> Zeroize for ArithmeticCircuitStatement<'_, C> {
+impl<'a, C: Ciphersuite> Zeroize for ArithmeticCircuitStatement<'a, C> {
   fn zeroize(&mut self) {
     self.constraints.zeroize();
     self.C.zeroize();
@@ -204,10 +202,16 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
       if c.g_values.len() > n {
         Err(AcError::NotEnoughGenerators)?;
       }
+      if c.h_values.len() > n {
+        Err(AcError::NotEnoughGenerators)?;
+      }
       // The Pedersen vector commitments internally have n terms
       while c.g_values.len() < n {
         c.g_values.0.push(C::F::ZERO);
       }
+      while c.h_values.len() < n {
+        c.h_values.0.push(C::F::ZERO);
+      }
     }
@@ -223,7 +227,12 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
       }
     }
     for (commitment, opening) in self.C.0.iter().zip(witness.c.iter()) {
-      if Some(*commitment) != opening.commit(self.generators.g_bold_slice(), self.generators.h())
+      if Some(*commitment) !=
+        opening.commit(
+          self.generators.g_bold_slice(),
+          self.generators.h_bold_slice(),
+          self.generators.h(),
+        )
       {
         Err(AcError::InconsistentWitness)?;
       }
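The extra argument reflects that the compared branch's vector commitments carry values over both generator vectors. A sketch of what such a two-vector Pedersen commitment computes (a hypothetical helper; the crate's real `commit` also handles length checks and presumably uses a multiexp):

```rust
use ciphersuite::Ciphersuite;

fn commit<C: Ciphersuite>(
  g_bold: &[C::G],
  h_bold: &[C::G],
  h: C::G,
  g_values: &[C::F],
  h_values: &[C::F],
  mask: C::F,
) -> C::G {
  // mask * h + sum_i g_values[i] * g_bold[i] + sum_i h_values[i] * h_bold[i]
  let mut res = h * mask;
  for (generator, value) in g_bold.iter().zip(g_values).chain(h_bold.iter().zip(h_values)) {
    res += *generator * *value;
  }
  res
}
```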
@@ -241,6 +250,11 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
             weights.iter().map(|(j, weight)| *weight * c.g_values[*j])
           }),
         )
+        .chain(
+          constraint.WCH.iter().zip(&witness.c).flat_map(|(weights, c)| {
+            weights.iter().map(|(j, weight)| *weight * c.h_values[*j])
+          }),
+        )
         .chain(constraint.WV.iter().map(|(i, weight)| *weight * witness.v[*i].value))
         .chain(core::iter::once(constraint.c))
         .sum::<C::F>();
@@ -292,8 +306,8 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     transcript.push_point(AI);
     transcript.push_point(AO);
     transcript.push_point(S);
-    let y = transcript.challenge::<C>();
-    let z = transcript.challenge::<C>();
+    let y = transcript.challenge();
+    let z = transcript.challenge();
     let YzChallenges { y_inv, z } = self.yz_challenges(y, z);
     let y = ScalarVector::powers(y, n);
 
@@ -304,7 +318,7 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     // polynomial).
 
     // ni = n'
-    let ni = 2 + (2 * (c / 2));
+    let ni = 2 * (c + 1);
     // These indexes are from the Generalized Bulletproofs paper
     #[rustfmt::skip]
     let ilr = ni / 2; // 1 if c = 0
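The two branches size n' (and so the t polynomial) differently as the number of vector commitments c grows. A quick numeric comparison of the two formulas, without any claim as to which bound is the tighter one:

```rust
fn main() {
  for c in 0u64 ..= 4 {
    let base = 2 + (2 * (c / 2)); // rounds c down to even before scaling
    let head = 2 * (c + 1);      // strictly increases with every commitment
    println!("c = {c}: base ni = {base}, head ni = {head}");
  }
  // c = 0 -> 2 vs 2; c = 1 -> 2 vs 4; c = 2 -> 4 vs 6; c = 3 -> 4 vs 8; c = 4 -> 6 vs 10
}
```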
@@ -365,25 +379,32 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     // r decreasing from n' (skipping jlr)
 
     let mut cg_weights = Vec::with_capacity(witness.c.len());
+    let mut ch_weights = Vec::with_capacity(witness.c.len());
     for i in 0 .. witness.c.len() {
       let mut cg = ScalarVector::new(n);
+      let mut ch = ScalarVector::new(n);
       for (constraint, z) in self.constraints.iter().zip(&z.0) {
         if let Some(WCG) = constraint.WCG.get(i) {
           accumulate_vector(&mut cg, WCG, *z);
         }
+        if let Some(WCH) = constraint.WCH.get(i) {
+          accumulate_vector(&mut ch, WCH, *z);
+        }
       }
       cg_weights.push(cg);
+      ch_weights.push(ch);
     }
 
-    for (mut i, (c, cg_weights)) in witness.c.iter().zip(cg_weights).enumerate() {
-      if i >= ilr {
-        i += 1;
-      }
-      // Because i has skipped ilr, j will skip jlr
+    for (i, (c, (cg_weights, ch_weights))) in
+      witness.c.iter().zip(cg_weights.into_iter().zip(ch_weights)).enumerate()
+    {
+      let i = i + 1;
       let j = ni - i;
 
       l[i] = c.g_values.clone();
+      l[j] = ch_weights * &y_inv;
       r[j] = cg_weights;
+      r[i] = (c.h_values.clone() * &y) + &r[i];
     }
 
     // Multiply them to obtain t
@@ -416,7 +437,7 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
       transcript.push_point(multiexp(&[(*t, self.generators.g()), (*tau, self.generators.h())]));
     }
 
-    let x: ScalarVector<C::F> = ScalarVector::powers(transcript.challenge::<C>(), t.len());
+    let x: ScalarVector<C::F> = ScalarVector::powers(transcript.challenge(), t.len());
 
     let poly_eval = |poly: &[ScalarVector<C::F>], x: &ScalarVector<_>| -> ScalarVector<_> {
       let mut res = ScalarVector::<C::F>::new(poly[0].0.len());
@@ -456,11 +477,8 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     let mut u = (alpha * x[ilr]) + (beta * x[io]) + (rho * x[is]);
 
     // Incorporate the commitment masks multiplied by the associated power of x
-    for (mut i, commitment) in witness.c.iter().enumerate() {
-      // If this index is ni / 2, skip it
-      if i >= (ni / 2) {
-        i += 1;
-      }
+    for (i, commitment) in witness.c.iter().enumerate() {
+      let i = i + 1;
       u += x[i] * commitment.mask;
     }
     u
@@ -480,7 +498,7 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     transcript.push_scalar(tau_x);
     transcript.push_scalar(u);
     transcript.push_scalar(t_caret);
-    let ip_x = transcript.challenge::<C>();
+    let ip_x = transcript.challenge();
     P_terms.push((ip_x * t_caret, self.generators.g()));
     IpStatement::new(
       self.generators,
@@ -495,27 +513,16 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
   }
 
   /// Verify a proof for this statement.
-  ///
-  /// This solely queues the statement for batch verification. The resulting BatchVerifier MUST
-  /// still be verified.
-  ///
-  /// If this proof returns an error, the BatchVerifier MUST be assumed corrupted and discarded.
   pub fn verify<R: RngCore + CryptoRng>(
     self,
     rng: &mut R,
     verifier: &mut BatchVerifier<C>,
     transcript: &mut VerifierTranscript,
   ) -> Result<(), AcError> {
-    if verifier.g_bold.len() < self.generators.len() {
-      verifier.g_bold.resize(self.generators.len(), C::F::ZERO);
-      verifier.h_bold.resize(self.generators.len(), C::F::ZERO);
-      verifier.h_sum.resize(self.generators.len(), C::F::ZERO);
-    }
-
     let n = self.n();
     let c = self.c();
 
-    let ni = 2 + (2 * (c / 2));
+    let ni = 2 * (c + 1);
 
     let ilr = ni / 2;
     let io = ni;
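The removed `resize` block is how the base branch lets statements of different sizes share one batch verifier: the scalar accumulators grow on demand and never shrink. A toy sketch of that sizing logic (illustrative struct, not the crate's actual `BatchVerifier`):

```rust
struct BatchVerifier {
  g_bold: Vec<u64>,
  h_bold: Vec<u64>,
}

impl BatchVerifier {
  fn ensure_capacity(&mut self, generators: usize) {
    // Grow, never shrink, so a smaller statement can follow a larger one
    if self.g_bold.len() < generators {
      self.g_bold.resize(generators, 0);
      self.h_bold.resize(generators, 0);
    }
  }
}
```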
@@ -528,8 +535,8 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     let AI = transcript.read_point::<C>().map_err(|_| AcError::IncompleteProof)?;
     let AO = transcript.read_point::<C>().map_err(|_| AcError::IncompleteProof)?;
     let S = transcript.read_point::<C>().map_err(|_| AcError::IncompleteProof)?;
-    let y = transcript.challenge::<C>();
-    let z = transcript.challenge::<C>();
+    let y = transcript.challenge();
+    let z = transcript.challenge();
     let YzChallenges { y_inv, z } = self.yz_challenges(y, z);
 
     let mut l_weights = ScalarVector::new(n);
@@ -552,7 +559,7 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     for _ in 0 .. (t_poly_len - ni - 1) {
       T_after_ni.push(transcript.read_point::<C>().map_err(|_| AcError::IncompleteProof)?);
     }
-    let x: ScalarVector<C::F> = ScalarVector::powers(transcript.challenge::<C>(), t_poly_len);
+    let x: ScalarVector<C::F> = ScalarVector::powers(transcript.challenge(), t_poly_len);
 
     let tau_x = transcript.read_scalar::<C>().map_err(|_| AcError::IncompleteProof)?;
     let u = transcript.read_scalar::<C>().map_err(|_| AcError::IncompleteProof)?;
@@ -617,25 +624,34 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
     h_bold_scalars = h_bold_scalars + &(o_weights * verifier_weight);
 
     let mut cg_weights = Vec::with_capacity(self.C.len());
+    let mut ch_weights = Vec::with_capacity(self.C.len());
     for i in 0 .. self.C.len() {
       let mut cg = ScalarVector::new(n);
+      let mut ch = ScalarVector::new(n);
       for (constraint, z) in self.constraints.iter().zip(&z.0) {
         if let Some(WCG) = constraint.WCG.get(i) {
           accumulate_vector(&mut cg, WCG, *z);
         }
+        if let Some(WCH) = constraint.WCH.get(i) {
+          accumulate_vector(&mut ch, WCH, *z);
+        }
       }
       cg_weights.push(cg);
+      ch_weights.push(ch);
     }
 
     // Push the terms for C, which increment from 0, and the terms for WC, which decrement from
     // n'
-    for (mut i, (C, WCG)) in self.C.0.into_iter().zip(cg_weights).enumerate() {
-      if i >= (ni / 2) {
-        i += 1;
-      }
+    for (i, (C, (WCG, WCH))) in
+      self.C.0.into_iter().zip(cg_weights.into_iter().zip(ch_weights)).enumerate()
+    {
+      let i = i + 1;
       let j = ni - i;
       verifier.additional.push((x[i], C));
       h_bold_scalars = h_bold_scalars + &(WCG * x[j]);
+      for (i, scalar) in (WCH * &y_inv * x[j]).0.into_iter().enumerate() {
+        verifier.g_bold[i] += scalar;
+      }
     }
 
     // All terms for h_bold here have actually been for h_bold', h_bold * y_inv
@@ -650,7 +666,7 @@ impl<'a, C: Ciphersuite> ArithmeticCircuitStatement<'a, C> {
 
     // Prove for lines 88, 92 with an Inner-Product statement
     // This inlines Protocol 1, as our IpStatement implements Protocol 2
-    let ip_x = transcript.challenge::<C>();
+    let ip_x = transcript.challenge();
     // P is amended with this additional term
     verifier.g += verifier_weight * ip_x * t_caret;
     IpStatement::new(self.generators, y_inv, ip_x, P::Verifier { verifier_weight })
|
|||||||
use std_shims::{vec, vec::Vec};
|
|
||||||
|
|
||||||
use multiexp::multiexp_vartime;
|
use multiexp::multiexp_vartime;
|
||||||
use ciphersuite::{group::ff::Field, Ciphersuite};
|
use ciphersuite::{group::ff::Field, Ciphersuite};
|
||||||
|
|
||||||
@@ -188,7 +186,7 @@ impl<'a, C: Ciphersuite> IpStatement<'a, C> {
     // Now that we've calculate L, R, transcript them to receive x (26-27)
     transcript.push_point(L);
     transcript.push_point(R);
-    let x: C::F = transcript.challenge::<C>();
+    let x: C::F = transcript.challenge();
     let x_inv = x.invert().unwrap();
 
     // The prover and verifier now calculate the following (28-31)
@@ -271,19 +269,11 @@ impl<'a, C: Ciphersuite> IpStatement<'a, C> {
|
|||||||
/// This will return Err if there is an error. This will return Ok if the proof was successfully
|
/// This will return Err if there is an error. This will return Ok if the proof was successfully
|
||||||
/// queued for batch verification. The caller is required to verify the batch in order to ensure
|
/// queued for batch verification. The caller is required to verify the batch in order to ensure
|
||||||
/// the proof is actually correct.
|
/// the proof is actually correct.
|
||||||
///
|
|
||||||
/// If this proof returns an error, the BatchVerifier MUST be assumed corrupted and discarded.
|
|
||||||
pub(crate) fn verify(
|
pub(crate) fn verify(
|
||||||
self,
|
self,
|
||||||
verifier: &mut BatchVerifier<C>,
|
verifier: &mut BatchVerifier<C>,
|
||||||
transcript: &mut VerifierTranscript,
|
transcript: &mut VerifierTranscript,
|
||||||
) -> Result<(), IpError> {
|
) -> Result<(), IpError> {
|
||||||
if verifier.g_bold.len() < self.generators.len() {
|
|
||||||
verifier.g_bold.resize(self.generators.len(), C::F::ZERO);
|
|
||||||
verifier.h_bold.resize(self.generators.len(), C::F::ZERO);
|
|
||||||
verifier.h_sum.resize(self.generators.len(), C::F::ZERO);
|
|
||||||
}
|
|
||||||
|
|
||||||
let IpStatement { generators, h_bold_weights, u, P } = self;
|
let IpStatement { generators, h_bold_weights, u, P } = self;
|
||||||
|
|
||||||
// Calculate the discrete log w.r.t. 2 for the amount of generators present
|
// Calculate the discrete log w.r.t. 2 for the amount of generators present
|
||||||
@@ -306,7 +296,7 @@ impl<'a, C: Ciphersuite> IpStatement<'a, C> {
|
|||||||
for _ in 0 .. lr_len {
|
for _ in 0 .. lr_len {
|
||||||
L.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
|
L.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
|
||||||
R.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
|
R.push(transcript.read_point::<C>().map_err(|_| IpError::IncompleteProof)?);
|
||||||
xs.push(transcript.challenge::<C>());
|
xs.push(transcript.challenge());
|
||||||
}
|
}
|
||||||
|
|
||||||
// We calculate their inverse in batch
|
// We calculate their inverse in batch
|
||||||
|
|||||||
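The `verify` doc comment above describes queueing into a shared batch rather than checking each proof individually. A hedged sketch of that general pattern (illustrative only; the crate's actual `BatchVerifier` tracks per-generator scalars for a fixed-base multiexp, as shown in the next file):

use group::Group;

// Every queued proof contributes its multiexp terms scaled by a random weight,
// and one final check confirms the accumulated sum is the identity.
struct Batch<G: Group> {
  terms: Vec<(G::Scalar, G)>,
}

impl<G: Group> Batch<G> {
  fn new() -> Self {
    Batch { terms: vec![] }
  }

  // `weight` should be sampled randomly per queued proof, so an invalid proof
  // can't be crafted to cancel against another.
  fn queue(&mut self, weight: G::Scalar, proof_terms: impl IntoIterator<Item = (G::Scalar, G)>) {
    for (scalar, point) in proof_terms {
      self.terms.push((weight * scalar, point));
    }
  }

  fn verify(self) -> bool {
    let mut accum = G::identity();
    for (scalar, point) in self.terms {
      accum += point * scalar;
    }
    accum.is_identity().into()
  }
}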
@@ -1,11 +1,10 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
 #![deny(missing_docs)]
 #![allow(non_snake_case)]

 use core::fmt;
-use std_shims::{vec, vec::Vec, collections::HashSet};
+use std::collections::HashSet;

 use zeroize::Zeroize;

@@ -71,26 +70,14 @@ pub struct Generators<C: Ciphersuite> {
 #[must_use]
 #[derive(Clone)]
 pub struct BatchVerifier<C: Ciphersuite> {
-  /// The summed scalar for the G generator.
-  pub g: C::F,
-  /// The summed scalar for the H generator.
-  pub h: C::F,
+  g: C::F,
+  h: C::F,

-  /// The summed scalars for the G_bold generators.
-  pub g_bold: Vec<C::F>,
-  /// The summed scalars for the H_bold generators.
-  pub h_bold: Vec<C::F>,
-  /// The summed scalars for the sums of all H generators prior to the index.
-  ///
-  /// This is not populated with the full set of summed H generators. This is only populated with
-  /// the powers of 2. Accordingly, an index i specifies a scalar for the sum of all H generators
-  /// from H**2**0 ..= H**2**i.
-  pub h_sum: Vec<C::F>,
+  g_bold: Vec<C::F>,
+  h_bold: Vec<C::F>,
+  h_sum: Vec<C::F>,

-  /// Additional (non-fixed) points to include in the multiexp.
-  ///
-  /// This is used for proof-specific elements.
-  pub additional: Vec<(C::F, C::G)>,
+  additional: Vec<(C::F, C::G)>,
 }

 impl<C: Ciphersuite> fmt::Debug for Generators<C> {

@@ -184,15 +171,15 @@ impl<C: Ciphersuite> Generators<C> {
     Ok(Generators { g, h, g_bold, h_bold, h_sum })
   }

-  /// Create a BatchVerifier for proofs which use a consistent set of generators.
-  pub fn batch_verifier() -> BatchVerifier<C> {
+  /// Create a BatchVerifier for proofs which use these generators.
+  pub fn batch_verifier(&self) -> BatchVerifier<C> {
     BatchVerifier {
       g: C::F::ZERO,
       h: C::F::ZERO,

-      g_bold: vec![],
-      h_bold: vec![],
-      h_sum: vec![],
+      g_bold: vec![C::F::ZERO; self.g_bold.len()],
+      h_bold: vec![C::F::ZERO; self.h_bold.len()],
+      h_sum: vec![C::F::ZERO; self.h_sum.len()],

       additional: Vec::with_capacity(128),
     }

@@ -260,7 +247,7 @@ impl<C: Ciphersuite> Generators<C> {
   }
 }

-impl<C: Ciphersuite> ProofGenerators<'_, C> {
+impl<'a, C: Ciphersuite> ProofGenerators<'a, C> {
   pub(crate) fn len(&self) -> usize {
     self.g_bold.len()
   }

@@ -311,6 +298,8 @@ impl<C: Ciphersuite> PedersenCommitment<C> {
 pub struct PedersenVectorCommitment<C: Ciphersuite> {
   /// The values committed to across the `g` (bold) generators.
   pub g_values: ScalarVector<C::F>,
+  /// The values committed to across the `h` (bold) generators.
+  pub h_values: ScalarVector<C::F>,
   /// The mask blinding the values committed to.
   pub mask: C::F,
 }

@@ -320,8 +309,8 @@ impl<C: Ciphersuite> PedersenVectorCommitment<C> {
   ///
   /// This function returns None if the amount of generators is less than the amount of values
   /// within the relevant vector.
-  pub fn commit(&self, g_bold: &[C::G], h: C::G) -> Option<C::G> {
-    if g_bold.len() < self.g_values.len() {
+  pub fn commit(&self, g_bold: &[C::G], h_bold: &[C::G], h: C::G) -> Option<C::G> {
+    if (g_bold.len() < self.g_values.len()) || (h_bold.len() < self.h_values.len()) {
       None?;
     };

@@ -329,6 +318,9 @@ impl<C: Ciphersuite> PedersenVectorCommitment<C> {
     for pair in self.g_values.0.iter().cloned().zip(g_bold.iter().cloned()) {
       terms.push(pair);
     }
+    for pair in self.h_values.0.iter().cloned().zip(h_bold.iter().cloned()) {
+      terms.push(pair);
+    }
     let res = multiexp(&terms);
     terms.zeroize();
     Some(res)
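Written out, the extended `commit` produces C = Σᵢ g_values[i]·g_bold[i] + Σⱼ h_values[j]·h_bold[j] + mask·h. A minimal sketch of that multiexp using the `group` crate (illustrative names; the real `commit` returns None when the generator slices are too short, and zeroizes its terms):

use group::Group;

// A minimal sketch of the committed value, assuming generator slices of
// sufficient length.
fn pedersen_vector_commit<G: Group>(
  g_bold: &[G],
  h_bold: &[G],
  h: G,
  g_values: &[G::Scalar],
  h_values: &[G::Scalar],
  mask: G::Scalar,
) -> G {
  let mut accum = h * mask;
  for (generator, value) in g_bold.iter().zip(g_values) {
    accum += *generator * *value;
  }
  for (generator, value) in h_bold.iter().zip(h_values) {
    accum += *generator * *value;
  }
  accum
}

Also worth noting: `batch_verifier` now takes `&self` and pre-sizes its scalar vectors to the generator counts, which is what lets `IpStatement::verify` (earlier in this diff) drop its resize-on-demand block.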
@@ -1,5 +1,4 @@
 use core::ops::{Add, Sub, Mul};
-use std_shims::{vec, vec::Vec};

 use zeroize::Zeroize;

@@ -24,6 +23,13 @@ pub enum Variable {
     /// The index of the variable.
     index: usize,
   },
+  /// A variable within a Pedersen vector commitment, committed to with a generator from `h` (bold).
+  CH {
+    /// The commitment being indexed.
+    commitment: usize,
+    /// The index of the variable.
+    index: usize,
+  },
   /// A variable within a Pedersen commitment.
   V(usize),
 }

@@ -35,7 +41,7 @@ impl Zeroize for Variable {

 /// A linear combination.
 ///
-/// Specifically, `WL aL + WR aR + WO aO + WCG C_G + WV V + c`.
+/// Specifically, `WL aL + WR aR + WO aO + WCG C_G + WCH C_H + WV V + c`.
 #[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
 #[must_use]
 pub struct LinComb<F: PrimeField> {

@@ -49,6 +55,7 @@ pub struct LinComb<F: PrimeField> {
   pub(crate) WO: Vec<(usize, F)>,
   // Sparse representation once within a commitment
   pub(crate) WCG: Vec<Vec<(usize, F)>>,
+  pub(crate) WCH: Vec<Vec<(usize, F)>>,
   // Sparse representation of WV
   pub(crate) WV: Vec<(usize, F)>,
   pub(crate) c: F,

@@ -74,9 +81,15 @@ impl<F: PrimeField> Add<&LinComb<F>> for LinComb<F> {
     while self.WCG.len() < constraint.WCG.len() {
       self.WCG.push(vec![]);
     }
+    while self.WCH.len() < constraint.WCH.len() {
+      self.WCH.push(vec![]);
+    }
     for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) {
       sWC.extend(cWC);
     }
+    for (sWC, cWC) in self.WCH.iter_mut().zip(&constraint.WCH) {
+      sWC.extend(cWC);
+    }
     self.WV.extend(&constraint.WV);
     self.c += constraint.c;
     self

@@ -97,9 +110,15 @@ impl<F: PrimeField> Sub<&LinComb<F>> for LinComb<F> {
     while self.WCG.len() < constraint.WCG.len() {
       self.WCG.push(vec![]);
     }
+    while self.WCH.len() < constraint.WCH.len() {
+      self.WCH.push(vec![]);
+    }
     for (sWC, cWC) in self.WCG.iter_mut().zip(&constraint.WCG) {
       sWC.extend(cWC.iter().map(|(i, weight)| (*i, -*weight)));
     }
+    for (sWC, cWC) in self.WCH.iter_mut().zip(&constraint.WCH) {
+      sWC.extend(cWC.iter().map(|(i, weight)| (*i, -*weight)));
+    }
     self.WV.extend(constraint.WV.iter().map(|(i, weight)| (*i, -*weight)));
     self.c -= constraint.c;
     self

@@ -124,6 +143,11 @@ impl<F: PrimeField> Mul<F> for LinComb<F> {
         *weight *= scalar;
       }
     }
+    for WC in self.WCH.iter_mut() {
+      for (_, weight) in WC {
+        *weight *= scalar;
+      }
+    }
     for (_, weight) in self.WV.iter_mut() {
       *weight *= scalar;
     }

@@ -143,6 +167,7 @@ impl<F: PrimeField> LinComb<F> {
       WR: vec![],
       WO: vec![],
       WCG: vec![],
+      WCH: vec![],
       WV: vec![],
       c: F::ZERO,
     }

@@ -171,6 +196,14 @@ impl<F: PrimeField> LinComb<F> {
         }
         self.WCG[i].push((j, scalar))
       }
+      Variable::CH { commitment: i, index: j } => {
+        self.highest_c_index = self.highest_c_index.max(Some(i));
+        self.highest_a_index = self.highest_a_index.max(Some(j));
+        while self.WCH.len() <= i {
+          self.WCH.push(vec![]);
+        }
+        self.WCH[i].push((j, scalar))
+      }
       Variable::V(i) => {
         self.highest_v_index = self.highest_v_index.max(Some(i));
         self.WV.push((i, scalar));

@@ -205,6 +238,11 @@ impl<F: PrimeField> LinComb<F> {
     &self.WCG
   }

+  /// View the current weights for CH.
+  pub fn WCH(&self) -> &[Vec<(usize, F)>] {
+    &self.WCH
+  }
+
   /// View the current weights for V.
   pub fn WV(&self) -> &[(usize, F)] {
     &self.WV
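As a usage sketch of the new variant (mirroring the crate's own tests later in this diff; `expected` is an illustrative claimed evaluation, not a name from the source): a constraint binding values in both halves of a vector commitment combines CG and CH terms with a cancelling constant.

use ciphersuite::group::ff::PrimeField;

// Illustrative sketch: bind the first `g` slot and first `h` slot of vector
// commitment 0 to a claimed evaluation, so the constraint checks
// v_g + 2 * v_h - expected == 0.
fn bind_first_slots<F: PrimeField>(expected: F) -> LinComb<F> {
  LinComb::empty()
    .term(F::ONE, Variable::CG { commitment: 0, index: 0 })
    .term(F::from(2u64), Variable::CH { commitment: 0, index: 0 })
    .constant(-expected)
}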
@@ -1,5 +1,4 @@
 use core::ops::{Index, IndexMut};
-use std_shims::vec::Vec;

 use zeroize::Zeroize;

@@ -1,5 +1,4 @@
 use core::ops::{Index, IndexMut, Add, Sub, Mul};
-use std_shims::{vec, vec::Vec};

 use zeroize::Zeroize;

@@ -3,7 +3,7 @@ use rand_core::{RngCore, OsRng};
 use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

 use crate::{
-  ScalarVector, PedersenCommitment, PedersenVectorCommitment, Generators,
+  ScalarVector, PedersenCommitment, PedersenVectorCommitment,
   transcript::*,
   arithmetic_circuit_proof::{
     Variable, LinComb, ArithmeticCircuitStatement, ArithmeticCircuitWitness,

@@ -43,7 +43,7 @@ fn test_zero_arithmetic_circuit() {
     statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap();
     transcript.complete()
   };
-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();

   let mut transcript = VerifierTranscript::new([0; 32], &proof);
   let verifier_commmitments = transcript.read_commitments(0, 1);

@@ -59,8 +59,14 @@ fn test_vector_commitment_arithmetic_circuit() {

   let v1 = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
   let v2 = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
+  let v3 = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
+  let v4 = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
   let gamma = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
-  let commitment = (reduced.g_bold(0) * v1) + (reduced.g_bold(1) * v2) + (generators.h() * gamma);
+  let commitment = (reduced.g_bold(0) * v1) +
+    (reduced.g_bold(1) * v2) +
+    (reduced.h_bold(0) * v3) +
+    (reduced.h_bold(1) * v4) +
+    (generators.h() * gamma);
   let V = vec![];
   let C = vec![commitment];

@@ -77,14 +83,20 @@ fn test_vector_commitment_arithmetic_circuit() {
     vec![LinComb::empty()
       .term(<Ristretto as Ciphersuite>::F::ONE, Variable::CG { commitment: 0, index: 0 })
       .term(<Ristretto as Ciphersuite>::F::from(2u64), Variable::CG { commitment: 0, index: 1 })
-      .constant(-(v1 + (v2 + v2)))],
+      .term(<Ristretto as Ciphersuite>::F::from(3u64), Variable::CH { commitment: 0, index: 0 })
+      .term(<Ristretto as Ciphersuite>::F::from(4u64), Variable::CH { commitment: 0, index: 1 })
+      .constant(-(v1 + (v2 + v2) + (v3 + v3 + v3) + (v4 + v4 + v4 + v4)))],
     commitments.clone(),
   )
   .unwrap();
   let witness = ArithmeticCircuitWitness::<Ristretto>::new(
     aL,
     aR,
-    vec![PedersenVectorCommitment { g_values: ScalarVector(vec![v1, v2]), mask: gamma }],
+    vec![PedersenVectorCommitment {
+      g_values: ScalarVector(vec![v1, v2]),
+      h_values: ScalarVector(vec![v3, v4]),
+      mask: gamma,
+    }],
     vec![],
   )
   .unwrap();

@@ -93,7 +105,7 @@ fn test_vector_commitment_arithmetic_circuit() {
     statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap();
     transcript.complete()
   };
-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();

   let mut transcript = VerifierTranscript::new([0; 32], &proof);
   let verifier_commmitments = transcript.read_commitments(1, 0);

@@ -127,8 +139,13 @@ fn fuzz_test_arithmetic_circuit() {
       while g_values.0.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() {
         g_values.0.push(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
       }
+      let mut h_values = ScalarVector(vec![]);
+      while h_values.0.len() < ((OsRng.next_u64() % 8) + 1).try_into().unwrap() {
+        h_values.0.push(<Ristretto as Ciphersuite>::F::random(&mut OsRng));
+      }
       C.push(PedersenVectorCommitment {
         g_values,
+        h_values,
         mask: <Ristretto as Ciphersuite>::F::random(&mut OsRng),
       });
     }

@@ -176,6 +193,13 @@ fn fuzz_test_arithmetic_circuit() {
           constraint = constraint.term(weight, Variable::CG { commitment, index });
           eval += weight * C.g_values[index];
         }
+
+        for _ in 0 .. (OsRng.next_u64() % 4) {
+          let index = usize::try_from(OsRng.next_u64()).unwrap() % C.h_values.len();
+          let weight = <Ristretto as Ciphersuite>::F::random(&mut OsRng);
+          constraint = constraint.term(weight, Variable::CH { commitment, index });
+          eval += weight * C.h_values[index];
+        }
       }

       if !V.is_empty() {

@@ -194,7 +218,11 @@ fn fuzz_test_arithmetic_circuit() {

   let mut transcript = Transcript::new([0; 32]);
   let commitments = transcript.write_commitments(
-    C.iter().map(|C| C.commit(generators.g_bold_slice(), generators.h()).unwrap()).collect(),
+    C.iter()
+      .map(|C| {
+        C.commit(generators.g_bold_slice(), generators.h_bold_slice(), generators.h()).unwrap()
+      })
+      .collect(),
     V.iter().map(|V| V.commit(generators.g(), generators.h())).collect(),
   );

@@ -211,7 +239,7 @@ fn fuzz_test_arithmetic_circuit() {
     statement.clone().prove(&mut OsRng, &mut transcript, witness).unwrap();
     transcript.complete()
   };
-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();

   let mut transcript = VerifierTranscript::new([0; 32], &proof);
   let verifier_commmitments = transcript.read_commitments(C.len(), V.len());
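The updated test's constant is built so the weighted openings cancel exactly. A hedged numeric restatement of that identity (plain integers stand in for field elements):

// For any committed values, 1*v1 + 2*v2 + 3*v3 + 4*v4 plus the constant
// -(v1 + (v2 + v2) + (v3 + v3 + v3) + (v4 + v4 + v4 + v4)) is identically zero,
// which is what makes the constraint satisfiable by the honest witness.
fn constraint_evaluates_to_zero(v1: i64, v2: i64, v3: i64, v4: i64) -> bool {
  let weighted = v1 + (2 * v2) + (3 * v3) + (4 * v4);
  let constant = -(v1 + (v2 + v2) + (v3 + v3 + v3) + (v4 + v4 + v4 + v4));
  (weighted + constant) == 0
}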
@@ -8,7 +8,7 @@ use ciphersuite::{
 };

 use crate::{
-  ScalarVector, PointVector, Generators,
+  ScalarVector, PointVector,
   transcript::*,
   inner_product::{P, IpStatement, IpWitness},
   tests::generators,

@@ -41,7 +41,7 @@ fn test_zero_inner_product() {
     transcript.complete()
   };

-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();
   IpStatement::<Ristretto>::new(
     reduced,
     ScalarVector(vec![<Ristretto as Ciphersuite>::F::ONE; 1]),

@@ -58,7 +58,7 @@ fn test_zero_inner_product() {
 fn test_inner_product() {
   // P = sum(g_bold * a, h_bold * b)
   let generators = generators::<Ristretto>(32);
-  let mut verifier = Generators::batch_verifier();
+  let mut verifier = generators.batch_verifier();
   for i in [1, 2, 4, 8, 16, 32] {
     let generators = generators.reduce(i).unwrap();
     let g = generators.g();
@@ -1,12 +1,9 @@
-use std_shims::{vec::Vec, io};
+use std::io;

 use blake2::{Digest, Blake2b512};

 use ciphersuite::{
-  group::{
-    ff::{Field, PrimeField},
-    GroupEncoding,
-  },
+  group::{ff::PrimeField, GroupEncoding},
   Ciphersuite,
 };

@@ -16,11 +13,27 @@ const SCALAR: u8 = 0;
 const POINT: u8 = 1;
 const CHALLENGE: u8 = 2;

-fn challenge<C: Ciphersuite>(digest: &mut Blake2b512) -> C::F {
-  digest.update([CHALLENGE]);
-  let chl = digest.clone().finalize().into();
-
-  let res = C::reduce_512(chl);
+fn challenge<F: PrimeField>(digest: &mut Blake2b512) -> F {
+  // Panic if this is such a wide field, we won't successfully perform a reduction into an unbiased
+  // scalar
+  debug_assert!((F::NUM_BITS + 128) < 512);
+
+  digest.update([CHALLENGE]);
+  let chl = digest.clone().finalize();
+
+  let mut res = F::ZERO;
+  for (i, mut byte) in chl.iter().cloned().enumerate() {
+    for j in 0 .. 8 {
+      let lsb = byte & 1;
+      let mut bit = F::from(u64::from(lsb));
+      for _ in 0 .. ((i * 8) + j) {
+        bit = bit.double();
+      }
+      res += bit;
+
+      byte >>= 1;
+    }
+  }

   // Negligible probability
   if bool::from(res.is_zero()) {

@@ -70,8 +83,6 @@ impl Transcript {
   }

   /// Push a scalar onto the transcript.
-  ///
-  /// The order and layout of this must be constant to the context.
   pub fn push_scalar(&mut self, scalar: impl PrimeField) {
     self.digest.update([SCALAR]);
     let bytes = scalar.to_repr();

@@ -80,8 +91,6 @@ impl Transcript {
   }

   /// Push a point onto the transcript.
-  ///
-  /// The order and layout of this must be constant to the context.
   pub fn push_point(&mut self, point: impl GroupEncoding) {
     self.digest.update([POINT]);
     let bytes = point.to_bytes();

@@ -95,11 +104,9 @@ impl Transcript {
     C: Vec<C::G>,
     V: Vec<C::G>,
   ) -> Commitments<C> {
-    self.digest.update(u32::try_from(C.len()).unwrap().to_le_bytes());
     for C in &C {
       self.push_point(*C);
     }
-    self.digest.update(u32::try_from(V.len()).unwrap().to_le_bytes());
     for V in &V {
       self.push_point(*V);
     }

@@ -107,14 +114,8 @@ impl Transcript {
   }

   /// Sample a challenge.
-  pub fn challenge<C: Ciphersuite>(&mut self) -> C::F {
-    challenge::<C>(&mut self.digest)
-  }
-
-  /// Sample a challenge as a byte array.
-  pub fn challenge_bytes(&mut self) -> [u8; 64] {
-    self.digest.update([CHALLENGE]);
-    self.digest.clone().finalize().into()
+  pub fn challenge<F: PrimeField>(&mut self) -> F {
+    challenge(&mut self.digest)
   }

   /// Complete a transcript, yielding the fully serialized proof.

@@ -138,36 +139,20 @@ impl<'a> VerifierTranscript<'a> {
   }

   /// Read a scalar from the transcript.
-  ///
-  /// The order and layout of this must be constant to the context.
   pub fn read_scalar<C: Ciphersuite>(&mut self) -> io::Result<C::F> {
-    // Read the scalar onto the transcript using the serialization present in the transcript
-    self.digest.update([SCALAR]);
-    let scalar_len = <C::F as PrimeField>::Repr::default().as_ref().len();
-    if self.transcript.len() < scalar_len {
-      Err(io::Error::new(io::ErrorKind::Other, "not enough bytes to read_scalar"))?;
-    }
-    self.digest.update(&self.transcript[.. scalar_len]);
-
-    // Read the actual scalar, where `read_F` ensures its canonically serialized
     let scalar = C::read_F(&mut self.transcript)?;
+    self.digest.update([SCALAR]);
+    let bytes = scalar.to_repr();
+    self.digest.update(bytes);
     Ok(scalar)
   }

   /// Read a point from the transcript.
-  ///
-  /// The order and layout of this must be constant to the context.
   pub fn read_point<C: Ciphersuite>(&mut self) -> io::Result<C::G> {
-    // Read the point onto the transcript using the serialization present in the transcript
-    self.digest.update([POINT]);
-    let point_len = <C::G as GroupEncoding>::Repr::default().as_ref().len();
-    if self.transcript.len() < point_len {
-      Err(io::Error::new(io::ErrorKind::Other, "not enough bytes to read_point"))?;
-    }
-    self.digest.update(&self.transcript[.. point_len]);
-
-    // Read the actual point, where `read_G` ensures its canonically serialized
     let point = C::read_G(&mut self.transcript)?;
+    self.digest.update([POINT]);
+    let bytes = point.to_bytes();
+    self.digest.update(bytes);
     Ok(point)
   }

@@ -180,12 +165,10 @@ impl<'a> VerifierTranscript<'a> {
     C: usize,
     V: usize,
   ) -> io::Result<Commitments<C>> {
-    self.digest.update(u32::try_from(C).unwrap().to_le_bytes());
     let mut C_vec = Vec::with_capacity(C);
     for _ in 0 .. C {
       C_vec.push(self.read_point::<C>()?);
     }
-    self.digest.update(u32::try_from(V).unwrap().to_le_bytes());
     let mut V_vec = Vec::with_capacity(V);
     for _ in 0 .. V {
       V_vec.push(self.read_point::<C>()?);

@@ -194,17 +177,11 @@ impl<'a> VerifierTranscript<'a> {
   }

   /// Sample a challenge.
-  pub fn challenge<C: Ciphersuite>(&mut self) -> C::F {
-    challenge::<C>(&mut self.digest)
+  pub fn challenge<F: PrimeField>(&mut self) -> F {
+    challenge(&mut self.digest)
   }

-  /// Sample a challenge as a byte array.
-  pub fn challenge_bytes(&mut self) -> [u8; 64] {
-    self.digest.update([CHALLENGE]);
-    self.digest.clone().finalize().into()
-  }
-
-  /// Complete the transcript transcript, yielding what remains.
+  /// Complete the transcript, returning the advanced slice.
   pub fn complete(self) -> &'a [u8] {
     self.transcript
   }
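The nested bit loop in the new `challenge` computes Σ bit·2^(8i+j) over the digest's bits, i.e. it interprets the 64-byte digest as a little-endian integer and reduces it into F purely through field additions, replacing the `Ciphersuite::reduce_512` the other branch relies on. A hedged, equivalent restatement with a running power of two instead of re-doubling from zero per bit (challenges are public, so the data-dependent branch here is acceptable):

use ff::PrimeField;

// Interpret `bytes` as a little-endian integer and fold it into F.
fn reduce_le_bytes<F: PrimeField>(bytes: &[u8]) -> F {
  let mut res = F::ZERO;
  let mut power = F::ONE; // 2^(8 * i + j) for byte i, bit j
  for byte in bytes {
    for j in 0 .. 8 {
      if ((byte >> j) & 1) == 1 {
        res += power;
      }
      power = power.double();
    }
  }
  res
}

The `debug_assert!((F::NUM_BITS + 128) < 512)` guards the bias argument: with at least 128 bits of headroom over the field size, the reduction's bias stays below 2^-128.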
@@ -17,22 +17,20 @@ rustdoc-args = ["--cfg", "docsrs"]
 rustversion = "1"
 hex-literal = { version = "0.4", default-features = false }

-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, optional = true }
-
-rand_core = { version = "0.6", default-features = false }
-zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
-subtle = { version = "^2.4", default-features = false }
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
+subtle = { version = "^2.4", default-features = false, features = ["std"] }

 generic-array = { version = "0.14", default-features = false }
 crypto-bigint = { version = "0.5", default-features = false, features = ["zeroize"] }

 k256 = { version = "0.13", default-features = false, features = ["arithmetic"] }

-blake2 = { version = "0.10", default-features = false }
-ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false }
-ec-divisors = { path = "../divisors", default-features = false }
-generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets", default-features = false }
+blake2 = { version = "0.10", default-features = false, features = ["std"] }
+ciphersuite = { path = "../../ciphersuite", version = "0.4", default-features = false, features = ["std"] }
+ec-divisors = { path = "../divisors" }
+generalized-bulletproofs-ec-gadgets = { path = "../ec-gadgets" }

 [dev-dependencies]
 hex = "0.4"

@@ -40,8 +38,3 @@ hex = "0.4"
 rand_core = { version = "0.6", features = ["std"] }

 ff-group-tests = { path = "../../ff-group-tests" }
-
-[features]
-alloc = ["std-shims", "zeroize/alloc", "ciphersuite/alloc"]
-std = ["std-shims/std", "rand_core/std", "zeroize/std", "subtle/std", "blake2/std", "ciphersuite/std", "ec-divisors/std", "generalized-bulletproofs-ec-gadgets/std"]
-default = ["std"]
@@ -1,9 +1,5 @@
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]
-
-#[cfg(any(feature = "alloc", feature = "std"))]
-use std_shims::io::{self, Read};

 use generic_array::typenum::{Sum, Diff, Quot, U, U1, U2};
 use ciphersuite::group::{ff::PrimeField, Group};

@@ -37,29 +33,10 @@ impl ciphersuite::Ciphersuite for Secq256k1 {
     Point::generator()
   }

-  fn reduce_512(scalar: [u8; 64]) -> Self::F {
-    Scalar::wide_reduce(scalar)
-  }
-
   fn hash_to_F(dst: &[u8], data: &[u8]) -> Self::F {
     use blake2::Digest;
     Scalar::wide_reduce(Self::H::digest([dst, data].concat()).as_slice().try_into().unwrap())
   }
-
-  // We override the provided impl, which compares against the reserialization, because
-  // we already require canonicity
-  #[cfg(any(feature = "alloc", feature = "std"))]
-  #[allow(non_snake_case)]
-  fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
-    use ciphersuite::group::GroupEncoding;
-
-    let mut encoding = <Self::G as GroupEncoding>::Repr::default();
-    reader.read_exact(encoding.as_mut())?;
-
-    let point = Option::<Self::G>::from(Self::G::from_bytes(&encoding))
-      .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "invalid point"))?;
-    Ok(point)
-  }
 }

 impl generalized_bulletproofs_ec_gadgets::DiscreteLogParameters for Secq256k1 {
@@ -40,8 +40,7 @@ impl ConstantTimeEq for Point {
     let y1 = self.y * other.z;
     let y2 = other.y * self.z;

-    // Identity or equivalent
-    (self.z.is_zero() & other.z.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2))
+    (self.x.is_zero() & other.x.is_zero()) | (x1.ct_eq(&x2) & y1.ct_eq(&y2))
   }
 }

@@ -193,7 +192,6 @@ impl Group for Point {
     Point { x: FieldElement::ZERO, y: FieldElement::ONE, z: FieldElement::ZERO }
   }
   fn generator() -> Self {
-    // Point with the lowest valid x-coordinate
     Point {
       x: FieldElement::from_repr(
         hex_literal::hex!("0000000000000000000000000000000000000000000000000000000000000001")

@@ -336,10 +334,8 @@ impl GroupEncoding for Point {
     // If this the identity, set y to 1
     let y =
       CtOption::conditional_select(&y, &CtOption::new(FieldElement::ONE, 1.into()), is_identity);
-    // If this the identity, set y to 1 and z to 0 (instead of 1)
-    let z = <_>::conditional_select(&FieldElement::ONE, &FieldElement::ZERO, is_identity);
     // Create the point if we have a y solution
-    let point = y.map(|y| Point { x, y, z });
+    let point = y.map(|y| Point { x, y, z: FieldElement::ONE });

     let not_negative_zero = !(is_identity & sign);
     // Only return the point if it isn't -0 and the sign byte wasn't malleated
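The equality check above compares projective points by cross-multiplication, avoiding field inversions; the two branches differ only in how they detect the identity (the base branch tests z == 0, the compared branch x == 0). A hedged, non-constant-time sketch of the cross-multiplication identity over plain integers (real curve code does this in constant time over field elements, as above):

// (x1/z1, y1/z1) == (x2/z2, y2/z2) iff x1*z2 == x2*z1 and y1*z2 == y2*z1,
// with the identity (z == 0) handled separately. Illustrative only.
fn projective_eq(a: (i64, i64, i64), b: (i64, i64, i64)) -> bool {
  let ((x1, y1, z1), (x2, y2, z2)) = (a, b);
  if (z1 == 0) || (z2 == 0) {
    return (z1 == 0) && (z2 == 0);
  }
  ((x1 * z2) == (x2 * z1)) && ((y1 * z2) == (y2 * z1))
}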
@@ -17,12 +17,12 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-rand_core = "0.9"
+rand_core = "0.6"

 subtle = "^2.4"

-ff = { version = "0.14.0-pre.0", features = ["bits"] }
-group = "0.14.0-pre.0"
+ff = { version = "0.13", features = ["bits"] }
+group = "0.13"

 [dev-dependencies]
 k256 = { version = "^0.13.1", default-features = false, features = ["std", "arithmetic", "bits"] }

@@ -30,4 +30,4 @@ p256 = { version = "^0.13.1", default-features = false, features = ["std", "arit

 bls12_381 = "0.8"

-pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+pasta_curves = "0.5"
@@ -154,20 +154,18 @@ pub fn test_group<R: RngCore, G: Group>(rng: &mut R) {

 /// Test encoding and decoding of group elements.
 pub fn test_encoding<G: PrimeGroup>() {
-  let test = |point: G, msg| -> G {
+  let test = |point: G, msg| {
     let bytes = point.to_bytes();
     let mut repr = G::Repr::default();
     repr.as_mut().copy_from_slice(bytes.as_ref());
-    let decoded = G::from_bytes(&repr).unwrap();
-    assert_eq!(point, decoded, "{msg} couldn't be encoded and decoded");
+    assert_eq!(point, G::from_bytes(&repr).unwrap(), "{msg} couldn't be encoded and decoded");
     assert_eq!(
       point,
       G::from_bytes_unchecked(&repr).unwrap(),
       "{msg} couldn't be encoded and decoded",
     );
-    decoded
   };
-  assert!(bool::from(test(G::identity(), "identity").is_identity()));
+  test(G::identity(), "identity");
   test(G::generator(), "generator");
   test(G::generator() + G::generator(), "(generator * 2)");
 }
@@ -203,15 +203,14 @@ pub trait SignMachine<S>: Send + Sync + Sized {
   /// SignatureMachine this SignMachine turns into.
   type SignatureMachine: SignatureMachine<S, SignatureShare = Self::SignatureShare>;

-  /// Cache this preprocess for usage later.
-  ///
-  /// This cached preprocess MUST only be used once. Reuse of it enables recovery of your private
-  /// key share. Third-party recovery of a cached preprocess also enables recovery of your private
-  /// key share, so this MUST be treated with the same security as your private key share.
+  /// Cache this preprocess for usage later. This cached preprocess MUST only be used once. Reuse
+  /// of it enables recovery of your private key share. Third-party recovery of a cached preprocess
+  /// also enables recovery of your private key share, so this MUST be treated with the same
+  /// security as your private key share.
   fn cache(self) -> CachedPreprocess;

   /// Create a sign machine from a cached preprocess.
-  ///
   /// After this, the preprocess must be deleted so it's never reused. Any reuse will presumably
   /// cause the signer to leak their secret share.
   fn from_cache(

@@ -220,14 +219,11 @@ pub trait SignMachine<S>: Send + Sync + Sized {
     cache: CachedPreprocess,
   ) -> (Self, Self::Preprocess);

-  /// Read a Preprocess message.
-  ///
-  /// Despite taking self, this does not save the preprocess. It must be externally cached and
-  /// passed into sign.
+  /// Read a Preprocess message. Despite taking self, this does not save the preprocess.
+  /// It must be externally cached and passed into sign.
   fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess>;

   /// Sign a message.
-  ///
   /// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
   /// to all participants, over an authenticated channel. The parties who participate here will
   /// become the signing set for this session.
@@ -59,7 +59,7 @@ pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
   for pair in pairs {
     let p = groupings.len();
     let mut bits = pair.0.to_le_bits();
-    groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
+    groupings.push(vec![0; (bits.len() + (w_usize - 1)) / w_usize]);

     for (i, mut bit) in bits.iter_mut().enumerate() {
       let mut bit = u8_from_bool(&mut bit);
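Both sides compute the number of w-bit windows needed to cover every scalar bit; `div_ceil` and the manual form are the same ceiling division. A small sketch of the equivalence (illustrative names):

// Number of w-bit windows needed for `bits` bits: ceil(bits / w).
fn windows(bits: usize, w: usize) -> usize {
  assert!(w != 0);
  let manual = (bits + (w - 1)) / w;
  debug_assert_eq!(manual, bits.div_ceil(w));
  manual
}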
@@ -31,8 +31,9 @@ fn weight<D: Send + Clone + SecureDigest, F: PrimeField>(digest: &mut DigestTran
   // Derive a scalar from enough bits of entropy that bias is < 2^128
   // This can't be const due to its usage of a generic
   // Also due to the usize::try_from, yet that could be replaced with an `as`
+  // The + 7 forces it to round up
   #[allow(non_snake_case)]
-  let BYTES: usize = usize::try_from((F::NUM_BITS + 128).div_ceil(8)).unwrap();
+  let BYTES: usize = usize::try_from(((F::NUM_BITS + 128) + 7) / 8).unwrap();

   let mut remaining = BYTES;

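As a worked instance of that ceiling division (a sketch; the 255-bit figure is just an example field size, not one from the source):

// For a 255-bit field: (255 + 128 + 7) / 8 == 390 / 8 == 48 bytes, i.e. 384
// bits of digest output, which exceeds NUM_BITS + 128 and so keeps the bias of
// the reduced scalar below 2^-128.
const fn bytes_for_unbiased_scalar(num_bits: u32) -> u32 {
  ((num_bits + 128) + 7) / 8
}

const _: () = assert!(bytes_for_unbiased_scalar(255) == 48);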
@@ -121,10 +121,7 @@ license-files = [
 multiple-versions = "warn"
 wildcards = "warn"
 highlight = "all"
-deny = [
-  { name = "serde_derive", version = ">=1.0.172, <1.0.185" },
-  { name = "hashbrown", version = ">=0.15" },
-]
+deny = [ { name = "serde_derive", version = ">=1.0.172, <1.0.185" } ]

 [sources]
 unknown-registry = "deny"

@@ -135,4 +132,5 @@ allow-git = [
   "https://github.com/serai-dex/substrate-bip39",
   "https://github.com/serai-dex/substrate",
   "https://github.com/kayabaNerve/pasta_curves",
+  "https://github.com/alloy-rs/core",
 ]
@@ -5,20 +5,20 @@ GEM
       public_suffix (>= 2.0.2, < 7.0)
     bigdecimal (3.1.8)
     colorator (1.1.0)
-    concurrent-ruby (1.3.4)
+    concurrent-ruby (1.3.3)
     em-websocket (0.5.3)
       eventmachine (>= 0.12.9)
       http_parser.rb (~> 0)
     eventmachine (1.2.7)
     ffi (1.17.0-x86_64-linux-gnu)
     forwardable-extended (2.6.0)
-    google-protobuf (4.28.2-x86_64-linux)
+    google-protobuf (4.27.3-x86_64-linux)
       bigdecimal
       rake (>= 13)
     http_parser.rb (0.8.0)
-    i18n (1.14.6)
+    i18n (1.14.5)
       concurrent-ruby (~> 1.0)
-    jekyll (4.3.4)
+    jekyll (4.3.3)
       addressable (~> 2.4)
       colorator (~> 1.0)
       em-websocket (~> 0.5)

@@ -63,15 +63,17 @@ GEM
     rb-fsevent (0.11.2)
     rb-inotify (0.11.1)
      ffi (~> 1.0)
-    rexml (3.3.7)
-    rouge (4.4.0)
+    rexml (3.3.4)
+      strscan
+    rouge (4.3.0)
     safe_yaml (1.0.5)
-    sass-embedded (1.79.3-x86_64-linux-gnu)
-      google-protobuf (~> 4.27)
+    sass-embedded (1.77.8-x86_64-linux-gnu)
+      google-protobuf (~> 4.26)
+    strscan (3.1.0)
     terminal-table (3.0.2)
       unicode-display_width (>= 1.1.1, < 3)
-    unicode-display_width (2.6.0)
-    webrick (1.8.2)
+    unicode-display_width (2.5.0)
+    webrick (1.8.1)

 PLATFORMS
   x86_64-linux
@@ -6,7 +6,7 @@ pub(crate) use std::{
 pub(crate) use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
 pub(crate) use schnorr_signatures::SchnorrSignature;

-pub(crate) use serai_primitives::ExternalNetworkId;
+pub(crate) use serai_primitives::NetworkId;

 pub(crate) use tokio::{
   io::{AsyncReadExt, AsyncWriteExt},

@@ -197,7 +197,10 @@ async fn main() {
     KEYS.write().unwrap().insert(service, key);
     let mut queues = QUEUES.write().unwrap();
     if service == Service::Coordinator {
-      for network in serai_primitives::EXTERNAL_NETWORKS {
+      for network in serai_primitives::NETWORKS {
+        if network == NetworkId::Serai {
+          continue;
+        }
         queues.insert(
           (service, Service::Processor(network)),
           RwLock::new(Queue(db.clone(), service, Service::Processor(network))),

@@ -211,13 +214,17 @@ async fn main() {
     }
   };

-  // Make queues for each ExternalNetworkId
-  for network in serai_primitives::EXTERNAL_NETWORKS {
+  // Make queues for each NetworkId, other than Serai
+  for network in serai_primitives::NETWORKS {
+    if network == NetworkId::Serai {
+      continue;
+    }
     // Use a match so we error if the list of NetworkIds changes
     let Some(key) = read_key(match network {
-      ExternalNetworkId::Bitcoin => "BITCOIN_KEY",
-      ExternalNetworkId::Ethereum => "ETHEREUM_KEY",
-      ExternalNetworkId::Monero => "MONERO_KEY",
+      NetworkId::Serai => unreachable!(),
+      NetworkId::Bitcoin => "BITCOIN_KEY",
+      NetworkId::Ethereum => "ETHEREUM_KEY",
+      NetworkId::Monero => "MONERO_KEY",
     }) else {
       continue;
     };
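The two branches iterate the same set of networks: one defines EXTERNAL_NETWORKS up front, the other takes NETWORKS and skips Serai at each use site. A hedged sketch of the equivalence (the enum and constant here are minimal stand-ins for serai_primitives, not its actual definitions):

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum NetworkId {
  Serai,
  Bitcoin,
  Ethereum,
  Monero,
}

const NETWORKS: [NetworkId; 4] =
  [NetworkId::Serai, NetworkId::Bitcoin, NetworkId::Ethereum, NetworkId::Monero];

// Filtering Serai out of NETWORKS yields the same iteration as a dedicated
// EXTERNAL_NETWORKS constant would.
fn external_networks() -> impl Iterator<Item = NetworkId> {
  NETWORKS.into_iter().filter(|network| *network != NetworkId::Serai)
}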
@@ -3,11 +3,11 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_primitives::ExternalNetworkId;
+use serai_primitives::NetworkId;

 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub enum Service {
-  Processor(ExternalNetworkId),
+  Processor(NetworkId),
   Coordinator,
 }

@@ -21,8 +21,8 @@ tower = "0.5"
 serde_json = { version = "1", default-features = false }
 simple-request = { path = "../../../common/request", version = "0.1", default-features = false }

-alloy-json-rpc = { version = "0.14", default-features = false }
-alloy-transport = { version = "0.14", default-features = false }
+alloy-json-rpc = { version = "0.9", default-features = false }
+alloy-transport = { version = "0.9", default-features = false }

 [features]
 default = ["tls"]
@@ -29,14 +29,14 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }

 k256 = { version = "^0.13.1", default-features = false, features = ["ecdsa"] }

-alloy-core = { version = "1", default-features = false }
-alloy-sol-types = { version = "1", default-features = false }
+alloy-core = { version = "0.8", default-features = false }
+alloy-sol-types = { version = "0.8", default-features = false }

 alloy-simple-request-transport = { path = "../../../networks/ethereum/alloy-simple-request-transport", default-features = false }
-alloy-rpc-types-eth = { version = "0.14", default-features = false }
-alloy-rpc-client = { version = "0.14", default-features = false }
-alloy-provider = { version = "0.14", default-features = false }
+alloy-rpc-types-eth = { version = "0.9", default-features = false }
+alloy-rpc-client = { version = "0.9", default-features = false }
+alloy-provider = { version = "0.9", default-features = false }

-alloy-node-bindings = { version = "0.14", default-features = false }
+alloy-node-bindings = { version = "0.9", default-features = false }

 tokio = { version = "1", default-features = false, features = ["macros"] }
@@ -2,5 +2,4 @@

 An Ethereum contract to verify Schnorr signatures.

-This crate will fail to build if the expected version of `solc` is not
-installed and available.
+This crate will fail to build if `solc` is not installed and available.
@@ -3,6 +3,10 @@
 #![deny(missing_docs)]
 #![allow(non_snake_case)]

+/// The initialization bytecode of the Schnorr library.
+pub const INIT_BYTECODE: &str =
+  include_str!(concat!(env!("OUT_DIR"), "/ethereum-schnorr-contract/Schnorr.bin"));
+
 mod public_key;
 pub use public_key::PublicKey;
 mod signature;
@@ -1,5 +1,5 @@
 use subtle::Choice;
-use group::{ff::PrimeField, Group};
+use group::ff::PrimeField;
 use k256::{
   elliptic_curve::{
     ops::Reduce,

@@ -22,10 +22,6 @@ impl PublicKey {
   /// bounds such as parity).
   #[must_use]
   pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
-    if bool::from(A.is_identity()) {
-      None?;
-    }
-
     let affine = A.to_affine();

     // Only allow even keys to save a word within Ethereum
@@ -32,7 +32,7 @@ mod abi {
   pub(crate) use TestSchnorr::*;
 }

-async fn setup_test() -> (AnvilInstance, Arc<RootProvider>, Address) {
+async fn setup_test() -> (AnvilInstance, Arc<RootProvider<SimpleRequest>>, Address) {
   let anvil = Anvil::new().spawn();

   let provider = Arc::new(RootProvider::new(

@@ -61,7 +61,7 @@ async fn setup_test() -> (AnvilInstance, Arc<RootProvider>, Address) {
 }

 async fn call_verify(
-  provider: &RootProvider,
+  provider: &RootProvider<SimpleRequest>,
   address: Address,
   public_key: &PublicKey,
   message: &[u8],

@@ -80,8 +80,10 @@ async fn call_verify(
       .abi_encode()
       .into(),
   ));
-  let bytes = provider.call(call).await.unwrap();
-  abi::verifyCall::abi_decode_returns(&bytes).unwrap()
+  let bytes = provider.call(&call).await.unwrap();
+  let res = abi::verifyCall::abi_decode_returns(&bytes, true).unwrap();
+
+  res._0
 }

 #[tokio::test]
Some files were not shown because too many files have changed in this diff.