Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-10 21:19:24 +00:00)

Compare commits: 48 commits, next ... 1b755a5d48
| SHA1 |
|---|
| 1b755a5d48 |
| e5efcd56ba |
| 5d60b3c2ae |
| ae923b24ff |
| d304cd97e1 |
| 2b56dcdf3f |
| 90804c4c30 |
| 46caca2f51 |
| 2077e485bb |
| 28dbef8a1c |
| 3541197aa5 |
| a2209dd6ff |
| 2032cf355f |
| fe41b09fd4 |
| 74bad049a7 |
| 72fefb3d85 |
| 200c1530a4 |
| 5736b87b57 |
| ada94e8c5d |
| 75240ed327 |
| 6177cf5c07 |
| 0d38dc96b6 |
| e8094523ff |
| 53a64bc7e2 |
| 3c6e889732 |
| 354efc0192 |
| e20058feae |
| 09f0714894 |
| d3d539553c |
| b08ae8e6a7 |
| 35db2924b4 |
| bfff823bf7 |
| 352af85498 |
| ecad89b269 |
| 48f5ed71d7 |
| ed9cbdd8e0 |
| 0ac11defcc |
| 24e89316d5 |
| 3f03dac050 |
| 820b710928 |
| 88c7ae3e7d |
| dd5e43760d |
| 776e417fd2 |
| 2f8ce15a92 |
| af56304676 |
| 62a2c4f20e |
| c69841710a |
| 3158590675 |
.github/actions/bitcoin/action.yml (vendored, 2 changes)

```diff
@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: "30.0"
+    default: "29.1"
 
 runs:
   using: "composite"
```
.github/actions/build-dependencies/action.yml (vendored, 21 changes)

```diff
@@ -7,10 +7,6 @@ runs:
   - name: Remove unused packages
     shell: bash
     run: |
-      # Ensure the repositories are synced
-      sudo apt update -y
-
-      # Actually perform the removals
       sudo apt remove -y "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
       sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
       sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
@@ -18,9 +14,8 @@
       sudo apt remove -y --allow-remove-essential -f shim-signed *python3*
       # This removal command requires the prior removals due to unmet dependencies otherwise
       sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
-
       # Reinstall python3 as a general dependency of a functional operating system
-      sudo apt install -y python3 --fix-missing
+      sudo apt install python3
     if: runner.os == 'Linux'
 
   - name: Remove unused packages
@@ -38,23 +33,19 @@
     shell: bash
     run: |
       if [ "$RUNNER_OS" == "Linux" ]; then
-        sudo apt install -y ca-certificates protobuf-compiler libclang-dev
+        sudo apt install -y ca-certificates protobuf-compiler
       elif [ "$RUNNER_OS" == "Windows" ]; then
         choco install protoc
       elif [ "$RUNNER_OS" == "macOS" ]; then
-        brew install protobuf llvm
-        HOMEBREW_ROOT_PATH=/opt/homebrew # Apple Silicon
-        if [ $(uname -m) = "x86_64" ]; then HOMEBREW_ROOT_PATH=/usr/local; fi # Intel
-        ls $HOMEBREW_ROOT_PATH/opt/llvm/lib | grep "libclang.dylib" # Make sure this installed `libclang`
-        echo "DYLD_LIBRARY_PATH=$HOMEBREW_ROOT_PATH/opt/llvm/lib:$DYLD_LIBRARY_PATH" >> "$GITHUB_ENV"
+        brew install protobuf
       fi
 
   - name: Install solc
     shell: bash
     run: |
-      cargo +1.91 install svm-rs --version =0.5.19
-      svm install 0.8.29
-      svm use 0.8.29
+      cargo +1.89 install svm-rs --version =0.5.18
+      svm install 0.8.26
+      svm use 0.8.26
 
   - name: Remove preinstalled Docker
     shell: bash
```
.github/actions/monero-wallet-rpc/action.yml (vendored, 2 changes)

```diff
@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
 
 runs:
   using: "composite"
```
.github/actions/monero/action.yml (vendored, 2 changes)

```diff
@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
 
 runs:
   using: "composite"
```
.github/actions/test-dependencies/action.yml (vendored, 4 changes)

```diff
@@ -5,12 +5,12 @@ inputs:
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.4.3
+    default: v0.18.3.4
 
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: "30.0"
+    default: "29.1"
 
 runs:
   using: "composite"
```
.github/nightly-version (vendored, 2 changes)

```diff
@@ -1 +1 @@
-nightly-2025-11-11
+nightly-2025-09-01
```
.github/workflows/daily-deny.yml (vendored, 2 changes)

```diff
@@ -18,7 +18,7 @@ jobs:
       key: rust-advisory-db
 
   - name: Install cargo deny
-    run: cargo +1.91 install cargo-deny --version =0.18.5
+    run: cargo +1.89 install cargo-deny --version =0.18.3
 
   - name: Run cargo deny
     run: cargo deny -L error --all-features check --hide-inclusion-graph
```
.github/workflows/lint.yml (vendored, 26 changes)

```diff
@@ -11,7 +11,7 @@ jobs:
   clippy:
     strategy:
       matrix:
-        os: [ubuntu-latest, macos-15-intel, macos-latest, windows-latest]
+        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
     runs-on: ${{ matrix.os }}
 
     steps:
@@ -26,7 +26,7 @@
         uses: ./.github/actions/build-dependencies
 
       - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-src -c clippy
 
       - name: Run Clippy
         run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module
@@ -52,7 +52,7 @@
       key: rust-advisory-db
 
   - name: Install cargo deny
-    run: cargo +1.91 install cargo-deny --version =0.18.5
+    run: cargo +1.89 install cargo-deny --version =0.18.3
 
   - name: Run cargo deny
     run: cargo deny -L error --all-features check --hide-inclusion-graph
@@ -88,8 +88,8 @@
   - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
   - name: Verify all dependencies are in use
     run: |
-      cargo +1.91 install cargo-machete --version =0.9.1
-      cargo +1.91 machete
+      cargo +1.89 install cargo-machete --version =0.8.0
+      cargo +1.89 machete
 
   msrv:
     runs-on: ubuntu-latest
@@ -98,7 +98,7 @@
   - name: Verify claimed `rust-version`
     shell: bash
     run: |
-      cargo +1.91 install cargo-msrv --version =0.18.4
+      cargo +1.89 install cargo-msrv --version =0.18.4
 
       function check_msrv {
         # We `cd` into the directory passed as the first argument, but will return to the
@@ -146,15 +146,17 @@
       cargo_toml_lines=$(cat ./Cargo.toml | wc -l)
       # Keep all lines after the start of the array, then keep all lines before the next "]"
       members=$(cat Cargo.toml | grep "members\ \=\ \[" -m1 -A$cargo_toml_lines | grep "]" -m1 -B$cargo_toml_lines)
-      # Prune `members = [` to `[` by replacing the first line with just `[`
-      members=$(echo "$members" | sed "1s/.*/\[/")
 
       # Parse out any comments, whitespace, including comments post-fixed on the same line as an entry
       # We accomplish the latter by pruning all characters after the entry's ","
      members=$(echo "$members" | grep -Ev "^[[:space:]]*(#|$)" | awk -F',' '{print $1","}')
+      # Replace the first line, which was "members = [" and is now "members = [,", with "["
+      members=$(echo "$members" | sed "1s/.*/\[/")
+      # Correct the last line, which was malleated to "],"
+      members=$(echo "$members" | sed "$(echo "$members" | wc -l)s/\]\,/\]/")
 
       # Don't check the patches
       members=$(echo "$members" | grep -v "patches")
       # Don't check the following
       # Most of these are binaries, with the exception of the Substrate runtime which has a
       # bespoke build pipeline
@@ -190,12 +192,12 @@
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Build Dependencies
-        uses: ./.github/actions/build-dependencies
-
       - name: Slither
         run: |
+          python3 -m pip install solc-select
+          solc-select install 0.8.26
+          solc-select use 0.8.26
+
           python3 -m pip install slither-analyzer
 
           slither --include-paths ./networks/ethereum/schnorr/contracts/Schnorr.sol
```
.github/workflows/pages.yml (vendored, 4 changes)

```diff
@@ -69,8 +69,8 @@ jobs:
     uses: ./.github/actions/build-dependencies
   - name: Buld Rust docs
     run: |
-      rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs
-      RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --no-deps --all-features
+      rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32v1-none -c rust-docs -c rust-src
+      RUSTDOCFLAGS="--cfg docsrs" cargo +${{ steps.nightly.outputs.version }} doc --workspace --all-features
       mv target/doc docs/_site/rust
 
   - name: Upload artifact
```
.github/workflows/tests.yml (vendored, 1 change)

```diff
@@ -61,6 +61,7 @@ jobs:
           -p serai-monero-processor \
           -p tendermint-machine \
           -p tributary-sdk \
+          -p serai-cosign-types \
           -p serai-cosign \
           -p serai-coordinator-substrate \
           -p serai-coordinator-tributary \
```
.gitignore (vendored, 7 changes)

```diff
@@ -1,14 +1,7 @@
 target
-
-# Don't commit any `Cargo.lock` which aren't the workspace's
-Cargo.lock
-!./Cargo.lock
-
-# Don't commit any `Dockerfile`, as they're auto-generated, except the only one which isn't
 Dockerfile
 Dockerfile.fast-epoch
 !orchestration/runtime/Dockerfile
-
 .test-logs
 
 .vscode
```
Cargo.lock (generated, 3313 changes): file diff suppressed because it is too large.
Cargo.toml (79 changes)

```diff
@@ -1,6 +1,17 @@
 [workspace]
 resolver = "2"
 members = [
+  # Rewrites/redirects
+  "patches/option-ext",
+  "patches/directories-next",
+
+  # monero-oxide expects `ciphersuite`, yet the `ciphersuite` in-tree here has breaking changes
+  # This re-exports the in-tree `ciphersuite` _without_ changes breaking to monero-oxide
+  # Not included in workspace to prevent having two crates with the same name (an error)
+  # "patches/ciphersuite",
+  # Same for `dalek-ff-group`
+  # "patches/dalek-ff-group",
+
   "common/std-shims",
   "common/zalloc",
   "common/patchable-async-sleep",
@@ -69,6 +80,7 @@ members = [
 
   "coordinator/tributary-sdk/tendermint",
   "coordinator/tributary-sdk",
+  "coordinator/cosign/types",
   "coordinator/cosign",
   "coordinator/substrate",
   "coordinator/tributary",
@@ -77,31 +89,17 @@ members = [
   "coordinator",
 
   "substrate/primitives",
 
-  "substrate/coins/primitives",
-  "substrate/coins/pallet",
-
-  "substrate/dex/pallet",
-
-  "substrate/validator-sets/primitives",
-  "substrate/validator-sets/pallet",
-
-  "substrate/genesis-liquidity/primitives",
-  "substrate/genesis-liquidity/pallet",
-
-  "substrate/emissions/primitives",
-  "substrate/emissions/pallet",
-
-  "substrate/economic-security/pallet",
-
-  "substrate/in-instructions/primitives",
-  "substrate/in-instructions/pallet",
-
-  "substrate/signals/primitives",
-  "substrate/signals/pallet",
-
   "substrate/abi",
 
+  "substrate/coins",
+  "substrate/validator-sets",
+  "substrate/signals",
+  "substrate/dex",
+  "substrate/genesis-liquidity",
+  "substrate/economic-security",
+  "substrate/emissions",
+  "substrate/in-instructions",
+
   "substrate/runtime",
   "substrate/node",
 
@@ -172,15 +170,9 @@ panic = "unwind"
 overflow-checks = true
 
 [patch.crates-io]
-# Point to empty crates for unused crates in our tree
-ark-ff-3 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.3" }
-ark-ff-4 = { package = "ark-ff", path = "patches/ethereum/ark-ff-0.4" }
-c-kzg = { path = "patches/ethereum/c-kzg" }
-secp256k1-30 = { package = "secp256k1", path = "patches/ethereum/secp256k1-30" }
-
 # Dependencies from monero-oxide which originate from within our own tree
-std-shims = { path = "patches/std-shims" }
-simple-request = { path = "patches/simple-request" }
+std-shims = { path = "common/std-shims" }
+simple-request = { path = "common/request" }
 multiexp = { path = "crypto/multiexp" }
 flexible-transcript = { path = "crypto/transcript" }
 ciphersuite = { path = "patches/ciphersuite" }
@@ -188,13 +180,6 @@ dalek-ff-group = { path = "patches/dalek-ff-group" }
 minimal-ed448 = { path = "crypto/ed448" }
 modular-frost = { path = "crypto/frost" }
 
-# This has a non-deprecated `std` alternative since Rust's 2024 edition
-home = { path = "patches/home" }
-
-# Updates to the latest version
-darling = { path = "patches/darling" }
-thiserror = { path = "patches/thiserror" }
-
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
 
@@ -206,22 +191,19 @@ lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
 
 # Patch from a fork back to upstream
 parity-bip39 = { path = "patches/parity-bip39" }
 
-# Patch to include `FromUniformBytes<64>` over `Scalar`
+# Patch to include `FromUniformBytes<64>` over Scalar
 k256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
 p256 = { git = "https://github.com/kayabaNerve/elliptic-curves", rev = "4994c9ab163781a88cd4a49beae812a89a44e8c3" }
 
-# `jemalloc` conflicts with `mimalloc`, so patch to a `rocksdb` which never uses `jemalloc`
-librocksdb-sys = { path = "patches/librocksdb-sys" }
+# Patch due to `std` now including the required functionality
+is_terminal_polyfill = { path = "./patches/is_terminal_polyfill" }
 
 [workspace.lints.clippy]
-incompatible_msrv = "allow" # Manually verified with a GitHub workflow
-manual_is_multiple_of = "allow"
 unwrap_or_default = "allow"
 map_unwrap_or = "allow"
 needless_continue = "allow"
+manual_is_multiple_of = "allow"
+incompatible_msrv = "allow" # Manually verified with a GitHub workflow
 borrow_as_ptr = "deny"
 cast_lossless = "deny"
 cast_possible_truncation = "deny"
@@ -260,7 +242,7 @@ redundant_closure_for_method_calls = "deny"
 redundant_else = "deny"
 string_add_assign = "deny"
 string_slice = "deny"
-unchecked_time_subtraction = "deny"
+unchecked_duration_subtraction = "deny"
 uninlined_format_args = "deny"
 unnecessary_box_returns = "deny"
 unnecessary_join = "deny"
@@ -269,6 +251,3 @@ unnested_or_patterns = "deny"
 unused_async = "deny"
 unused_self = "deny"
 zero_sized_map_values = "deny"
-
-[workspace.lints.rust]
-unused = "allow" # TODO: https://github.com/rust-lang/rust/issues/147648
```
```diff
@@ -1,50 +0,0 @@
-# eVRF DKG
-
-In 2024, the [eVRF paper](https://eprint.iacr.org/2024/397) was published to
-the IACR preprint server. Within it was a one-round unbiased DKG and a
-one-round unbiased threshold DKG. Unfortunately, both simply describe
-communication of the secret shares as 'Alice sends $s_b$ to Bob'. This causes,
-in practice, the need for an additional round of communication in which
-all participants confirm they received their secret shares.
-
-Within Serai, it was posited to use the same premises as the DDH eVRF itself to
-achieve a verifiable encryption scheme. This allows the secret shares to be
-posted to any 'bulletin board' (such as a blockchain) and for all observers to
-confirm:
-
-- A participant participated
-- The secret shares sent can be received by the intended recipient so long as
-  they can access the bulletin board
-
-Additionally, Serai desired a robust scheme (albeit with a biased key as the
-output, which is fine for our purposes). Accordingly, our implementation
-instantiates the threshold eVRF DKG from the eVRF paper, with our own proposal
-for verifiable encryption, with the caller allowed to decide the set of
-participants. They may:
-
-- Select everyone, collapsing to the non-threshold unbiased DKG from the eVRF
-  paper
-- Select a pre-determined set, collapsing to the threshold unbiased DKG from
-  the eVRF paper
-- Select a post-determined set (with any solution to the Common Subset
-  problem), achieving a robust threshold biased DKG
-
-Note that the eVRF paper proposes using the eVRF to sample coefficients, yet
-this is unnecessary when the resulting key will be biased. Any proof of
-knowledge for the coefficients, as necessary for their extraction within the
-security proofs, would be sufficient.
-
-MAGIC Grants contracted HashCloak to formalize Serai's proposal for a DKG and
-provide proofs for its security. This resulted in
-[this paper](<./Security Proofs.pdf>).
-
-Our implementation itself is then built on top of the audited
-[`generalized-bulletproofs`](https://github.com/kayabaNerve/monero-oxide/tree/generalized-bulletproofs/audits/crypto/generalized-bulletproofs)
-and
-[`generalized-bulletproofs-ec-gadgets`](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/fcmps).
-
-Note we do not use the originally premised DDH eVRF, but rather the one
-premised on elliptic curve divisors, the methodology of which is commented on
-[here](https://github.com/monero-oxide/monero-oxide/tree/fcmp%2B%2B/audits/divisors).
-
-Our implementation itself is unaudited at this time, however.
```

Binary file not shown.
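For orientation, the coefficients and secret shares the removed document discusses follow the familiar threshold-sharing shape sketched below. This is a generic Feldman-style illustration, not the eVRF paper's exact construction (which additionally proves statements about these values in zero knowledge and encrypts the shares verifiably):

```latex
% Participant i samples a degree-(t-1) polynomial and commits to its coefficients:
f_i(x) = a_{i,0} + a_{i,1} x + \dots + a_{i,t-1} x^{t-1},
\qquad A_{i,j} = [a_{i,j}] G
% The secret share for participant b, and its public verification equation:
s_{i \to b} = f_i(b),
\qquad [s_{i \to b}] G = \sum_{j=0}^{t-1} b^{j} A_{i,j}
```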
```diff
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-parity-db = { version = "0.5", default-features = false, optional = true }
+parity-db = { version = "0.5", default-features = false, features = ["arc"], optional = true }
 rocksdb = { version = "0.24", default-features = false, features = ["zstd"], optional = true }
 
 [features]
```
```diff
@@ -15,7 +15,7 @@ pub fn serai_db_key(
 ///
 /// Creates a unit struct and a default implementation for the `key`, `get`, and `set`. The macro
 /// uses a syntax similar to defining a function. Parameters are concatenated to produce a key,
-/// they must be `scale` encodable. The return type is used to auto encode and decode the database
+/// they must be `borsh` serializable. The return type is used to auto (de)serialize the database
 /// value bytes using `borsh`.
 ///
 /// # Arguments
@@ -54,11 +54,10 @@ macro_rules! create_db {
     )?;
     impl$(<$($generic_name: $generic_type),+>)? $field_name$(<$($generic_name),+>)? {
       pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
-        use scale::Encode;
         $crate::serai_db_key(
           stringify!($db_name).as_bytes(),
           stringify!($field_name).as_bytes(),
-          ($($arg),*).encode()
+          &borsh::to_vec(&($($arg),*)).unwrap(),
         )
       }
      pub(crate) fn set(
```
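On the `borsh` side of this hunk, an entry's key is derived from the DB name, the field name, and the borsh-encoded argument tuple. A minimal sketch of an equivalent derivation, assuming plain concatenation inside `serai_db_key` (the real helper's layout may differ, e.g. with length prefixes):

```rust
// Hypothetical stand-in for `serai_db_key`; the actual helper's layout may differ.
fn serai_db_key_sketch(db: &[u8], field: &[u8], args: &[u8]) -> Vec<u8> {
  [db, field, args].concat()
}

fn main() {
  // Roughly what `create_db! { Cosign { Example: (id: u64, notable: bool) -> Vec<u8> } }`
  // would produce for `Example::key(5, true)` under this sketch
  let args = borsh::to_vec(&(5u64, true)).unwrap();
  let key = serai_db_key_sketch(b"Cosign", b"Example", &args);
  assert_eq!(&key[.. 6], b"Cosign");
  println!("{} byte key", key.len());
}
```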
common/env/src/lib.rs (vendored, 2 changes)

```diff
@@ -1,5 +1,5 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 
 // Obtain a variable from the Serai environment/secret store.
 pub fn var(variable: &str) -> Option<String> {
```
```diff
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
```
```diff
@@ -1,9 +1,9 @@
 [package]
 name = "simple-request"
-version = "0.3.0"
+version = "0.1.0"
 description = "A simple HTTP(S) request library"
 license = "MIT"
-repository = "https://github.com/serai-dex/serai/tree/develop/common/request"
+repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = ["http", "https", "async", "request", "ssl"]
 edition = "2021"
@@ -19,10 +19,9 @@ workspace = true
 [dependencies]
 tower-service = { version = "0.3", default-features = false }
 hyper = { version = "1", default-features = false, features = ["http1", "client"] }
-hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy"] }
+hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
 http-body-util = { version = "0.1", default-features = false }
-futures-util = { version = "0.3", default-features = false, features = ["std"] }
-tokio = { version = "1", default-features = false, features = ["sync"] }
+tokio = { version = "1", default-features = false }
 
 hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }
@@ -30,8 +29,6 @@ zeroize = { version = "1", optional = true }
 base64ct = { version = "1", features = ["alloc"], optional = true }
 
 [features]
-tokio = ["hyper-util/tokio"]
-tls = ["tokio", "hyper-rustls"]
-webpki-roots = ["tls", "hyper-rustls/webpki-roots"]
+tls = ["hyper-rustls"]
 basic-auth = ["zeroize", "base64ct"]
 default = ["tls"]
```
```diff
@@ -1,20 +1,19 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 
-use core::{pin::Pin, future::Future};
 use std::sync::Arc;
 
-use futures_util::FutureExt;
-use ::tokio::sync::Mutex;
+use tokio::sync::Mutex;
 
 use tower_service::Service as TowerService;
-use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest, rt::Executor};
-pub use hyper;
-
-use hyper_util::client::legacy::{Client as HyperClient, connect::HttpConnector};
-
 #[cfg(feature = "tls")]
 use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
+use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
+use hyper_util::{
+  rt::tokio::TokioExecutor,
+  client::legacy::{Client as HyperClient, connect::HttpConnector},
+};
+pub use hyper;
 
 mod request;
 pub use request::*;
@@ -38,86 +37,52 @@ type Connector = HttpConnector;
 type Connector = HttpsConnector<HttpConnector>;
 
 #[derive(Clone, Debug)]
-enum Connection<
-  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
-> {
+enum Connection {
   ConnectionPool(HyperClient<Connector, Full<Bytes>>),
   Connection {
-    executor: E,
     connector: Connector,
     host: Uri,
     connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
   },
 }
 
 /// An HTTP client.
-///
-/// `tls` is only guaranteed to work when using the `tokio` executor. Instantiating a client when
-/// the `tls` feature is active without using the `tokio` executor will cause errors.
 #[derive(Clone, Debug)]
-pub struct Client<
-  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
-> {
-  connection: Connection<E>,
+pub struct Client {
+  connection: Connection,
 }
 
-impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
-  Client<E>
-{
-  #[allow(clippy::unnecessary_wraps)]
-  fn connector() -> Result<Connector, Error> {
+impl Client {
+  fn connector() -> Connector {
     let mut res = HttpConnector::new();
     res.set_keepalive(Some(core::time::Duration::from_secs(60)));
     res.set_nodelay(true);
    res.set_reuse_address(true);
 
-    #[cfg(feature = "tls")]
-    if core::any::TypeId::of::<E>() !=
-      core::any::TypeId::of::<hyper_util::rt::tokio::TokioExecutor>()
-    {
-      Err(Error::ConnectionError(
-        "`tls` feature enabled but not using the `tokio` executor".into(),
-      ))?;
-    }
-
     #[cfg(feature = "tls")]
     res.enforce_http(false);
     #[cfg(feature = "tls")]
-    let https = HttpsConnectorBuilder::new().with_native_roots();
-    #[cfg(all(feature = "tls", not(feature = "webpki-roots")))]
-    let https = https.map_err(|e| {
-      Error::ConnectionError(
-        format!("couldn't load system's SSL root certificates and webpki-roots unavilable: {e:?}")
-          .into(),
-      )
-    })?;
-    // Fallback to `webpki-roots` if present
-    #[cfg(all(feature = "tls", feature = "webpki-roots"))]
-    let https = https.unwrap_or(HttpsConnectorBuilder::new().with_webpki_roots());
-    #[cfg(feature = "tls")]
-    let res = https.https_or_http().enable_http1().wrap_connector(res);
-
-    Ok(res)
+    let res = HttpsConnectorBuilder::new()
+      .with_native_roots()
+      .expect("couldn't fetch system's SSL roots")
+      .https_or_http()
+      .enable_http1()
+      .wrap_connector(res);
+    res
   }
 
-  pub fn with_executor_and_connection_pool(executor: E) -> Result<Client<E>, Error> {
-    Ok(Client {
+  pub fn with_connection_pool() -> Client {
+    Client {
       connection: Connection::ConnectionPool(
-        HyperClient::builder(executor)
+        HyperClient::builder(TokioExecutor::new())
          .pool_idle_timeout(core::time::Duration::from_secs(60))
-          .build(Self::connector()?),
+          .build(Self::connector()),
       ),
-    })
+    }
   }
 
-  pub fn with_executor_and_without_connection_pool(
-    executor: E,
-    host: &str,
-  ) -> Result<Client<E>, Error> {
+  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
     Ok(Client {
       connection: Connection::Connection {
-        executor,
-        connector: Self::connector()?,
+        connector: Self::connector(),
         host: {
           let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
           if uri.host().is_none() {
@@ -130,9 +95,9 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
     })
   }
 
-  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_, E>, Error> {
+  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
     let request: Request = request.into();
-    let Request { mut request, response_size_limit } = request;
+    let mut request = request.0;
     if let Some(header_host) = request.headers().get(hyper::header::HOST) {
       match &self.connection {
         Connection::ConnectionPool(_) => {}
@@ -166,7 +131,7 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
       Connection::ConnectionPool(client) => {
         client.request(request).await.map_err(Error::HyperUtil)?
       }
-      Connection::Connection { executor, connector, host, connection } => {
+      Connection::Connection { connector, host, connection } => {
         let mut connection_lock = connection.lock().await;
 
         // If there's not a connection...
@@ -178,46 +143,28 @@ impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Outpu
           let call_res = call_res.map_err(Error::ConnectionError);
           let (requester, connection) =
             hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
-          // This task will die when we drop the requester
-          executor.execute(Box::pin(connection.map(|_| ())));
+          // This will die when we drop the requester, so we don't need to track an AbortHandle
+          // for it
+          tokio::spawn(connection);
          *connection_lock = Some(requester);
         }
 
-        let connection = connection_lock.as_mut().expect("lock over the connection was poisoned");
+        let connection = connection_lock.as_mut().unwrap();
         let mut err = connection.ready().await.err();
         if err.is_none() {
           // Send the request
-          let response = connection.send_request(request).await;
-          if let Ok(response) = response {
-            return Ok(Response { response, size_limit: response_size_limit, client: self });
+          let res = connection.send_request(request).await;
+          if let Ok(res) = res {
+            return Ok(Response(res, self));
           }
-          err = response.err();
+          err = res.err();
         }
         // Since this connection has been put into an error state, drop it
         *connection_lock = None;
-        Err(Error::Hyper(err.expect("only here if `err` is some yet no error")))?
+        Err(Error::Hyper(err.unwrap()))?
       }
     };
 
-    Ok(Response { response, size_limit: response_size_limit, client: self })
+    Ok(Response(response, self))
   }
 }
 
-#[cfg(feature = "tokio")]
-mod tokio {
-  use hyper_util::rt::tokio::TokioExecutor;
-  use super::*;
-
-  pub type TokioClient = Client<TokioExecutor>;
-  impl Client<TokioExecutor> {
-    pub fn with_connection_pool() -> Result<Self, Error> {
-      Self::with_executor_and_connection_pool(TokioExecutor::new())
-    }
-
-    pub fn without_connection_pool(host: &str) -> Result<Self, Error> {
-      Self::with_executor_and_without_connection_pool(TokioExecutor::new(), host)
-    }
-  }
-}
-#[cfg(feature = "tokio")]
-pub use tokio::TokioClient;
```
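A hedged usage sketch for the executor-generic side of this hunk: with the `tokio` feature, `TokioClient` fixes the executor to hyper-util's `TokioExecutor`, and requests are built from plain `hyper` requests. The URL is illustrative and error handling is elided to `unwrap`:

```rust
use simple_request::{TokioClient, Request, Full, hyper::{self, body::Bytes}};

#[tokio::main]
async fn main() {
  // Pooled connections, with a sixty-second idle timeout
  let client = TokioClient::with_connection_pool().unwrap();

  // `Request` wraps a plain `hyper` request
  let request: Request = hyper::Request::get("http://127.0.0.1:8080/")
    .body(Full::new(Bytes::new()))
    .unwrap()
    .into();

  let response = client.request(request).await.unwrap();
  println!("status: {}", response.status());
}
```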
```diff
@@ -7,15 +7,11 @@ pub use http_body_util::Full;
 use crate::Error;
 
 #[derive(Debug)]
-pub struct Request {
-  pub(crate) request: hyper::Request<Full<Bytes>>,
-  pub(crate) response_size_limit: Option<usize>,
-}
-
+pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
 impl Request {
   #[cfg(feature = "basic-auth")]
   fn username_password_from_uri(&self) -> Result<(String, String), Error> {
-    if let Some(authority) = self.request.uri().authority() {
+    if let Some(authority) = self.0.uri().authority() {
       let authority = authority.as_str();
       if authority.contains('@') {
         // Decode the username and password from the URI
@@ -40,10 +36,9 @@ impl Request {
     let mut formatted = format!("{username}:{password}");
     let mut encoded = Base64::encode_string(formatted.as_bytes());
     formatted.zeroize();
-    self.request.headers_mut().insert(
+    self.0.headers_mut().insert(
       hyper::header::AUTHORIZATION,
-      HeaderValue::from_str(&format!("Basic {encoded}"))
-        .expect("couldn't form header from base64-encoded string"),
+      HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
     );
     encoded.zeroize();
   }
@@ -64,17 +59,9 @@ impl Request {
   pub fn with_basic_auth(&mut self) {
     let _ = self.basic_auth_from_uri();
   }
-
-  /// Set a size limit for the response.
-  ///
-  /// This may be exceeded by a single HTTP frame and accordingly isn't perfect.
-  pub fn set_response_size_limit(&mut self, response_size_limit: Option<usize>) {
-    self.response_size_limit = response_size_limit;
-  }
 }
 
 impl From<hyper::Request<Full<Bytes>>> for Request {
   fn from(request: hyper::Request<Full<Bytes>>) -> Request {
-    Request { request, response_size_limit: None }
+    Request(request)
   }
 }
```
```diff
@@ -1,54 +1,24 @@
-use core::{pin::Pin, future::Future};
 use std::io;
 
 use hyper::{
   StatusCode,
   header::{HeaderValue, HeaderMap},
-  body::Incoming,
-  rt::Executor,
+  body::{Buf, Incoming},
 };
 use http_body_util::BodyExt;
 
-use futures_util::{Stream, StreamExt};
-
 use crate::{Client, Error};
 
 // Borrows the client so its async task lives as long as this response exists.
 #[allow(dead_code)]
 #[derive(Debug)]
-pub struct Response<
-  'a,
-  E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>,
-> {
-  pub(crate) response: hyper::Response<Incoming>,
-  pub(crate) size_limit: Option<usize>,
-  pub(crate) client: &'a Client<E>,
-}
-
-impl<E: 'static + Send + Sync + Clone + Executor<Pin<Box<dyn Send + Future<Output = ()>>>>>
-  Response<'_, E>
-{
+pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
+impl Response<'_> {
   pub fn status(&self) -> StatusCode {
-    self.response.status()
+    self.0.status()
   }
   pub fn headers(&self) -> &HeaderMap<HeaderValue> {
-    self.response.headers()
+    self.0.headers()
   }
   pub async fn body(self) -> Result<impl std::io::Read, Error> {
-    let mut body = self.response.into_body().into_data_stream();
-    let mut res: Vec<u8> = vec![];
-    loop {
-      if let Some(size_limit) = self.size_limit {
-        let (lower, upper) = body.size_hint();
-        if res.len().wrapping_add(upper.unwrap_or(lower)) > size_limit.min(usize::MAX - 1) {
-          Err(Error::ConnectionError("response exceeded size limit".into()))?;
-        }
-      }
-
-      let Some(part) = body.next().await else { break };
-      let part = part.map_err(Error::Hyper)?;
-      res.extend(part.as_ref());
-    }
-    Ok(io::Cursor::new(res))
+    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
   }
 }
```
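The removed side of this hunk enforces the size limit while streaming the body. A sketch of how that API would be used, continuing the previous sketch (the path and limit are illustrative):

```rust
use std::io::Read;

use simple_request::{TokioClient, Request, Full, hyper::{self, body::Bytes}};

#[tokio::main]
async fn main() {
  let client = TokioClient::with_connection_pool().unwrap();

  let mut request: Request = hyper::Request::get("http://127.0.0.1:8080/large")
    .body(Full::new(Bytes::new()))
    .unwrap()
    .into();
  // Error out, while streaming, if the response body exceeds 1 MiB
  request.set_response_size_limit(Some(1 << 20));

  let mut body = client.request(request).await.unwrap().body().await.unwrap();
  let mut buffered = vec![];
  body.read_to_end(&mut buffered).unwrap();
}
```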
```diff
@@ -1,6 +1,6 @@
 [package]
 name = "std-shims"
-version = "0.1.5"
+version = "0.1.4"
 description = "A series of std shims to make alloc more feasible"
 license = "MIT"
 repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
@@ -18,10 +18,9 @@ workspace = true
 
 [dependencies]
-rustversion = { version = "1", default-features = false }
-spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "fair_mutex", "once", "lazy"] }
-hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"], optional = true }
+spin = { version = "0.10", default-features = false, features = ["use_ticket_mutex", "once", "lazy"] }
+hashbrown = { version = "0.16", default-features = false, features = ["default-hasher", "inline-more"] }
 
 [features]
-alloc = ["hashbrown"]
-std = ["alloc", "spin/std"]
+std = []
 default = ["std"]
```
```diff
@@ -1,28 +1,11 @@
-# `std` shims
+# std shims
 
-`std-shims` is a Rust crate with two purposes:
-- Expand the functionality of `core` and `alloc`
-- Polyfill functionality only available on newer version of Rust
+A crate which passes through to std when the default `std` feature is enabled,
+yet provides a series of shims when it isn't.
 
-The goal is to make supporting no-`std` environments, and older versions of
-Rust, as simple as possible. For most use cases, replacing `std::` with
-`std_shims::` and adding `use std_shims::prelude::*` is sufficient to take full
-advantage of `std-shims`.
+No guarantee of one-to-one parity is provided. The shims provided aim to be sufficient for the
+average case.
 
-# API Surface
-
-`std-shims` only aims to have items _mutually available_ between `alloc` (with
-extra dependencies) and `std` publicly exposed. Items exclusive to `std`, with
-no shims available, will not be exported by `std-shims`.
-
-# Dependencies
-
-`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization
-primitives are provided via `spin` (avoiding a requirement on
-`critical-section`). Sections of `std::io` are independently matched as
-possible. `rustversion` is used to detect when to provide polyfills.
-
-# Disclaimer
-
-No guarantee of one-to-one parity is provided. The shims provided aim to be
-sufficient for the average case. Pull requests are _welcome_.
+`HashSet` and `HashMap` are provided via `hashbrown`. Synchronization primitives are provided via
+`spin` (avoiding a requirement on `critical-section`).
+types are not guaranteed to be
```
```diff
@@ -1,7 +1,7 @@
-#[cfg(all(feature = "alloc", not(feature = "std")))]
-pub use extern_alloc::collections::*;
-#[cfg(all(feature = "alloc", not(feature = "std")))]
-pub use hashbrown::{HashSet, HashMap};
-
 #[cfg(feature = "std")]
 pub use std::collections::*;
+
+#[cfg(not(feature = "std"))]
+pub use alloc::collections::*;
+#[cfg(not(feature = "std"))]
+pub use hashbrown::{HashSet, HashMap};
```
```diff
@@ -1,74 +1,42 @@
 #[cfg(feature = "std")]
 pub use std::io::*;
 
 #[cfg(not(feature = "std"))]
 mod shims {
-  use core::fmt::{self, Debug, Display, Formatter};
-  #[cfg(feature = "alloc")]
-  use extern_alloc::{boxed::Box, vec::Vec};
-  use crate::error::Error as CoreError;
+  use core::fmt::{Debug, Formatter};
+  use alloc::{boxed::Box, vec::Vec};
 
   /// The kind of error.
   #[derive(Clone, Copy, PartialEq, Eq, Debug)]
   pub enum ErrorKind {
     UnexpectedEof,
     Other,
   }
 
   /// An error.
-  #[derive(Debug)]
   pub struct Error {
     kind: ErrorKind,
-    #[cfg(feature = "alloc")]
-    error: Box<dyn Send + Sync + CoreError>,
+    error: Box<dyn Send + Sync>,
   }
 
-  impl Display for Error {
-    fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
-      <Self as Debug>::fmt(self, f)
+  impl Debug for Error {
+    fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
+      fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
     }
   }
-  impl CoreError for Error {}
-
-  #[cfg(not(feature = "alloc"))]
-  pub trait IntoBoxSendSyncError {}
-  #[cfg(not(feature = "alloc"))]
-  impl<I> IntoBoxSendSyncError for I {}
-  #[cfg(feature = "alloc")]
-  pub trait IntoBoxSendSyncError: Into<Box<dyn Send + Sync + CoreError>> {}
-  #[cfg(feature = "alloc")]
-  impl<I: Into<Box<dyn Send + Sync + CoreError>>> IntoBoxSendSyncError for I {}
 
   impl Error {
     /// Create a new error.
-    ///
-    /// The error object itself is silently dropped when `alloc` is not enabled.
-    #[allow(unused)]
-    pub fn new<E: 'static + IntoBoxSendSyncError>(kind: ErrorKind, error: E) -> Error {
-      #[cfg(not(feature = "alloc"))]
-      let res = Error { kind };
-      #[cfg(feature = "alloc")]
-      let res = Error { kind, error: error.into() };
-      res
+    pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
+      Error { kind, error: Box::new(error) }
     }
 
     /// Create a new error with `io::ErrorKind::Other` as its kind.
-    ///
-    /// The error object itself is silently dropped when `alloc` is not enabled.
-    #[allow(unused)]
-    pub fn other<E: 'static + IntoBoxSendSyncError>(error: E) -> Error {
-      #[cfg(not(feature = "alloc"))]
-      let res = Error { kind: ErrorKind::Other };
-      #[cfg(feature = "alloc")]
-      let res = Error { kind: ErrorKind::Other, error: error.into() };
-      res
+    pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
+      Error { kind: ErrorKind::Other, error: Box::new(error) }
     }
 
     /// The kind of error.
     pub fn kind(&self) -> ErrorKind {
       self.kind
     }
 
     /// Retrieve the inner error.
-    #[cfg(feature = "alloc")]
-    pub fn into_inner(self) -> Option<Box<dyn Send + Sync + CoreError>> {
+    pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
       Some(self.error)
     }
   }
@@ -96,12 +64,6 @@ mod shims {
   }
 }
 
-  impl<R: Read> Read for &mut R {
-    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
-      R::read(*self, buf)
-    }
-  }
-
   pub trait BufRead: Read {
     fn fill_buf(&mut self) -> Result<&[u8]>;
     fn consume(&mut self, amt: usize);
@@ -126,7 +88,6 @@ mod shims {
   }
 }
 
-  #[cfg(feature = "alloc")]
   impl Write for Vec<u8> {
     fn write(&mut self, buf: &[u8]) -> Result<usize> {
       self.extend(buf);
@@ -134,8 +95,6 @@ mod shims {
     }
   }
 }
 
 #[cfg(not(feature = "std"))]
 pub use shims::*;
 
-#[cfg(feature = "std")]
-pub use std::io::{ErrorKind, Error, Result, Read, BufRead, Write};
```
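A sketch against the reworked shim (the `+` side of this hunk): errors are built from any `'static + Send + Sync` payload, and reads are driven manually since only `read` is guaranteed here. `read_u32` is a hypothetical helper, not part of the crate:

```rust
use std_shims::io::{self, Read};

// Hypothetical helper: fill a fixed buffer from any shimmed `Read`
fn read_u32(reader: &mut impl Read) -> io::Result<u32> {
  let mut buf = [0; 4];
  let mut filled = 0;
  while filled != buf.len() {
    let read = reader.read(&mut buf[filled ..])?;
    if read == 0 {
      Err(io::Error::new(io::ErrorKind::UnexpectedEof, "byte stream ended early"))?;
    }
    filled += read;
  }
  Ok(u32::from_le_bytes(buf))
}
```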
```diff
@@ -1,45 +1,18 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
 
-#[cfg(not(feature = "alloc"))]
-pub use core::*;
-#[cfg(not(feature = "alloc"))]
-pub use core::{alloc, borrow, ffi, fmt, slice, str, task};
-
-#[cfg(not(feature = "std"))]
-#[rustversion::before(1.81)]
-pub mod error {
-  use core::fmt::{Debug, Display};
-  pub trait Error: Debug + Display {}
-}
-#[cfg(not(feature = "std"))]
-#[rustversion::since(1.81)]
-pub use core::error;
-
-#[cfg(feature = "alloc")]
-extern crate alloc as extern_alloc;
-#[cfg(all(feature = "alloc", not(feature = "std")))]
-pub use extern_alloc::{alloc, borrow, boxed, ffi, fmt, rc, slice, str, string, task, vec, format};
-#[cfg(feature = "std")]
-pub use std::{alloc, borrow, boxed, error, ffi, fmt, rc, slice, str, string, task, vec, format};
+pub extern crate alloc;
 
-pub mod sync;
 pub mod collections;
 pub mod io;
+pub mod sync;
 
+pub use alloc::vec;
+pub use alloc::str;
+pub use alloc::string;
 
 pub mod prelude {
   // Shim the `std` prelude
   #[cfg(feature = "alloc")]
   pub use extern_alloc::{
     format, vec,
     borrow::ToOwned,
     boxed::Box,
     vec::Vec,
     string::{String, ToString},
   };
 
   // Shim `div_ceil`
   #[rustversion::before(1.73)]
   #[doc(hidden)]
   pub trait StdShimsDivCeil {
@@ -80,7 +53,6 @@ pub mod prelude {
   }
 }
 
   // Shim `io::Error::other`
   #[cfg(feature = "std")]
   #[rustversion::before(1.74)]
   #[doc(hidden)]
```
```diff
@@ -1,28 +1,19 @@
-pub use core::sync::atomic;
-#[cfg(all(feature = "alloc", not(feature = "std")))]
-pub use extern_alloc::sync::{Arc, Weak};
-#[cfg(feature = "std")]
-pub use std::sync::{Arc, Weak};
+pub use core::sync::*;
+pub use alloc::sync::*;
 
 mod mutex_shim {
-  #[cfg(not(feature = "std"))]
-  pub use spin::{Mutex, MutexGuard};
   #[cfg(feature = "std")]
-  pub use std::sync::{Mutex, MutexGuard};
+  pub use std::sync::*;
+  #[cfg(not(feature = "std"))]
+  pub use spin::*;
 
   /// A shimmed `Mutex` with an API mutual to `spin` and `std`.
   #[derive(Default, Debug)]
   pub struct ShimMutex<T>(Mutex<T>);
   impl<T> ShimMutex<T> {
     /// Construct a new `Mutex`.
     pub const fn new(value: T) -> Self {
       Self(Mutex::new(value))
     }
 
     /// Acquire a lock on the contents of the `Mutex`.
-    ///
-    /// On no-`std` environments, this may spin until the lock is acquired. On `std` environments,
-    /// this may panic if the `Mutex` was poisoned.
     pub fn lock(&self) -> MutexGuard<'_, T> {
       #[cfg(feature = "std")]
       let res = self.0.lock().unwrap();
@@ -34,12 +25,11 @@ mod mutex_shim {
   }
 }
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
 
-#[rustversion::before(1.80)]
-pub use spin::Lazy as LazyLock;
-
-#[rustversion::since(1.80)]
 #[cfg(not(feature = "std"))]
 pub use spin::Lazy as LazyLock;
+#[rustversion::before(1.80)]
+#[cfg(feature = "std")]
+pub use spin::Lazy as LazyLock;
 #[rustversion::since(1.80)]
 #[cfg(feature = "std")]
 pub use std::sync::LazyLock;
```
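The value of the shimmed `Mutex` is its mutual API: `lock()` returns the guard directly, unwrapping poisoning on `std` and spinning on no-`std`. A minimal sketch, assuming the `std-shims` crate as shimmed here:

```rust
use std_shims::sync::{LazyLock, Mutex};

// `Mutex` here is the crate's `ShimMutex`; `LazyLock` is `std`'s or `spin`'s
static COUNTER: LazyLock<Mutex<u64>> = LazyLock::new(|| Mutex::new(0));

fn increment() -> u64 {
  // No `Result` to handle, unlike `std::sync::Mutex::lock`
  let mut counter = COUNTER.lock();
  *counter += 1;
  *counter
}
```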
```diff
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
```
```diff
@@ -1,5 +1,5 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
 
 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
```
```diff
@@ -31,7 +31,6 @@ frost = { package = "modular-frost", path = "../crypto/frost" }
 frost-schnorrkel = { path = "../crypto/schnorrkel" }
 
 hex = { version = "0.4", default-features = false, features = ["std"] }
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 zalloc = { path = "../common/zalloc" }
@@ -43,7 +42,7 @@ messages = { package = "serai-processor-messages", path = "../processor/messages
 message-queue = { package = "serai-message-queue", path = "../message-queue" }
 tributary-sdk = { path = "./tributary-sdk" }
 
-serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }
+serai-client = { path = "../substrate/client", default-features = false, features = ["serai"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 env_logger = { version = "0.10", default-features = false, features = ["humantime"] }
```
```diff
@@ -21,9 +21,8 @@ workspace = true
 blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }
 schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
-serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
+serai-client = { path = "../../substrate/client", default-features = false, features = ["serai"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
@@ -31,3 +30,5 @@ tokio = { version = "1", default-features = false }
 
 serai-db = { path = "../../common/db", version = "0.1.1" }
 serai-task = { path = "../../common/task", version = "0.1" }
+
+serai-cosign-types = { path = "./types" }
```
```diff
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
@@ -7,7 +7,6 @@ use std::{sync::Arc, collections::HashMap, time::Instant};
 
 use blake2::{Digest, Blake2s256};
 
-use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};
 
 use serai_client::{
@@ -19,6 +18,8 @@ use serai_client::{
 use serai_db::*;
 use serai_task::*;
 
+use serai_cosign_types::*;
+
 /// The cosigns which are intended to be performed.
 mod intend;
 /// The evaluator of the cosigns.
@@ -78,68 +79,6 @@ enum HasEvents {
   No,
 }
 
-/// An intended cosign.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
-pub struct CosignIntent {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: [u8; 32],
-  /// If this cosign must be handled before further cosigns are.
-  pub notable: bool,
-}
-
-/// A cosign.
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
-pub struct Cosign {
-  /// The global session this cosign is being performed under.
-  pub global_session: [u8; 32],
-  /// The number of the block to cosign.
-  pub block_number: u64,
-  /// The hash of the block to cosign.
-  pub block_hash: [u8; 32],
-  /// The actual cosigner.
-  pub cosigner: ExternalNetworkId,
-}
-
-impl CosignIntent {
-  /// Convert this into a `Cosign`.
-  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
-    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
-    Cosign { global_session, block_number, block_hash, cosigner }
-  }
-}
-
-impl Cosign {
-  /// The message to sign for this cosign.
-  ///
-  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
-  pub fn signature_message(&self) -> Vec<u8> {
-    // We use a schnorrkel context to domain-separate this
-    self.encode()
-  }
-}
-
-/// A signed cosign.
-#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
-pub struct SignedCosign {
-  /// The cosign.
-  pub cosign: Cosign,
-  /// The signature for the cosign.
-  pub signature: [u8; 64],
-}
-
-impl SignedCosign {
-  fn verify_signature(&self, signer: serai_client::Public) -> bool {
-    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
-    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
-
-    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
-  }
-}
-
 create_db! {
   Cosign {
     // The following are populated by the intend task and used throughout the library
```
coordinator/cosign/types/Cargo.toml (new file, 25 lines)

```diff
@@ -0,0 +1,25 @@
+[package]
+name = "serai-cosign-types"
+version = "0.1.0"
+description = "Evaluator of cosigns for the Serai network"
+license = "AGPL-3.0-only"
+repository = "https://github.com/serai-dex/serai/tree/develop/coordinator/cosign"
+authors = ["Luke Parker <lukeparker5132@gmail.com>"]
+keywords = []
+edition = "2021"
+publish = false
+rust-version = "1.85"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = ["--cfg", "docsrs"]
+
+[lints]
+workspace = true
+
+[dependencies]
+schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
+
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
+
+serai-primitives = { path = "../../../substrate/primitives", default-features = false, features = ["std"] }
```
coordinator/cosign/types/src/lib.rs (new file, 72 lines)

```diff
@@ -0,0 +1,72 @@
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
+#![deny(missing_docs)]
+//! Types used when cosigning Serai. For more info, please see `serai-cosign`.
+use borsh::{BorshSerialize, BorshDeserialize};
+
+use serai_primitives::{crypto::Public, network_id::ExternalNetworkId};
+
+/// The schnorrkel context used when signing a cosign.
+pub const COSIGN_CONTEXT: &[u8] = b"/serai/coordinator/cosign";
+
+/// An intended cosign.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct CosignIntent {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// If this cosign must be handled before further cosigns are.
+  pub notable: bool,
+}
+
+/// A cosign.
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
+pub struct Cosign {
+  /// The global session this cosign is being performed under.
+  pub global_session: [u8; 32],
+  /// The number of the block to cosign.
+  pub block_number: u64,
+  /// The hash of the block to cosign.
+  pub block_hash: [u8; 32],
+  /// The actual cosigner.
+  pub cosigner: ExternalNetworkId,
+}
+
+impl CosignIntent {
+  /// Convert this into a `Cosign`.
+  pub fn into_cosign(self, cosigner: ExternalNetworkId) -> Cosign {
+    let CosignIntent { global_session, block_number, block_hash, notable: _ } = self;
+    Cosign { global_session, block_number, block_hash, cosigner }
+  }
+}
+
+impl Cosign {
+  /// The message to sign for this cosign.
+  ///
+  /// This must be signed with schnorrkel, the context set to `COSIGN_CONTEXT`.
+  pub fn signature_message(&self) -> Vec<u8> {
+    // We use a schnorrkel context to domain-separate this
+    borsh::to_vec(self).unwrap()
+  }
+}
+
+/// A signed cosign.
+#[derive(Clone, Debug, BorshSerialize, BorshDeserialize)]
+pub struct SignedCosign {
+  /// The cosign.
+  pub cosign: Cosign,
+  /// The signature for the cosign.
+  pub signature: [u8; 64],
+}
+
+impl SignedCosign {
+  /// Verify a cosign's signature.
+  pub fn verify_signature(&self, signer: Public) -> bool {
+    let Ok(signer) = schnorrkel::PublicKey::from_bytes(&signer.0) else { return false };
+    let Ok(signature) = schnorrkel::Signature::from_bytes(&self.signature) else { return false };
+
+    signer.verify_simple(COSIGN_CONTEXT, &self.cosign.signature_message(), &signature).is_ok()
+  }
+}
```
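A hedged sketch of producing a `SignedCosign` from these types: borsh-encode the `Cosign` via `signature_message` and sign it under `COSIGN_CONTEXT`, mirroring what `verify_signature` checks. The keypair source is illustrative:

```rust
use schnorrkel::{Keypair, signing_context};
use serai_cosign_types::{COSIGN_CONTEXT, Cosign, SignedCosign};

fn sign_cosign(keypair: &Keypair, cosign: Cosign) -> SignedCosign {
  let message = cosign.signature_message();
  // `verify_simple(ctx, msg, sig)` on the verifying side pairs with
  // `sign(signing_context(ctx).bytes(msg))` here
  let signature = keypair.sign(signing_context(COSIGN_CONTEXT).bytes(&message)).to_bytes();
  SignedCosign { cosign, signature }
}
```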
```diff
@@ -22,7 +22,7 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
 
 serai-db = { path = "../../common/db", version = "0.1" }
 
-serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
+serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
 serai-cosign = { path = "../cosign" }
 tributary-sdk = { path = "../tributary-sdk" }
```
@@ -29,7 +29,7 @@ schnorrkel = { version = "0.11", default-features = false, features = ["std"] }
|
||||
hex = { version = "0.4", default-features = false, features = ["std"] }
|
||||
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
|
||||
|
||||
serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai", "borsh"] }
|
||||
serai-client = { path = "../../../substrate/client", default-features = false, features = ["serai"] }
|
||||
serai-cosign = { path = "../../cosign" }
|
||||
tributary-sdk = { path = "../../tributary-sdk" }
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
|
||||
#![doc = include_str!("../README.md")]
|
||||
#![deny(missing_docs)]
|
||||
|
||||
|
||||
@@ -92,8 +92,7 @@ impl SwarmTask {
|
||||
}
|
||||
}
|
||||
gossip::Event::Subscribed { .. } | gossip::Event::Unsubscribed { .. } => {}
|
||||
gossip::Event::GossipsubNotSupported { peer_id } |
|
||||
gossip::Event::SlowPeer { peer_id, .. } => {
|
||||
gossip::Event::GossipsubNotSupported { peer_id } => {
|
||||
let _: Result<_, _> = self.swarm.disconnect_peer_id(peer_id);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
use core::future::Future;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
|
||||
use serai_primitives::{MAX_KEY_SHARES_PER_SET, ExternalValidatorSet};
|
||||
|
||||
use futures_lite::FutureExt;
|
||||
|
||||
|
||||
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 

@@ -7,7 +7,7 @@ use std::collections::HashMap;
 
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use serai_client::{primitives::ExternalNetworkId, validator_sets::primitives::ExternalValidatorSet};
+use serai_primitives::{network_id::ExternalNetworkId, validator_sets::ExternalValidatorSet};
 
 use serai_db::Db;
 use tributary_sdk::{ReadWrite, TransactionTrait, Tributary, TributaryReader};

@@ -103,7 +103,7 @@ mod _internal_db {
     // Tributary transactions to publish from the DKG confirmation task
     TributaryTransactionsFromDkgConfirmation: (set: ExternalValidatorSet) -> Transaction,
     // Participants to remove
-    RemoveParticipant: (set: ExternalValidatorSet) -> u16,
+    RemoveParticipant: (set: ExternalValidatorSet) -> Participant,
     }
   }
 }

@@ -139,11 +139,10 @@ impl RemoveParticipant {
   pub(crate) fn send(txn: &mut impl DbTxn, set: ExternalValidatorSet, participant: Participant) {
     // If this set has yet to be retired, send this transaction
     if RetiredTributary::get(txn, set.network).map(|session| session.0) < Some(set.session.0) {
-      _internal_db::RemoveParticipant::send(txn, set, &u16::from(participant));
+      _internal_db::RemoveParticipant::send(txn, set, &participant);
     }
   }
   pub(crate) fn try_recv(txn: &mut impl DbTxn, set: ExternalValidatorSet) -> Option<Participant> {
     _internal_db::RemoveParticipant::try_recv(txn, set)
-      .map(|i| Participant::new(i).expect("sent invalid participant index for removal"))
   }
 }
@@ -284,7 +284,7 @@ async fn handle_network(
         &mut txn,
         ExternalValidatorSet { network, session },
         slash_report,
-        Signature::from(signature),
+        Signature(signature),
       );
     }
   },

@@ -11,7 +11,6 @@ use tokio::sync::mpsc;
 
 use serai_db::{Get, DbTxn, Db as DbTrait, create_db, db_channel};
 
-use scale::Encode;
 use serai_client::validator_sets::primitives::ExternalValidatorSet;
 
 use tributary_sdk::{TransactionKind, TransactionError, ProvidedError, TransactionTrait, Tributary};

@@ -479,7 +478,8 @@ pub(crate) async fn spawn_tributary<P: P2p>(
     return;
   }
 
-  let genesis = <[u8; 32]>::from(Blake2s::<U32>::digest((set.serai_block, set.set).encode()));
+  let genesis =
+    <[u8; 32]>::from(Blake2s::<U32>::digest(borsh::to_vec(&(set.serai_block, set.set)).unwrap()));
 
   // Since the Serai block will be finalized, then cosigned, before we handle this, this time will
  // be a couple of minutes stale. While the Tributary will still function with a start time in the
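The new line derives the Tributary genesis by hashing the Borsh serialization of `(serai_block, set)` instead of its SCALE encoding. A minimal sketch of the same computation, using the `blake2` crate's `Blake2s256` and a hypothetical `SetId` stand-in for the real `ExternalValidatorSet`:

```rust
use blake2::{Blake2s256, Digest};
use borsh::BorshSerialize;

// Stand-in for the real set type; only needs to be Borsh-serializable
#[derive(BorshSerialize)]
struct SetId {
  network: u8,
  session: u32,
}

fn tributary_genesis(serai_block: [u8; 32], set: &SetId) -> [u8; 32] {
  // Hash the Borsh serialization of (block hash, set) to derive the genesis
  let serialized = borsh::to_vec(&(serai_block, set)).expect("serializing to a Vec can't fail");
  Blake2s256::digest(&serialized).into()
}
```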
@@ -20,12 +20,11 @@ workspace = true
 [dependencies]
 bitvec = { version = "1", default-features = false, features = ["std"] }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive", "bit-vec"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
 
-serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai", "borsh"] }
+serai-client = { path = "../../substrate/client", version = "0.1", default-features = false, features = ["serai"] }
 
 log = { version = "0.4", default-features = false, features = ["std"] }
 

@@ -1,10 +1,9 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 
 use std::collections::HashMap;
 
-use scale::{Encode, Decode};
 use borsh::{BorshSerialize, BorshDeserialize};
 
 use dkg::Participant;

@@ -178,14 +177,13 @@ impl Keys {
       signature_participants,
       signature,
     );
-    _public_db::Keys::set(txn, set.network, &(set.session, tx.encode()));
+    _public_db::Keys::set(txn, set.network, &(set.session, tx));
   }
   pub(crate) fn take(
     txn: &mut impl DbTxn,
     network: ExternalNetworkId,
   ) -> Option<(Session, Transaction)> {
-    let (session, tx) = _public_db::Keys::take(txn, network)?;
-    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
+    _public_db::Keys::take(txn, network)
   }
 }

@@ -226,13 +224,12 @@ impl SlashReports {
       slash_report,
       signature,
     );
-    _public_db::SlashReports::set(txn, set.network, &(set.session, tx.encode()));
+    _public_db::SlashReports::set(txn, set.network, &(set.session, tx));
   }
   pub(crate) fn take(
     txn: &mut impl DbTxn,
     network: ExternalNetworkId,
   ) -> Option<(Session, Transaction)> {
-    let (session, tx) = _public_db::SlashReports::take(txn, network)?;
-    Some((session, <_>::decode(&mut tx.as_slice()).unwrap()))
+    _public_db::SlashReports::take(txn, network)
   }
 }
@@ -36,7 +36,7 @@ log = { version = "0.4", default-features = false, features = ["std"] }
 
 serai-db = { path = "../../common/db", version = "0.1" }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 futures-util = { version = "0.3", default-features = false, features = ["std", "sink", "channel"] }
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }
 tendermint = { package = "tendermint-machine", path = "./tendermint", version = "0.2" }

@@ -5,7 +5,7 @@ use ciphersuite::{group::GroupEncoding, *};
 
 use serai_db::{Get, DbTxn, Db};
 
-use scale::Decode;
+use borsh::BorshDeserialize;
 
 use tendermint::ext::{Network, Commit};

@@ -62,7 +62,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
     D::key(
       b"tributary_blockchain",
       b"next_nonce",
-      [genesis.as_ref(), signer.to_bytes().as_ref(), order].concat(),
+      [genesis.as_slice(), signer.to_bytes().as_slice(), order].concat(),
     )
   }

@@ -109,7 +109,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
 
   pub(crate) fn block_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Block<T>> {
     db.get(Self::block_key(&genesis, block))
-      .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_ref()).unwrap())
+      .map(|bytes| Block::<T>::read::<&[u8]>(&mut bytes.as_slice()).unwrap())
   }
 
   pub(crate) fn commit_from_db(db: &D, genesis: [u8; 32], block: &[u8; 32]) -> Option<Vec<u8>> {

@@ -169,7 +169,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
       // we must have a commit per valid hash
       let commit = Self::commit_from_db(db, genesis, &hash).unwrap();
       // commit has to be valid if it is coming from our db
-      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
+      Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
     };
     let unsigned_in_chain =
       |hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();

@@ -244,7 +244,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
     let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
       let commit = self.commit_by_block_number(block)?;
       // commit has to be valid if it is coming from our db
-      Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
+      Some(Commit::<N::SignatureScheme>::deserialize_reader(&mut commit.as_slice()).unwrap())
     };
 
     let mut txn_db = db.clone();
@@ -3,10 +3,11 @@ use std::{sync::Arc, io};
 
 use zeroize::Zeroizing;
 
+use borsh::BorshDeserialize;
+
 use ciphersuite::*;
 use dalek_ff_group::Ristretto;
 
-use scale::Decode;
 use futures_channel::mpsc::UnboundedReceiver;
 use futures_util::{StreamExt, SinkExt};
 use ::tendermint::{

@@ -177,7 +178,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     let block_number = BlockNumber(blockchain.block_number());
 
     let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
-      Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
+      Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time
     } else {
       start_time
     };

@@ -276,8 +277,8 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     }
 
     let block = TendermintBlock(block.serialize());
-    let mut commit_ref = commit.as_ref();
-    let Ok(commit) = Commit::<Arc<Validators>>::decode(&mut commit_ref) else {
+    let mut commit_ref = commit.as_slice();
+    let Ok(commit) = Commit::<Arc<Validators>>::deserialize_reader(&mut commit_ref) else {
      log::error!("sent an invalidly serialized commit");
      return false;
    };

@@ -327,7 +328,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
 
       Some(&TENDERMINT_MESSAGE) => {
         let Ok(msg) =
-          SignedMessageFor::<TendermintNetwork<D, T, P>>::decode::<&[u8]>(&mut &msg[1 ..])
+          SignedMessageFor::<TendermintNetwork<D, T, P>>::deserialize_reader(&mut &msg[1 ..])
         else {
           log::error!("received invalid tendermint message");
           return false;

@@ -367,15 +368,17 @@ impl<D: Db, T: TransactionTrait> TributaryReader<D, T> {
     Blockchain::<D, T>::commit_from_db(&self.0, self.1, hash)
   }
   pub fn parsed_commit(&self, hash: &[u8; 32]) -> Option<Commit<Validators>> {
-    self.commit(hash).map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap())
+    self
+      .commit(hash)
+      .map(|commit| Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap())
   }
   pub fn block_after(&self, hash: &[u8; 32]) -> Option<[u8; 32]> {
     Blockchain::<D, T>::block_after(&self.0, self.1, hash)
   }
   pub fn time_of_block(&self, hash: &[u8; 32]) -> Option<u64> {
-    self
-      .commit(hash)
-      .map(|commit| Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time)
+    self.commit(hash).map(|commit| {
+      Commit::<Validators>::deserialize_reader(&mut commit.as_slice()).unwrap().end_time
+    })
   }
 
   pub fn locally_provided_txs_in_block(&self, hash: &[u8; 32], order: &str) -> bool {
@@ -21,7 +21,7 @@ use schnorr::{
 
 use serai_db::Db;
 
-use scale::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
 use tendermint::{
   SignedMessageFor,
   ext::{

@@ -248,7 +248,7 @@ impl Weights for Validators {
   }
 }
 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct TendermintBlock(pub Vec<u8>);
 impl BlockTrait for TendermintBlock {
   type Id = [u8; 32];

@@ -300,7 +300,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
   fn broadcast(&mut self, msg: SignedMessageFor<Self>) -> impl Send + Future<Output = ()> {
     async move {
       let mut to_broadcast = vec![TENDERMINT_MESSAGE];
-      to_broadcast.extend(msg.encode());
+      msg.serialize(&mut to_broadcast).unwrap();
       self.p2p.broadcast(self.genesis, to_broadcast).await
     }
   }

@@ -390,7 +390,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
       return invalid_block();
     };
 
-    let encoded_commit = commit.encode();
+    let encoded_commit = borsh::to_vec(&commit).unwrap();
     loop {
       let block_res = self.blockchain.write().await.add_block::<Self>(
         &block,
@@ -1,6 +1,6 @@
 use std::io;
 
-use scale::{Encode, Decode, IoReader};
+use borsh::BorshDeserialize;
 
 use blake2::{Digest, Blake2s256};

@@ -27,14 +27,14 @@ pub enum TendermintTx {
 
 impl ReadWrite for TendermintTx {
   fn read<R: io::Read>(reader: &mut R) -> io::Result<Self> {
-    Evidence::decode(&mut IoReader(reader))
+    Evidence::deserialize_reader(reader)
       .map(TendermintTx::SlashEvidence)
       .map_err(|_| io::Error::new(io::ErrorKind::InvalidData, "invalid evidence format"))
   }
 
   fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
     match self {
-      TendermintTx::SlashEvidence(ev) => writer.write_all(&ev.encode()),
+      TendermintTx::SlashEvidence(ev) => writer.write_all(&borsh::to_vec(&ev).unwrap()),
     }
   }
 }
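Hunks like this one repeat a single mechanical migration: SCALE's `encode()`/`decode()` become borsh's `to_vec`/`deserialize_reader`. A side-by-side sketch of the two round-trips, using a stand-in `Demo` type rather than any type from this codebase:

```rust
use borsh::{BorshSerialize, BorshDeserialize};
use parity_scale_codec::{Encode, Decode};

#[derive(PartialEq, Debug, Encode, Decode, BorshSerialize, BorshDeserialize)]
struct Demo {
  number: u64,
  hash: [u8; 32],
}

fn main() {
  let demo = Demo { number: 1, hash: [0; 32] };

  // SCALE round-trip, as the old code did it
  let scale_bytes = demo.encode();
  let scale_back = Demo::decode(&mut scale_bytes.as_slice()).unwrap();

  // Borsh round-trip, as the new code does it; deserialize_reader takes any
  // io::Read, so `&mut &[u8]` replaces SCALE's IoReader wrapper
  let borsh_bytes = borsh::to_vec(&demo).unwrap();
  let borsh_back = Demo::deserialize_reader(&mut borsh_bytes.as_slice()).unwrap();

  assert_eq!(scale_back, borsh_back);
}
```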
@@ -7,11 +7,9 @@ use rand::{RngCore, CryptoRng, rngs::OsRng};
 use blake2::{Digest, Blake2s256};
 
 use dalek_ff_group::Ristretto;
-use ciphersuite::*;
+use ciphersuite::{group::Group, *};
 use schnorr::SchnorrSignature;
 
-use scale::Encode;
-
 use ::tendermint::{
   ext::{Network, Signer as SignerTrait, SignatureScheme, BlockNumber, RoundNumber},
   SignedMessageFor, DataFor, Message, SignedMessage, Data, Evidence,

@@ -200,7 +198,7 @@ pub async fn signed_from_data<N: Network>(
     round: RoundNumber(round_number),
     data,
   };
-  let sig = signer.sign(&msg.encode()).await;
+  let sig = signer.sign(&borsh::to_vec(&msg).unwrap()).await;
   SignedMessage { msg, sig }
 }

@@ -213,5 +211,5 @@ pub async fn random_evidence_tx<N: Network>(
   let data = Data::Proposal(Some(RoundNumber(0)), b);
   let signer_id = signer.validator_id().await.unwrap();
   let signed = signed_from_data::<N>(signer, signer_id, 0, 0, data).await;
-  TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()))
+  TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()))
 }
@@ -6,8 +6,6 @@ use rand::{RngCore, rngs::OsRng};
 use dalek_ff_group::Ristretto;
 use ciphersuite::*;
 
-use scale::Encode;
-
 use tendermint::{
   time::CanonicalInstant,
   round::RoundData,

@@ -52,7 +50,10 @@ async fn invalid_valid_round() {
     async move {
       let data = Data::Proposal(valid_round, TendermintBlock(vec![]));
       let signed = signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, data).await;
-      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode())))
+      (
+        signed.clone(),
+        TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap())),
+      )
     }
   };

@@ -70,7 +71,8 @@ async fn invalid_valid_round() {
   let mut random_sig = [0u8; 64];
   OsRng.fill_bytes(&mut random_sig);
   signed.sig = random_sig;
-  let tx = TendermintTx::SlashEvidence(Evidence::InvalidValidRound(signed.encode()));
+  let tx =
+    TendermintTx::SlashEvidence(Evidence::InvalidValidRound(borsh::to_vec(&signed).unwrap()));
 
   // should fail
   assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());

@@ -90,7 +92,10 @@ async fn invalid_precommit_signature() {
       let signed =
         signed_from_data::<N>(signer.clone().into(), signer_id, 1, 0, Data::Precommit(precommit))
           .await;
-      (signed.clone(), TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode())))
+      (
+        signed.clone(),
+        TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap())),
+      )
     }
   };

@@ -120,7 +125,8 @@ async fn invalid_precommit_signature() {
   let mut random_sig = [0u8; 64];
   OsRng.fill_bytes(&mut random_sig);
   signed.sig = random_sig;
-  let tx = TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(signed.encode()));
+  let tx =
+    TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap()));
   assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
  }
 }
@@ -138,24 +144,32 @@ async fn evidence_with_prevote() {
   // it should fail for all reasons.
   let mut txs = vec![];
   txs.push(TendermintTx::SlashEvidence(Evidence::InvalidPrecommit(
-    signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-      .await
-      .encode(),
+    borsh::to_vec(
+      &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await,
+    )
+    .unwrap(),
   )));
   txs.push(TendermintTx::SlashEvidence(Evidence::InvalidValidRound(
-    signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-      .await
-      .encode(),
+    borsh::to_vec(
+      &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await,
+    )
+    .unwrap(),
   )));
   // Since these require a second message, provide this one again
   // ConflictingMessages can be fired for actually conflicting Prevotes however
   txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-    signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-      .await
-      .encode(),
-    signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
-      .await
-      .encode(),
+    borsh::to_vec(
+      &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await,
+    )
+    .unwrap(),
+    borsh::to_vec(
+      &signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
+        .await,
+    )
+    .unwrap(),
   )));
   txs
 }
@@ -189,16 +203,16 @@ async fn conflicting_msgs_evidence_tx() {
     // non-conflicting data should fail
     let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_1.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_1).unwrap(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
 
     // conflicting data should pass
     let signed_2 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();

@@ -206,16 +220,16 @@ async fn conflicting_msgs_evidence_tx() {
     // (except for Precommit)
     let signed_2 = signed_for_b_r(0, 1, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
 
     // Proposals for different block numbers should also fail as evidence
     let signed_2 = signed_for_b_r(1, 0, Data::Proposal(None, TendermintBlock(vec![0x22]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
   }

@@ -225,16 +239,16 @@ async fn conflicting_msgs_evidence_tx() {
     // non-conflicting data should fail
     let signed_1 = signed_for_b_r(0, 0, Data::Prevote(Some([0x11; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_1.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_1).unwrap(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
 
     // conflicting data should pass
     let signed_2 = signed_for_b_r(0, 0, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();

@@ -242,16 +256,16 @@ async fn conflicting_msgs_evidence_tx() {
     // (except for Precommit)
     let signed_2 = signed_for_b_r(0, 1, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
 
     // Proposals for different block numbers should also fail as evidence
     let signed_2 = signed_for_b_r(1, 0, Data::Prevote(Some([0x22; 32]))).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
   }

@@ -273,8 +287,8 @@ async fn conflicting_msgs_evidence_tx() {
       .await;
 
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
 
     // update schema so that we don't fail due to invalid signature

@@ -292,8 +306,8 @@ async fn conflicting_msgs_evidence_tx() {
     let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![]))).await;
     let signed_2 = signed_for_b_r(0, 0, Data::Prevote(None)).await;
     let tx = TendermintTx::SlashEvidence(Evidence::ConflictingMessages(
-      signed_1.encode(),
-      signed_2.encode(),
+      borsh::to_vec(&signed_1).unwrap(),
+      borsh::to_vec(&signed_2).unwrap(),
     ));
     assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
   }
@@ -21,7 +21,7 @@ thiserror = { version = "2", default-features = false, features = ["std"] }
 hex = { version = "0.4", default-features = false, features = ["std"] }
 log = { version = "0.4", default-features = false, features = ["std"] }
 
-parity-scale-codec = { version = "3", default-features = false, features = ["std", "derive"] }
+borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 futures-util = { version = "0.3", default-features = false, features = ["std", "async-await-macro", "sink", "channel"] }
 futures-channel = { version = "0.3", default-features = false, features = ["std", "sink"] }

@@ -3,33 +3,41 @@ use std::{sync::Arc, collections::HashSet};
 
 use thiserror::Error;
 
-use parity_scale_codec::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
 
 use crate::{SignedMessageFor, SlashEvent, commit_msg};
 
 /// An alias for a series of traits required for a type to be usable as a validator ID,
 /// automatically implemented for all types satisfying those traits.
 pub trait ValidatorId:
-  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode
+  Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize
 {
 }
-impl<V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + Encode + Decode> ValidatorId
-  for V
+#[rustfmt::skip]
+impl<
+  V: Send + Sync + Clone + Copy + PartialEq + Eq + Hash + Debug + BorshSerialize + BorshDeserialize,
+> ValidatorId for V
 {
 }
 
 /// An alias for a series of traits required for a type to be usable as a signature,
 /// automatically implemented for all types satisfying those traits.
-pub trait Signature: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {}
-impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode> Signature for S {}
+pub trait Signature:
+  Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
+{
+}
+impl<S: Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize> Signature
+  for S
+{
+}
 
 // Type aliases which are distinct according to the type system
 
 /// A struct containing a Block Number, wrapped to have a distinct type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub struct BlockNumber(pub u64);
 /// A struct containing a round number, wrapped to have a distinct type.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub struct RoundNumber(pub u32);
 
 /// A signer for a validator.
@@ -127,7 +135,7 @@ impl<S: SignatureScheme> SignatureScheme for Arc<S> {
 /// A commit for a specific block.
 ///
 /// The listed validators have weight exceeding the threshold for a valid commit.
-#[derive(PartialEq, Debug, Encode, Decode)]
+#[derive(PartialEq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct Commit<S: SignatureScheme> {
   /// End time of the round which created this commit, used as the start time of the next block.
   pub end_time: u64,

@@ -185,7 +193,7 @@ impl<W: Weights> Weights for Arc<W> {
 }
 
 /// Simplified error enum representing a block's validity.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, Encode, Decode)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, Error, BorshSerialize, BorshDeserialize)]
 pub enum BlockError {
   /// Malformed block which is wholly invalid.
   #[error("invalid block")]

@@ -197,9 +205,20 @@ pub enum BlockError {
 }
 
 /// Trait representing a Block.
-pub trait Block: Send + Sync + Clone + PartialEq + Eq + Debug + Encode + Decode {
+pub trait Block:
+  Send + Sync + Clone + PartialEq + Eq + Debug + BorshSerialize + BorshDeserialize
+{
   // Type used to identify blocks. Presumably a cryptographic hash of the block.
-  type Id: Send + Sync + Copy + Clone + PartialEq + Eq + AsRef<[u8]> + Debug + Encode + Decode;
+  type Id: Send
+    + Sync
+    + Copy
+    + Clone
+    + PartialEq
+    + Eq
+    + AsRef<[u8]>
+    + Debug
+    + BorshSerialize
+    + BorshDeserialize;
 
   /// Return the deterministic, unique ID for this block.
   fn id(&self) -> Self::Id;
@@ -1,5 +1,3 @@
-#![expect(clippy::cast_possible_truncation)]
-
 use core::fmt::Debug;
 
 use std::{

@@ -8,7 +6,7 @@ use std::{
   collections::{VecDeque, HashMap},
 };
 
-use parity_scale_codec::{Encode, Decode, IoReader};
+use borsh::{BorshSerialize, BorshDeserialize};
 
 use futures_channel::mpsc;
 use futures_util::{

@@ -43,14 +41,14 @@ pub fn commit_msg(end_time: u64, id: &[u8]) -> Vec<u8> {
   [&end_time.to_le_bytes(), id].concat()
 }
 
-#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, Encode, Decode)]
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub enum Step {
   Propose,
   Prevote,
   Precommit,
 }
 
-#[derive(Clone, Eq, Debug, Encode, Decode)]
+#[derive(Clone, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub enum Data<B: Block, S: Signature> {
   Proposal(Option<RoundNumber>, B),
   Prevote(Option<B::Id>),

@@ -92,7 +90,7 @@ impl<B: Block, S: Signature> Data<B, S> {
   }
 }
 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct Message<V: ValidatorId, B: Block, S: Signature> {
   pub sender: V,
   pub block: BlockNumber,

@@ -102,7 +100,7 @@ pub struct Message<V: ValidatorId, B: Block, S: Signature> {
 }
 
 /// A signed Tendermint consensus message to be broadcast to the other validators.
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub struct SignedMessage<V: ValidatorId, B: Block, S: Signature> {
   pub msg: Message<V, B, S>,
   pub sig: S,

@@ -119,18 +117,18 @@ impl<V: ValidatorId, B: Block, S: Signature> SignedMessage<V, B, S> {
     &self,
     signer: &Scheme,
   ) -> bool {
-    signer.verify(self.msg.sender, &self.msg.encode(), &self.sig)
+    signer.verify(self.msg.sender, &borsh::to_vec(&self.msg).unwrap(), &self.sig)
   }
 }
 
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub enum SlashReason {
   FailToPropose,
   InvalidBlock,
   InvalidProposer,
 }
 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub enum Evidence {
   ConflictingMessages(Vec<u8>, Vec<u8>),
   InvalidPrecommit(Vec<u8>),

@@ -161,7 +159,7 @@ pub type SignedMessageFor<N> = SignedMessage<
 >;
 
 pub fn decode_signed_message<N: Network>(mut data: &[u8]) -> Option<SignedMessageFor<N>> {
-  SignedMessageFor::<N>::decode(&mut data).ok()
+  SignedMessageFor::<N>::deserialize_reader(&mut data).ok()
 }
 
 fn decode_and_verify_signed_message<N: Network>(
@@ -341,7 +339,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
       target: "tendermint",
       "proposer for block {}, round {round:?} was {} (me: {res})",
       self.block.number.0,
-      hex::encode(proposer.encode()),
+      hex::encode(borsh::to_vec(&proposer).unwrap()),
     );
     res
   }

@@ -422,7 +420,11 @@ impl<N: Network + 'static> TendermintMachine<N> {
     // TODO: If the new slash event has evidence, emit to prevent a low-importance slash from
     // cancelling emission of high-importance slashes
     if !self.block.slashes.contains(&validator) {
-      log::info!(target: "tendermint", "Slashing validator {}", hex::encode(validator.encode()));
+      log::info!(
+        target: "tendermint",
+        "Slashing validator {}",
+        hex::encode(borsh::to_vec(&validator).unwrap()),
+      );
       self.block.slashes.insert(validator);
       self.network.slash(validator, slash_event).await;
     }

@@ -672,7 +674,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
           self
             .slash(
               msg.sender,
-              SlashEvent::WithEvidence(Evidence::InvalidPrecommit(signed.encode())),
+              SlashEvent::WithEvidence(Evidence::InvalidPrecommit(borsh::to_vec(&signed).unwrap())),
             )
             .await;
           Err(TendermintError::Malicious)?;

@@ -743,7 +745,10 @@ impl<N: Network + 'static> TendermintMachine<N> {
             self.broadcast(Data::Prevote(None));
           }
           self
-            .slash(msg.sender, SlashEvent::WithEvidence(Evidence::InvalidValidRound(msg.encode())))
+            .slash(
+              msg.sender,
+              SlashEvent::WithEvidence(Evidence::InvalidValidRound(borsh::to_vec(&msg).unwrap())),
+            )
             .await;
           Err(TendermintError::Malicious)?;
         }

@@ -1034,7 +1039,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
 
       while !messages.is_empty() {
         self.network.broadcast(
-          SignedMessageFor::<N>::decode(&mut IoReader(&mut messages))
+          SignedMessageFor::<N>::deserialize_reader(&mut messages)
            .expect("saved invalid message to DB")
        ).await;
      }

@@ -1059,7 +1064,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
     } {
       if our_message {
         assert!(sig.is_none());
-        sig = Some(self.signer.sign(&msg.encode()).await);
+        sig = Some(self.signer.sign(&borsh::to_vec(&msg).unwrap()).await);
       }
       let sig = sig.unwrap();

@@ -1079,7 +1084,7 @@ impl<N: Network + 'static> TendermintMachine<N> {
       let message_tape_key = message_tape_key(self.genesis);
       let mut txn = self.db.txn();
       let mut message_tape = txn.get(&message_tape_key).unwrap_or(vec![]);
-      message_tape.extend(signed_msg.encode());
+      signed_msg.serialize(&mut message_tape).unwrap();
       txn.put(&message_tape_key, message_tape);
       txn.commit();
     }
@@ -1,7 +1,5 @@
 use std::{sync::Arc, collections::HashMap};
 
-use parity_scale_codec::Encode;
-
 use crate::{ext::*, RoundNumber, Step, DataFor, SignedMessageFor, Evidence};
 
 type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;

@@ -39,7 +37,10 @@ impl<N: Network> MessageLog<N> {
           target: "tendermint",
           "Validator sent multiple messages for the same block + round + step"
         );
-        Err(Evidence::ConflictingMessages(existing.encode(), signed.encode()))?;
+        Err(Evidence::ConflictingMessages(
+          borsh::to_vec(&existing).unwrap(),
+          borsh::to_vec(&signed).unwrap(),
+        ))?;
       }
       return Ok(false);
     }

@@ -4,7 +4,7 @@ use std::{
   time::{UNIX_EPOCH, SystemTime, Duration},
 };
 
-use parity_scale_codec::{Encode, Decode};
+use borsh::{BorshSerialize, BorshDeserialize};
 
 use futures_util::sink::SinkExt;
 use tokio::{sync::RwLock, time::sleep};

@@ -89,7 +89,7 @@ impl Weights for TestWeights {
   }
 }
 
-#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
+#[derive(Clone, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 struct TestBlock {
   id: TestBlockId,
   valid: Result<(), BlockError>,
@@ -21,7 +21,6 @@ workspace = true
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
-scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }
 borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }
 
 blake2 = { version = "0.11.0-rc.0", default-features = false, features = ["alloc"] }

@@ -30,7 +29,7 @@ dalek-ff-group = { path = "../../crypto/dalek-ff-group", default-features = fals
 dkg = { path = "../../crypto/dkg", default-features = false, features = ["std"] }
 schnorr = { package = "schnorr-signatures", path = "../../crypto/schnorr", default-features = false, features = ["std"] }
 
-serai-client = { path = "../../substrate/client", default-features = false, features = ["serai", "borsh"] }
+serai-primitives = { path = "../../substrate/primitives", default-features = false, features = ["std"] }
 
 serai-db = { path = "../../common/db" }
 serai-task = { path = "../../common/task", version = "0.1" }

@@ -1,11 +1,8 @@
-#![expect(clippy::cast_possible_truncation)]
-
 use std::collections::HashMap;
 
-use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use serai_client::{primitives::SeraiAddress, validator_sets::primitives::ExternalValidatorSet};
+use serai_primitives::{address::SeraiAddress, validator_sets::ExternalValidatorSet};
 
 use messages::sign::{VariantSignId, SignId};

@@ -16,7 +13,7 @@ use serai_cosign::CosignIntent;
 use crate::transaction::SigningProtocolRound;
 
 /// A topic within the database which the group participates in
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub enum Topic {
   /// Vote to remove a participant
   RemoveParticipant {
@@ -125,7 +122,7 @@ impl Topic {
       Topic::DkgConfirmation { attempt, round: _ } => Some({
         let id = {
           let mut id = [0; 32];
-          let encoded_set = set.encode();
+          let encoded_set = borsh::to_vec(set).unwrap();
           id[.. encoded_set.len()].copy_from_slice(&encoded_set);
           VariantSignId::Batch(id)
         };

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![deny(missing_docs)]
 

@@ -8,9 +8,9 @@ use std::collections::HashMap;
 use ciphersuite::group::GroupEncoding;
 use dkg::Participant;
 
-use serai_client::{
-  primitives::SeraiAddress,
-  validator_sets::primitives::{ExternalValidatorSet, Slash},
+use serai_primitives::{
+  address::SeraiAddress,
+  validator_sets::{ExternalValidatorSet, Slash},
 };
 
 use serai_db::*;
@@ -12,10 +12,9 @@ use ciphersuite::{
 use dalek_ff_group::Ristretto;
 use schnorr::SchnorrSignature;
 
-use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};
 
-use serai_client::{primitives::SeraiAddress, validator_sets::primitives::MAX_KEY_SHARES_PER_SET};
+use serai_primitives::{address::SeraiAddress, validator_sets::MAX_KEY_SHARES_PER_SET};
 
 use messages::sign::VariantSignId;

@@ -29,7 +28,7 @@ use tributary_sdk::{
 use crate::db::Topic;
 
 /// The round this data is for, within a signing protocol.
-#[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
+#[derive(Clone, Copy, PartialEq, Eq, Debug, BorshSerialize, BorshDeserialize)]
 pub enum SigningProtocolRound {
   /// A preprocess.
   Preprocess,
@@ -242,19 +241,20 @@ impl TransactionTrait for Transaction {
   fn kind(&self) -> TransactionKind {
     match self {
       Transaction::RemoveParticipant { participant, signed } => TransactionKind::Signed(
-        (b"RemoveParticipant", participant).encode(),
+        borsh::to_vec(&(b"RemoveParticipant".as_slice(), participant)).unwrap(),
         signed.to_tributary_signed(0),
       ),
 
-      Transaction::DkgParticipation { signed, .. } => {
-        TransactionKind::Signed(b"DkgParticipation".encode(), signed.to_tributary_signed(0))
-      }
+      Transaction::DkgParticipation { signed, .. } => TransactionKind::Signed(
+        borsh::to_vec(b"DkgParticipation".as_slice()).unwrap(),
+        signed.to_tributary_signed(0),
+      ),
       Transaction::DkgConfirmationPreprocess { attempt, signed, .. } => TransactionKind::Signed(
-        (b"DkgConfirmation", attempt).encode(),
+        borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
         signed.to_tributary_signed(0),
       ),
       Transaction::DkgConfirmationShare { attempt, signed, .. } => TransactionKind::Signed(
-        (b"DkgConfirmation", attempt).encode(),
+        borsh::to_vec(&(b"DkgConfirmation".as_slice(), attempt)).unwrap(),
         signed.to_tributary_signed(1),
       ),

@@ -264,13 +264,14 @@ impl TransactionTrait for Transaction {
       Transaction::Batch { .. } => TransactionKind::Provided("Batch"),
 
       Transaction::Sign { id, attempt, round, signed, .. } => TransactionKind::Signed(
-        (b"Sign", id, attempt).encode(),
+        borsh::to_vec(&(b"Sign".as_slice(), id, attempt)).unwrap(),
         signed.to_tributary_signed(round.nonce()),
       ),
 
-      Transaction::SlashReport { signed, .. } => {
-        TransactionKind::Signed(b"SlashReport".encode(), signed.to_tributary_signed(0))
-      }
+      Transaction::SlashReport { signed, .. } => TransactionKind::Signed(
+        borsh::to_vec(b"SlashReport".as_slice()).unwrap(),
+        signed.to_tributary_signed(0),
+      ),
     }
   }
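A note on these order keys: Borsh length-prefixes slices, so serializing the `(domain, parameters)` tuple yields bytes whose domain tag can never be confused with another domain's, which plain byte concatenation would not guarantee. A standalone sketch of that property (the `u32` values are stand-ins for the real `attempt` type):

```rust
use borsh::to_vec;

fn main() {
  // Borsh writes a u32 length before the slice's bytes, then the remaining
  // fields in order, so distinct parameters yield distinct keys
  let key_a = to_vec(&(b"DkgConfirmation".as_slice(), 7u32)).unwrap();
  let key_b = to_vec(&(b"DkgConfirmation".as_slice(), 8u32)).unwrap();
  assert_ne!(key_a, key_b);

  // The first four bytes are the little-endian length of the domain tag,
  // followed by the tag itself
  assert_eq!(&key_a[.. 4], &15u32.to_le_bytes());
  assert_eq!(&key_a[4 .. 19], b"DkgConfirmation");
}
```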
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false }
+std-shims = { path = "../../common/std-shims", version = "0.1.4", default-features = false, optional = true }
 
 zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
 subtle = { version = "^2.4", default-features = false }

@@ -33,7 +33,7 @@ hex = { version = "0.4", default-features = false, features = ["std"] }
 ff-group-tests = { version = "0.13", path = "../ff-group-tests" }
 
 [features]
-alloc = ["zeroize/alloc", "digest/alloc", "ff/alloc"]
+alloc = ["std-shims", "zeroize/alloc", "digest/alloc", "ff/alloc"]
 std = [
   "alloc",

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use zeroize::Zeroize;
@@ -1,10 +1,12 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("lib.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
 
 use core::fmt::Debug;
 #[cfg(feature = "alloc")]
 #[allow(unused_imports)]
 use std_shims::prelude::*;
+#[cfg(feature = "alloc")]
+use std_shims::io::{self, Read};
 
 use subtle::{CtOption, ConstantTimeEq, ConditionallySelectable};

@@ -110,6 +112,7 @@ pub trait GroupCanonicalEncoding: WrappedGroup {
 }
 
 /// `std::io` extensions for `GroupCanonicalEncoding`.
+#[cfg(feature = "alloc")]
 #[allow(non_snake_case)]
 pub trait GroupIo: GroupCanonicalEncoding {
   /// Read a canonical field element from something implementing `std::io::Read`.

@@ -126,6 +129,8 @@ pub trait GroupIo: GroupCanonicalEncoding {
   }
 
   /// Read a canonical point from something implementing `std::io::Read`.
+  #[cfg(feature = "alloc")]
+  #[allow(non_snake_case)]
   fn read_G<R: Read>(reader: &mut R) -> io::Result<Self::G> {
     let mut bytes = <Self::G as GroupEncoding>::Repr::default();
     reader.read_exact(bytes.as_mut())?;
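A quick usage sketch of the now-gated reader. This is hypothetical code, not from the repository: it assumes the surrounding crate's `GroupIo` trait is in scope and relies only on the `read_G` signature shown above.

```rust
// Parse a group element off the front of a payload, for any C: GroupIo.
// read_G consumes exactly the canonical encoding's length from the reader,
// erroring on a non-canonical encoding.
fn parse_point_then_rest<C: GroupIo>(
  mut payload: &[u8],
) -> std_shims::io::Result<(C::G, Vec<u8>)> {
  let point = C::read_G(&mut payload)?;
  Ok((point, payload.to_vec()))
}
```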
@@ -1,5 +1,5 @@
 #![allow(deprecated)]
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![no_std] // Prevents writing new code, in what should be a simple wrapper, which requires std
 #![doc = include_str!("../README.md")]
 #![allow(clippy::redundant_closure_call)]

@@ -21,14 +21,21 @@ zeroize = { version = "^1.5", default-features = false, features = ["zeroize_der
 
 thiserror = { version = "2", default-features = false }
 
-std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }
+std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
+
+borsh = { version = "1", default-features = false, features = ["derive", "de_strict_order"], optional = true }
 
 ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
 
 [features]
 std = [
   "thiserror/std",
 
+  "std-shims/std",
+
+  "borsh?/std",
+
   "ciphersuite/std",
 ]
+borsh = ["dep:borsh"]
 default = ["std"]

@@ -20,7 +20,7 @@ workspace = true
 zeroize = { version = "^1.5", default-features = false }
 rand_core = { version = "0.6", default-features = false }
 
-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
+std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
 
 ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }
 dkg = { path = "../", version = "0.6", default-features = false }
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]
 

@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false, features = ["alloc"] }
 
 zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }
 
-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
+std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
 
 transcript = { package = "flexible-transcript", path = "../../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }

@@ -34,10 +34,10 @@ generic-array = { version = "1", default-features = false, features = ["alloc"]
 blake2 = { version = "0.11.0-rc.2", default-features = false }
 rand_chacha = { version = "0.3", default-features = false }
 
-generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
-ec-divisors = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
-generalized-bulletproofs-circuit-abstraction = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
-generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false }
+generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
+ec-divisors = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
+generalized-bulletproofs-circuit-abstraction = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
+generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false }
 
 dkg = { path = "..", default-features = false }

@@ -52,7 +52,7 @@ rand = { version = "0.8", default-features = false, features = ["std"] }
 ciphersuite = { path = "../../ciphersuite", default-features = false, features = ["std"] }
 embedwards25519 = { path = "../../embedwards25519", default-features = false, features = ["std"] }
 dalek-ff-group = { path = "../../dalek-ff-group", default-features = false, features = ["std"] }
-generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", features = ["tests"] }
+generalized-bulletproofs = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", features = ["tests"] }
 dkg-recovery = { path = "../recovery" }
 
 [features]
@@ -26,9 +26,21 @@ presented in section 4.2 is extended, with the following changes:
   just one round.
 
 For a gist of the verifiable encryption scheme, please see
-https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. For
-security proofs and audit information, please see
-[here](../../../audits/crypto/dkg/evrf).
+https://gist.github.com/kayabaNerve/cfbde74b0660dfdf8dd55326d6ec33d7. Security
+proofs are currently being worked on.
+
+---
+
+This library relies on an implementation of Bulletproofs and various
+zero-knowledge gadgets. It uses
+[`generalized-bulletproofs`](https://docs.rs/generalized-bulletproofs),
+[`generalized-bulletproofs-circuit-abstraction`](https://docs.rs/generalized-bulletproofs-circuit-abstraction),
+and
+[`generalized-bulletproofs-ec-gadgets`](https://docs.rs/generalized-bulletproofs-ec-gadgets)
+from the Monero project's FCMP++ codebase. These libraries have received the
+following audits:
+- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/generalized-bulletproofs
+- https://github.com/kayabaNerve/monero-oxide/tree/fcmp++/audits/fcmps
 
 ---
@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -23,7 +23,7 @@ rand_core = { version = "0.6", default-features = false }
 
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
 
-std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false, features = ["alloc"] }
+std-shims = { version = "0.1", path = "../../../common/std-shims", default-features = false }
 
 multiexp = { path = "../../multiexp", version = "0.4", default-features = false }
 ciphersuite = { path = "../../ciphersuite", version = "^0.4.1", default-features = false }

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]
@@ -22,6 +22,7 @@ use ciphersuite::{
 
 /// The ID of a participant, defined as a non-zero u16.
 #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Zeroize)]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub struct Participant(u16);
 impl Participant {
   /// Create a new Participant identifier from a u16.

@@ -128,8 +129,18 @@ pub enum DkgError {
   NotParticipating,
 }
 
+// Manually implements BorshDeserialize so we can enforce it's a valid index
+#[cfg(feature = "borsh")]
+impl borsh::BorshDeserialize for Participant {
+  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    Participant::new(u16::deserialize_reader(reader)?)
+      .ok_or_else(|| io::Error::other("invalid participant"))
+  }
+}
+
 /// Parameters for a multisig.
 #[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
+#[cfg_attr(feature = "borsh", derive(borsh::BorshSerialize))]
 pub struct ThresholdParams {
   /// Participants needed to sign on behalf of the group.
   t: u16,

@@ -199,6 +210,16 @@ impl ThresholdParams {
   }
 }
 
+#[cfg(feature = "borsh")]
+impl borsh::BorshDeserialize for ThresholdParams {
+  fn deserialize_reader<R: io::Read>(reader: &mut R) -> io::Result<Self> {
+    let t = u16::deserialize_reader(reader)?;
+    let n = u16::deserialize_reader(reader)?;
+    let i = Participant::deserialize_reader(reader)?;
+    ThresholdParams::new(t, n, i).map_err(|e| io::Error::other(format!("{e:?}")))
+  }
+}
+
 /// A method of interpolation.
 #[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
 pub enum Interpolation<F: Zeroize + PrimeField> {
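These manual impls reject malformed values at deserialization time rather than trusting the wire format. A small illustration of that guarantee, assuming the `dkg` crate is built with its `borsh` feature:

```rust
use dkg::Participant;

fn main() {
  // A valid, non-zero index round-trips
  let participant = Participant::new(3).unwrap();
  let bytes = borsh::to_vec(&participant).unwrap();
  assert_eq!(borsh::from_slice::<Participant>(&bytes).unwrap(), participant);

  // A zero index is rejected by the manual BorshDeserialize impl,
  // so an invalid index can never enter the system via deserialization
  assert!(borsh::from_slice::<Participant>(&0u16.to_le_bytes()).is_err());
}
```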
@@ -33,6 +33,6 @@ rand_core = { version = "0.6", default-features = false, features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }
 
 [features]
-alloc = ["zeroize/alloc", "sha3/alloc", "prime-field/alloc", "ciphersuite/alloc"]
+alloc = ["zeroize/alloc", "sha3/alloc", "crypto-bigint/alloc", "prime-field/alloc", "ciphersuite/alloc"]
 std = ["alloc", "zeroize/std", "prime-field/std", "ciphersuite/std"]
 default = ["std"]

@@ -1,4 +1,4 @@
-#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![no_std]

@@ -16,7 +16,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 [dependencies]
 hex-literal = { version = "1", default-features = false }
 
-std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false }
+std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, optional = true }
 
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

@@ -29,7 +29,7 @@ curve25519-dalek = { version = "4", default-features = false, features = ["legac
 blake2 = { version = "0.11.0-rc.2", default-features = false }
 ciphersuite = { path = "../ciphersuite", version = "0.4", default-features = false }
 
-generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "dc1b3dbe436aae61ec363505052d4715d38ce1df", default-features = false, optional = true }
+generalized-bulletproofs-ec-gadgets = { git = "https://github.com/monero-oxide/monero-oxide", rev = "7216a2e84c7671c167c3d81eafe0d2b1f418f102", default-features = false, optional = true }
 
 [dev-dependencies]
 hex = "0.4"

@@ -39,6 +39,6 @@ rand_core = { version = "0.6", features = ["std"] }
 ff-group-tests = { path = "../ff-group-tests" }
 
 [features]
-alloc = ["zeroize/alloc", "prime-field/alloc", "short-weierstrass/alloc", "curve25519-dalek/alloc", "blake2/alloc", "ciphersuite/alloc", "generalized-bulletproofs-ec-gadgets"]
+alloc = ["std-shims", "zeroize/alloc", "prime-field/alloc", "short-weierstrass/alloc", "curve25519-dalek/alloc", "blake2/alloc", "ciphersuite/alloc", "generalized-bulletproofs-ec-gadgets"]
 std = ["alloc", "std-shims/std", "zeroize/std", "prime-field/std", "short-weierstrass/std", "ciphersuite/std", "generalized-bulletproofs-ec-gadgets/std"]
 default = ["std"]
@@ -1,7 +1,8 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

+#[cfg(feature = "alloc")]
 #[allow(unused_imports)]
 use std_shims::prelude::*;
@@ -1,4 +1,4 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![no_std]
+
 /// Tests for the Field trait.
@@ -17,35 +17,33 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
-std-shims = { version = "0.1", path = "../../common/std-shims", default-features = false, features = ["alloc"] }
-
-thiserror = { version = "2", default-features = false }
+thiserror = { version = "2", default-features = false, features = ["std"] }

-rand_core = { version = "0.6", default-features = false, features = ["alloc"] }
-rand_chacha = { version = "0.3", default-features = false }
+rand_core = { version = "0.6", default-features = false, features = ["std"] }
+rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

-zeroize = { version = "^1.5", default-features = false, features = ["alloc", "zeroize_derive"] }
-subtle = { version = "^2.4", default-features = false }
+zeroize = { version = "^1.5", default-features = false, features = ["std", "zeroize_derive"] }
+subtle = { version = "^2.4", default-features = false, features = ["std"] }

-hex = { version = "0.4", default-features = false, features = ["alloc"], optional = true }
+hex = { version = "0.4", default-features = false, features = ["std"], optional = true }

-transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["recommended"] }
+transcript = { package = "flexible-transcript", path = "../transcript", version = "^0.3.2", default-features = false, features = ["std", "recommended"] }

-dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["alloc"], optional = true }
-minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["alloc"], optional = true }
+dalek-ff-group = { path = "../dalek-ff-group", version = "0.5", default-features = false, features = ["std"], optional = true }
+minimal-ed448 = { path = "../ed448", version = "0.4", default-features = false, features = ["std"], optional = true }

-ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["alloc"] }
+ciphersuite = { path = "../ciphersuite", version = "^0.4.1", default-features = false, features = ["std"] }
 sha2 = { version = "0.10.0", default-features = false, optional = true }
 elliptic-curve = { version = "0.13", default-features = false, features = ["hash2curve"], optional = true }
-ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["alloc"], optional = true }
+ciphersuite-kp256 = { path = "../ciphersuite/kp256", version = "0.4", default-features = false, features = ["std"], optional = true }

-multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["alloc", "batch"] }
+multiexp = { path = "../multiexp", version = "0.4", default-features = false, features = ["std", "batch"] }

-schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["alloc"] }
+schnorr = { package = "schnorr-signatures", path = "../schnorr", version = "^0.5.1", default-features = false, features = ["std"] }

-dkg = { path = "../dkg", version = "0.6.1", default-features = false }
-dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, optional = true }
-dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, optional = true }
+dkg = { path = "../dkg", version = "0.6.1", default-features = false, features = ["std"] }
+dkg-recovery = { path = "../dkg/recovery", version = "0.6", default-features = false, features = ["std"], optional = true }
+dkg-dealer = { path = "../dkg/dealer", version = "0.6", default-features = false, features = ["std"], optional = true }

 [dev-dependencies]
 hex = "0.4"

@@ -56,38 +54,6 @@ dkg-recovery = { path = "../dkg/recovery", default-features = false, features =
 dkg-dealer = { path = "../dkg/dealer", default-features = false, features = ["std"] }

 [features]
-std = [
-  "std-shims/std",
-
-  "thiserror/std",
-
-  "rand_core/std",
-  "rand_chacha/std",
-
-  "zeroize/std",
-  "subtle/std",
-
-  "hex?/std",
-
-  "transcript/std",
-
-  "dalek-ff-group?/std",
-  "minimal-ed448?/std",
-
-  "ciphersuite/std",
-  "sha2?/std",
-  "elliptic-curve?/std",
-  "ciphersuite-kp256?/std",
-
-  "multiexp/std",
-
-  "schnorr/std",
-
-  "dkg/std",
-  "dkg-recovery?/std",
-  "dkg-dealer?/std",
-]
-
 ed25519 = ["dalek-ff-group"]
 ristretto = ["dalek-ff-group"]

@@ -97,5 +63,3 @@ p256 = ["sha2", "elliptic-curve", "ciphersuite-kp256"]
 ed448 = ["minimal-ed448"]

 tests = ["hex", "rand_core/getrandom", "dkg-dealer", "dkg-recovery"]
-
-default = ["std"]
@@ -1,7 +1,5 @@
 use core::{marker::PhantomData, fmt::Debug};
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-use std_shims::io::{self, Read, Write};
+use std::io::{self, Read, Write};

 use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng};
@@ -28,10 +26,8 @@ impl<A: Send + Sync + Clone + PartialEq + Debug + WriteAddendum> Addendum for A

 /// Algorithm trait usable by the FROST signing machine to produce signatures.
 pub trait Algorithm<C: Curve>: Send + Sync {
-  /// The transcript format this algorithm uses.
-  ///
-  /// This MUST NOT be the IETF-compatible transcript included in this crate UNLESS this is an
-  /// IETF-specified ciphersuite.
+  /// The transcript format this algorithm uses. This likely should NOT be the IETF-compatible
+  /// transcript included in this crate.
   type Transcript: Sync + Clone + Debug + Transcript;
   /// Serializable addendum, used in algorithms requiring more data than just the nonces.
   type Addendum: Addendum;
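Where an algorithm picks its transcript, the workspace's flexible-transcript crate offers a safer default than the IETF one. A hedged sketch (type name per that crate; the impl shape is illustrative, not code from this diff):

```rust
use transcript::RecommendedTranscript;

// For a larger protocol, bind the algorithm to a real transcript, e.g.:
// impl<C: Curve> Algorithm<C> for MyAlgorithm {
//   type Transcript = RecommendedTranscript;
//   /* remaining items elided */
// }
type ProtocolTranscript = RecommendedTranscript;
```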
@@ -71,10 +67,8 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   ) -> Result<(), FrostError>;

   /// Sign a share with the given secret/nonce.
-  ///
   /// The secret will already have had its lagrange coefficient applied so it is the necessary
   /// key share.
-  ///
   /// The nonce will already have been processed into the combined form d + (e * p).
   fn sign_share(
     &mut self,
@@ -89,7 +83,6 @@ pub trait Algorithm<C: Curve>: Send + Sync {
   fn verify(&self, group_key: C::G, nonces: &[Vec<C::G>], sum: C::F) -> Option<Self::Signature>;

   /// Verify a specific share given as a response.
-  ///
   /// This function should return a series of pairs whose products should sum to zero for a valid
   /// share. Any error raised is treated as the share being invalid.
   #[allow(clippy::type_complexity, clippy::result_unit_err)]
@@ -104,10 +97,8 @@ pub trait Algorithm<C: Curve>: Send + Sync {
 mod sealed {
   pub use super::*;

-  /// IETF-compliant transcript.
-  ///
-  /// This is incredibly naive and MUST NOT be used within larger protocols. No guarantees are made
-  /// about its safety EXCEPT as used with the IETF-specified FROST ciphersuites.
+  /// IETF-compliant transcript. This is incredibly naive and should not be used within larger
+  /// protocols.
   #[derive(Clone, Debug)]
   pub struct IetfTranscript(pub(crate) Vec<u8>);
   impl Transcript for IetfTranscript {
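The hunk above softens the old MUST-NOT wording, but the underlying caveat stands: a bare `Vec<u8>` transcript just concatenates messages, so distinct message sequences can collide. A self-contained illustration (not this crate's API):

```rust
// Naive transcripts append bytes with no framing, so the boundary between
// messages is lost and two different inputs hash identically.
fn concat(msgs: &[&[u8]]) -> Vec<u8> {
  let mut transcript = Vec::new();
  for msg in msgs {
    transcript.extend_from_slice(msg);
  }
  transcript
}

fn main() {
  // ["ab", "c"] and ["a", "bc"] yield the same transcript bytes.
  assert_eq!(concat(&[b"ab", b"c"]), concat(&[b"a", b"bc"]));
}
```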
@@ -138,7 +129,6 @@ pub(crate) use sealed::IetfTranscript;
 /// HRAm usable by the included Schnorr signature algorithm to generate challenges.
 pub trait Hram<C: Curve>: Send + Sync + Clone {
   /// HRAm function to generate a challenge.
-  ///
   /// H2 from the IETF draft, despite having a different argument set (not being pre-formatted).
   #[allow(non_snake_case)]
   fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F;
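For reference, an `Hram` implementation generally hashes the nonce commitment, key, and message into a challenge scalar. A hedged sketch assuming this crate's `Curve`/`Hram` traits and the ciphersuite crate's `hash_to_F`; the encoding and domain separator are illustrative:

```rust
use group::GroupEncoding;

#[derive(Clone)]
struct ExampleHram;
impl<C: Curve> Hram<C> for ExampleHram {
  #[allow(non_snake_case)]
  fn hram(R: &C::G, A: &C::G, m: &[u8]) -> C::F {
    // Serialize R || A || m, then hash to a field element.
    let mut msg = Vec::new();
    msg.extend_from_slice(R.to_bytes().as_ref());
    msg.extend_from_slice(A.to_bytes().as_ref());
    msg.extend_from_slice(m);
    C::hash_to_F(b"ExampleHram", &msg)
  }
}
```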
@@ -1,7 +1,5 @@
 use core::{ops::Deref, convert::AsRef};
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-use std_shims::io::{self, Read};
+use std::io::{self, Read};

 use rand_core::{RngCore, CryptoRng};
@@ -1,11 +1,8 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
-#![cfg_attr(not(feature = "std"), no_std)]

 use core::fmt::Debug;
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-use std_shims::collections::HashMap;
+use std::collections::HashMap;

 use thiserror::Error;
@@ -6,9 +6,7 @@
 // Each nonce remains of the form (d, e) and made into a proper nonce with d + (e * b)

 use core::ops::Deref;
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-use std_shims::{
+use std::{
   io::{self, Read, Write},
   collections::HashMap,
 };
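The comment at the top of this hunk summarizes the binding step; a minimal generic sketch of that arithmetic (names illustrative, not this crate's internal API):

```rust
use group::Group;

// Each nonce pair (d, e) becomes the single nonce d + (e * b),
// where b is the per-signer binding factor.
fn combine_nonce<G: Group>(d: G::Scalar, e: G::Scalar, b: G::Scalar) -> G::Scalar {
  d + (e * b)
}

// The public commitments combine identically, so verifiers derive D + (E * b).
fn combine_commitments<G: Group>(big_d: G, big_e: G, b: G::Scalar) -> G {
  big_d + (big_e * b)
}
```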
@@ -1,7 +1,5 @@
 use core::{ops::Deref, fmt::Debug};
-#[allow(unused_imports)]
-use std_shims::prelude::*;
-use std_shims::{
+use std::{
   io::{self, Read, Write},
   collections::HashMap,
 };
@@ -102,7 +100,6 @@ pub trait PreprocessMachine: Send {
   type SignMachine: SignMachine<Self::Signature, Preprocess = Self::Preprocess>;

   /// Perform the preprocessing round required in order to sign.
-  ///
   /// Returns a preprocess message to be broadcast to all participants, over an authenticated
   /// channel.
   fn preprocess<R: RngCore + CryptoRng>(self, rng: &mut R)
@@ -236,8 +233,6 @@ pub trait SignMachine<S>: Send + Sync + Sized {
   /// Takes in the participants' preprocess messages. Returns the signature share to be broadcast
   /// to all participants, over an authenticated channel. The parties who participate here will
   /// become the signing set for this session.
-  ///
-  /// The caller MUST only use preprocesses obtained via this machine's `read_preprocess` function.
   fn sign(
     self,
     commitments: HashMap<Participant, Self::Preprocess>,
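For orientation, the intended call order across these machine traits, per the docs above (a hedged outline; transport and error handling elided):

```rust
// One signer's pass through the machines. `preprocesses` and `shares` arrive
// over the authenticated broadcast channel the documentation requires.
//
// let (machine, my_preprocess) = preprocess_machine.preprocess(&mut rng);
// broadcast(my_preprocess);
// let (machine, my_share) = machine.sign(preprocesses, msg)?;
// broadcast(my_share);
// let signature = machine.complete(shares)?;
```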
@@ -424,10 +419,7 @@ pub trait SignatureMachine<S>: Send + Sync {
   fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare>;

   /// Complete signing.
-  ///
   /// Takes in everyone else's shares. Returns the signature.
-  ///
-  /// The caller MUST only use shares obtained via this machine's `read_shares` function.
   fn complete(self, shares: HashMap<Participant, Self::SignatureShare>) -> Result<S, FrostError>;
 }
@@ -1,4 +1,4 @@
-use std_shims::collections::HashMap;
+use std::collections::HashMap;

 use rand_core::{RngCore, CryptoRng};
@@ -1,4 +1,4 @@
-use std_shims::io::{self, Read};
+use std::io::{self, Read};

 use zeroize::Zeroizing;
@@ -1,8 +1,8 @@
 use core::ops::Deref;

-use std_shims::collections::HashMap;
+use std::collections::HashMap;
 #[cfg(test)]
-use core::str::FromStr;
+use std::str::FromStr;

 use zeroize::Zeroizing;
@@ -17,6 +17,10 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true

 [dependencies]
+rustversion = "1"
+
+std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false }
+
 zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }

 ff = { version = "0.13", default-features = false, features = ["bits"] }

@@ -31,9 +35,8 @@ k256 = { version = "^0.13.1", default-features = false, features = ["arithmetic"
 dalek-ff-group = { path = "../dalek-ff-group" }

 [features]
-alloc = ["zeroize/alloc"]
-std = ["alloc", "zeroize/std", "ff/std", "rand_core?/std"]
+std = ["std-shims/std", "zeroize/std", "ff/std", "rand_core?/std"]

-batch = ["alloc", "rand_core"]
+batch = ["rand_core"]

 default = ["std"]
@@ -12,6 +12,5 @@ culminating in commit
 [669d2dbffc1dafb82a09d9419ea182667115df06](https://github.com/serai-dex/serai/tree/669d2dbffc1dafb82a09d9419ea182667115df06).
 Any subsequent changes have not undergone auditing.

-This library is usable under no-`std` and no-`alloc`. With the `alloc` feature,
-the library is fully functional. Without the `alloc` feature, the `multiexp`
-function is shimmed with a serial implementation.
+This library is usable under no_std, via alloc, when the default features are
+disabled.
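The serial fallback the removed paragraph describes amounts to a plain fold; restated as a hedged sketch:

```rust
use group::Group;

// Without the table-based algorithms, a multiexp degrades to summing the
// individual scalar-point products.
fn serial_multiexp<G: Group>(pairs: &[(G::Scalar, G)]) -> G {
  pairs.iter().map(|(scalar, point)| *point * *scalar).sum()
}
```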
@@ -1,4 +1,4 @@
-use alloc::vec::Vec;
+use std_shims::vec::Vec;

 use rand_core::{RngCore, CryptoRng};
@@ -1,178 +1,201 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
 #![doc = include_str!("../README.md")]
 #![cfg_attr(not(feature = "std"), no_std)]

-#[cfg(feature = "alloc")]
+#[cfg(not(feature = "std"))]
 #[macro_use]
 extern crate alloc;

+#[allow(unused_imports)]
+use std_shims::prelude::*;
+use std_shims::vec::Vec;
+
 use zeroize::Zeroize;

 use ff::PrimeFieldBits;
 use group::Group;

-#[cfg(feature = "alloc")]
 mod straus;
-#[cfg(feature = "alloc")]
-use straus::*;
+pub(crate) use straus::*;

 mod pippenger;
-use pippenger::*;
+pub(crate) use pippenger::*;

 #[cfg(feature = "batch")]
 mod batch;
 #[cfg(feature = "batch")]
 pub use batch::BatchVerifier;

-#[cfg(all(test, feature = "alloc"))]
+#[cfg(test)]
 mod tests;
-#[cfg(feature = "alloc")]
-mod underlying {
-  use super::*;
-
-  use core::hint::black_box;
-  use alloc::{vec, vec::Vec};
-
-  fn u8_from_bool(bit_ref: &mut bool) -> u8 {
-    let bit_ref = black_box(bit_ref);
-
-    let mut bit = black_box(*bit_ref);
-    #[allow(clippy::cast_lossless)]
-    let res = black_box(bit as u8);
-    bit.zeroize();
-    debug_assert!((res | 1) == 1);
-
-    bit_ref.zeroize();
-    res
-  }
+// Use black_box when possible
+#[rustversion::since(1.66)]
+use core::hint::black_box;
+#[rustversion::before(1.66)]
+fn black_box<T>(val: T) -> T {
+  val
+}
+
+fn u8_from_bool(bit_ref: &mut bool) -> u8 {
+  let bit_ref = black_box(bit_ref);
+
+  let mut bit = black_box(*bit_ref);
+  #[allow(clippy::cast_lossless)]
+  let res = black_box(bit as u8);
+  bit.zeroize();
+  debug_assert!((res | 1) == 1);
+
+  bit_ref.zeroize();
+  res
+}
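The `u8_from_bool` shown on both sides converts a secret bool to 0/1 behind an optimization barrier. A standalone restatement of the pattern (not the crate's private function itself):

```rust
use core::hint::black_box;
use zeroize::Zeroize;

// Route the secret bit through black_box so the compiler treats it as opaque
// and cannot branch on its value, then zeroize both copies.
fn u8_from_bool_demo(bit_ref: &mut bool) -> u8 {
  let mut bit = black_box(*bit_ref);
  let res = black_box(bit as u8);
  bit.zeroize();
  bit_ref.zeroize();
  res
}

fn main() {
  let mut secret = true;
  assert_eq!(u8_from_bool_demo(&mut secret), 1);
  // The caller's copy is zeroized as a side effect.
  assert!(!secret);
}
```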
-  // Convert scalars to `window`-sized bit groups, as needed to index a table
-  // This algorithm works for `window <= 8`
-  pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
-    pairs: &[(G::Scalar, G)],
-    window: u8,
-  ) -> Vec<Vec<u8>> {
-    let w_usize = usize::from(window);
-
-    let mut groupings = vec![];
-    for pair in pairs {
-      let p = groupings.len();
-      let mut bits = pair.0.to_le_bits();
-      groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
-
-      for (i, mut bit) in bits.iter_mut().enumerate() {
-        let mut bit = u8_from_bool(&mut bit);
-        groupings[p][i / w_usize] |= bit << (i % w_usize);
-        bit.zeroize();
-      }
-    }
-
-    groupings
-  }
+// Convert scalars to `window`-sized bit groups, as needed to index a table
+// This algorithm works for `window <= 8`
+pub(crate) fn prep_bits<G: Group<Scalar: PrimeFieldBits>>(
+  pairs: &[(G::Scalar, G)],
+  window: u8,
+) -> Vec<Vec<u8>> {
+  let w_usize = usize::from(window);
+
+  let mut groupings = vec![];
+  for pair in pairs {
+    let p = groupings.len();
+    let mut bits = pair.0.to_le_bits();
+    groupings.push(vec![0; bits.len().div_ceil(w_usize)]);
+
+    for (i, mut bit) in bits.iter_mut().enumerate() {
+      let mut bit = u8_from_bool(&mut bit);
+      groupings[p][i / w_usize] |= bit << (i % w_usize);
+      bit.zeroize();
+    }
+  }
+
+  groupings
+}
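To make the windowing concrete: with `window = 4`, prep_bits packs little-endian bits into nibbles usable as indices into a 16-entry table. A simplified, non-constant-time sketch:

```rust
// Pack little-endian bits into `window`-sized groups, mirroring the indexing
// layout prep_bits produces (without the zeroizing or barrier machinery).
fn pack_bits(bits: &[bool], window: usize) -> Vec<u8> {
  let mut groups = vec![0u8; bits.len().div_ceil(window)];
  for (i, bit) in bits.iter().enumerate() {
    groups[i / window] |= (*bit as u8) << (i % window);
  }
  groups
}

fn main() {
  // 0x1234's little-endian nibbles are [0x4, 0x3, 0x2, 0x1].
  let n: u16 = 0x1234;
  let bits: Vec<bool> = (0 .. 16).map(|i| ((n >> i) & 1) == 1).collect();
  assert_eq!(pack_bits(&bits, 4), vec![0x4, 0x3, 0x2, 0x1]);
}
```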
-  #[derive(Clone, Copy, PartialEq, Eq, Debug)]
-  enum Algorithm {
-    Null,
-    Single,
-    Straus(u8),
-    Pippenger(u8),
-  }
-
-  // These are 'rule of thumb's obtained via benchmarking `k256` and `curve25519-dalek`
-  fn algorithm(len: usize) -> Algorithm {
-    #[cfg(not(debug_assertions))]
-    if len == 0 {
-      Algorithm::Null
-    } else if len == 1 {
-      Algorithm::Single
-    } else if len < 10 {
-      // Straus 2 never showed a performance benefit, even with just 2 elements
-      Algorithm::Straus(3)
-    } else if len < 20 {
-      Algorithm::Straus(4)
-    } else if len < 50 {
-      Algorithm::Straus(5)
-    } else if len < 100 {
-      Algorithm::Pippenger(4)
-    } else if len < 125 {
-      Algorithm::Pippenger(5)
-    } else if len < 275 {
-      Algorithm::Pippenger(6)
-    } else if len < 400 {
-      Algorithm::Pippenger(7)
-    } else {
-      Algorithm::Pippenger(8)
-    }
-
-    #[cfg(debug_assertions)]
-    if len == 0 {
-      Algorithm::Null
-    } else if len == 1 {
-      Algorithm::Single
-    } else if len < 10 {
-      Algorithm::Straus(3)
-    } else if len < 80 {
-      Algorithm::Straus(4)
-    } else if len < 100 {
-      Algorithm::Straus(5)
-    } else if len < 125 {
-      Algorithm::Pippenger(4)
-    } else if len < 275 {
-      Algorithm::Pippenger(5)
-    } else if len < 475 {
-      Algorithm::Pippenger(6)
-    } else if len < 750 {
-      Algorithm::Pippenger(7)
-    } else {
-      Algorithm::Pippenger(8)
-    }
-  }
-
-  /// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
-  /// amount of pairs.
-  pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
-    pairs: &[(G::Scalar, G)],
-  ) -> G {
-    match algorithm(pairs.len()) {
-      Algorithm::Null => Group::identity(),
-      Algorithm::Single => pairs[0].1 * pairs[0].0,
-      // These functions panic if called without any pairs
-      Algorithm::Straus(window) => straus(pairs, window),
-      Algorithm::Pippenger(window) => pippenger(pairs, window),
-    }
-  }
+#[derive(Clone, Copy, PartialEq, Eq, Debug)]
+enum Algorithm {
+  Null,
+  Single,
+  Straus(u8),
+  Pippenger(u8),
+}
+/*
+  Release (with runs 20, so all of these are off by 20x):
+
+  k256
+  Straus 3 is more efficient at 5 with 678µs per
+  Straus 4 is more efficient at 10 with 530µs per
+  Straus 5 is more efficient at 35 with 467µs per
+
+  Pippenger 5 is more efficient at 125 with 431µs per
+  Pippenger 6 is more efficient at 275 with 349µs per
+  Pippenger 7 is more efficient at 375 with 360µs per
+
+  dalek
+  Straus 3 is more efficient at 5 with 519µs per
+  Straus 4 is more efficient at 10 with 376µs per
+  Straus 5 is more efficient at 170 with 330µs per
+
+  Pippenger 5 is more efficient at 125 with 305µs per
+  Pippenger 6 is more efficient at 275 with 250µs per
+  Pippenger 7 is more efficient at 450 with 205µs per
+  Pippenger 8 is more efficient at 800 with 213µs per
+
+  Debug (with runs 5, so...):
+
+  k256
+  Straus 3 is more efficient at 5 with 2532µs per
+  Straus 4 is more efficient at 10 with 1930µs per
+  Straus 5 is more efficient at 80 with 1632µs per
+
+  Pippenger 5 is more efficient at 150 with 1441µs per
+  Pippenger 6 is more efficient at 300 with 1235µs per
+  Pippenger 7 is more efficient at 475 with 1182µs per
+  Pippenger 8 is more efficient at 625 with 1170µs per
+
+  dalek:
+  Straus 3 is more efficient at 5 with 971µs per
+  Straus 4 is more efficient at 10 with 782µs per
+  Straus 5 is more efficient at 75 with 778µs per
+  Straus 6 is more efficient at 165 with 867µs per
+
+  Pippenger 5 is more efficient at 125 with 677µs per
+  Pippenger 6 is more efficient at 250 with 655µs per
+  Pippenger 7 is more efficient at 475 with 500µs per
+  Pippenger 8 is more efficient at 875 with 499µs per
+*/
+fn algorithm(len: usize) -> Algorithm {
+  #[cfg(not(debug_assertions))]
+  if len == 0 {
+    Algorithm::Null
+  } else if len == 1 {
+    Algorithm::Single
+  } else if len < 10 {
+    // Straus 2 never showed a performance benefit, even with just 2 elements
+    Algorithm::Straus(3)
+  } else if len < 20 {
+    Algorithm::Straus(4)
+  } else if len < 50 {
+    Algorithm::Straus(5)
+  } else if len < 100 {
+    Algorithm::Pippenger(4)
+  } else if len < 125 {
+    Algorithm::Pippenger(5)
+  } else if len < 275 {
+    Algorithm::Pippenger(6)
+  } else if len < 400 {
+    Algorithm::Pippenger(7)
+  } else {
+    Algorithm::Pippenger(8)
+  }
+
+  #[cfg(debug_assertions)]
+  if len == 0 {
+    Algorithm::Null
+  } else if len == 1 {
+    Algorithm::Single
+  } else if len < 10 {
+    Algorithm::Straus(3)
+  } else if len < 80 {
+    Algorithm::Straus(4)
+  } else if len < 100 {
+    Algorithm::Straus(5)
+  } else if len < 125 {
+    Algorithm::Pippenger(4)
+  } else if len < 275 {
+    Algorithm::Pippenger(5)
+  } else if len < 475 {
+    Algorithm::Pippenger(6)
+  } else if len < 750 {
+    Algorithm::Pippenger(7)
+  } else {
+    Algorithm::Pippenger(8)
+  }
+}
-
-  /// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
-  /// based on the amount of pairs.
-  pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
-    match algorithm(pairs.len()) {
-      Algorithm::Null => Group::identity(),
-      Algorithm::Single => pairs[0].1 * pairs[0].0,
-      Algorithm::Straus(window) => straus_vartime(pairs, window),
-      Algorithm::Pippenger(window) => pippenger_vartime(pairs, window),
-    }
-  }
-}
-#[cfg(not(feature = "alloc"))]
-mod underlying {
-  use super::*;
-
-  /// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
-  /// amount of pairs.
-  pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
-    pairs: &[(G::Scalar, G)],
-  ) -> G {
-    pairs.iter().map(|(scalar, point)| *point * scalar).sum()
-  }
-
-  /// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
-  /// based on the amount of pairs.
-  pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
-    pairs.iter().map(|(scalar, point)| *point * scalar).sum()
-  }
-}
-
-pub use underlying::*;
+/// Performs a multiexponentiation, automatically selecting the optimal algorithm based on the
+/// amount of pairs.
+pub fn multiexp<G: Zeroize + Group<Scalar: Zeroize + PrimeFieldBits>>(
+  pairs: &[(G::Scalar, G)],
+) -> G {
+  match algorithm(pairs.len()) {
+    Algorithm::Null => Group::identity(),
+    Algorithm::Single => pairs[0].1 * pairs[0].0,
+    // These functions panic if called without any pairs
+    Algorithm::Straus(window) => straus(pairs, window),
+    Algorithm::Pippenger(window) => pippenger(pairs, window),
+  }
+}
+
+/// Performs a multiexponentiation in variable time, automatically selecting the optimal algorithm
+/// based on the amount of pairs.
+pub fn multiexp_vartime<G: Group<Scalar: PrimeFieldBits>>(pairs: &[(G::Scalar, G)]) -> G {
+  match algorithm(pairs.len()) {
+    Algorithm::Null => Group::identity(),
+    Algorithm::Single => pairs[0].1 * pairs[0].0,
+    Algorithm::Straus(window) => straus_vartime(pairs, window),
+    Algorithm::Pippenger(window) => pippenger_vartime(pairs, window),
+  }
+}
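For context on the two entry points in this diff, a hedged usage sketch pairing `multiexp` with `dalek-ff-group` (a dev-dependency above; requires an RNG such as `rand_core::OsRng`):

```rust
use rand_core::OsRng;
use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, RistrettoPoint};

fn main() {
  // Random (scalar, point) pairs to combine in a single call.
  let pairs: Vec<(Scalar, RistrettoPoint)> = (0 .. 3)
    .map(|_| (Scalar::random(&mut OsRng), RistrettoPoint::random(&mut OsRng)))
    .collect();

  // Both entry points agree with the naive sum of scalar-point products.
  let naive = pairs.iter().map(|(scalar, point)| *point * *scalar).sum::<RistrettoPoint>();
  assert_eq!(multiexp::multiexp(&pairs), naive);
  assert_eq!(multiexp::multiexp_vartime(&pairs), naive);
}
```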
@@ -1,5 +1,3 @@
-use alloc::vec;
-
 use zeroize::Zeroize;

 use ff::PrimeFieldBits;
Some files were not shown because too many files have changed in this diff.