Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-08 20:29:23 +00:00)

Compare commits: polkadot-s ... 10s-tender (71 commits)
Commit SHA1s, newest first:

466af2bc19, b427f4b8ab, 1096ddb7ea, 5487844b9e, 627e7e6210, 019b42c0e0,
079fddbaa6, 92d8b91be9, 4f1f7984a6, cda14ac8b9, 6f5d794f10, 34b93b882c,
0880453f82, ebdfc9afb4, f6409d08f3, c41a8ac8f2, d88aa90ec2, c05c511938,
df85c09435, 62a619a312, 95b7460907, 95c3cfc52e, f0694172ef, 29633ada1b,
337e54c672, 347d4cf413, aaff74575f, ad0ecc5185, af12cec3b9, 89788be034,
745075af6e, 9b25d0dad7, 2b76e41c9a, 05219c3ce8, cc75b52a43, 4913873b10,
0b8c7ade6e, 21262d41e6, 508f7eb23a, 90df391170, 9d3d47fc9f, 6691f16292,
9c06cbccad, c507ab9fd6, 3aa8007700, 1ba2d8d832, e7b0ed3e7e, f3429ec1ef,
1cff9b4264, 3c5a82e915, 93e85c5ce6, 617ec604ee, 265261d3ba, 7eb388e546,
6c8040f723, 02776c54a8, ec8dfd4639, 99e05e4e5e, a72b547824, bad3d210ba,
8c676d98c5, 890b70212a, 9f7140c3db, 8b26a85faa, 24ea65eae9, fff8dcb827,
2b23252b4c, b493e3e31f, 00774c29d7, a4c82632fb, c8747e23c5
.github/actions/bitcoin/action.yml (11 lines changed)

@@ -12,7 +12,7 @@ runs:
   steps:
     - name: Bitcoin Daemon Cache
       id: cache-bitcoind
-      uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: bitcoin.tar.gz
         key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -37,11 +37,4 @@ runs:

     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: |
-        RPC_USER=serai
-        RPC_PASS=seraidex
-
-        bitcoind -txindex -regtest \
-          -rpcuser=$RPC_USER -rpcpassword=$RPC_PASS \
-          -rpcbind=127.0.0.1 -rpcbind=$(hostname) -rpcallowip=0.0.0.0/0 \
-          -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/bitcoin/run.sh -daemon
.github/actions/build-dependencies/action.yml (32 lines changed)

@@ -1,12 +1,6 @@
 name: build-dependencies
 description: Installs build dependencies for Serai

-inputs:
-  github-token:
-    description: "GitHub token to install Protobuf with"
-    require: true
-    default:
-
 runs:
   using: "composite"
   steps:

@@ -20,15 +14,29 @@ runs:
         sudo apt autoremove -y
         sudo apt clean
         docker system prune -a --volumes
       if: runner.os == 'Linux'

-    - name: Install apt dependencies
+    - name: Remove unused packages
       shell: bash
-      run: sudo apt install -y ca-certificates
+      run: |
+        (gem uninstall -aIx) || (exit 0)
+        brew uninstall --force "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
+        brew uninstall --force "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
+        brew uninstall --force "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
+        brew uninstall --force "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
+        brew cleanup
+      if: runner.os == 'macOS'

-    - name: Install Protobuf
-      uses: arduino/setup-protoc@a8b67ba40b37d35169e222f3bb352603327985b6
-      with:
-        repo-token: ${{ inputs.github-token }}
+    - name: Install dependencies
+      shell: bash
+      run: |
+        if [ "$RUNNER_OS" == "Linux" ]; then
+          sudo apt install -y ca-certificates protobuf-compiler
+        elif [ "$RUNNER_OS" == "Windows" ]; then
+          choco install protoc
+        elif [ "$RUNNER_OS" == "macOS" ]; then
+          brew install protobuf
+        fi

     - name: Install solc
       shell: bash
.github/actions/monero-wallet-rpc/action.yml (11 lines changed)

@@ -5,14 +5,14 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.2.0
+    default: v0.18.3.1

 runs:
   using: "composite"
   steps:
     - name: Monero Wallet RPC Cache
       id: cache-monero-wallet-rpc
-      uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: monero-wallet-rpc
         key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

@@ -41,4 +41,9 @@ runs:

     - name: Monero Wallet RPC
       shell: bash
-      run: ./monero-wallet-rpc --disable-rpc-login --rpc-bind-port 6061 --allow-mismatched-daemon-version --wallet-dir ./ --detach
+      run: |
+        ./monero-wallet-rpc --allow-mismatched-daemon-version \
+          --daemon-address 0.0.0.0:18081 --daemon-login serai:seraidex \
+          --disable-rpc-login --rpc-bind-port 18082 \
+          --wallet-dir ./ \
+          --detach
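The rewritten step points the wallet RPC at a daemon on 18081 (monerod's standard RPC port) with the serai:seraidex login, and serves on 18082 (the conventional monero-wallet-rpc port) with RPC auth disabled. As a hypothetical liveness check against that configuration (not part of the repository), a JSON-RPC 2.0 call to the standard /json_rpc endpoint could look like this Rust sketch, assuming the reqwest (with the blocking and json features) and serde_json crates:

use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
  // monero-wallet-rpc serves JSON-RPC 2.0 on /json_rpc; --disable-rpc-login
  // in the action above means no authentication is needed for this call
  let res: Value = reqwest::blocking::Client::new()
    .post("http://127.0.0.1:18082/json_rpc")
    .json(&json!({ "jsonrpc": "2.0", "id": "0", "method": "get_version" }))
    .send()?
    .json()?;
  println!("wallet-rpc version: {}", res["result"]["version"]);
  Ok(())
}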
.github/actions/monero/action.yml (12 lines changed)

@@ -5,16 +5,16 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: v0.18.2.0
+    default: v0.18.3.1

 runs:
   using: "composite"
   steps:
     - name: Monero Daemon Cache
       id: cache-monerod
-      uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
-        path: monerod
+        path: /usr/bin/monerod
         key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

     - name: Download the Monero Daemon

@@ -37,8 +37,10 @@ runs:
         wget https://downloads.getmonero.org/cli/$FILE
         tar -xvf $FILE

-        mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod monerod
+        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
+        sudo chmod 777 /usr/bin/monerod
+        sudo chmod +x /usr/bin/monerod

     - name: Monero Regtest Daemon
       shell: bash
-      run: ./monerod --regtest --offline --fixed-difficulty=1 --detach
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/monero/run.sh --detach
.github/actions/test-dependencies/action.yml (9 lines changed)

@@ -2,15 +2,10 @@ name: test-dependencies
 description: Installs test dependencies for Serai

 inputs:
-  github-token:
-    description: "GitHub token to install Protobuf with"
-    require: true
-    default:
-
   monero-version:
     description: "Monero version to download and run as a regtest node"
     required: false
-    default: v0.18.2.0
+    default: v0.18.3.1

   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"

@@ -22,8 +17,6 @@ runs:
   steps:
     - name: Install Build Dependencies
       uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

     - name: Install Foundry
       uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
.github/nightly-version (2 lines changed)

@@ -1 +1 @@
-nightly-2023-12-04
+nightly-2024-02-07
.github/workflows/coins-tests.yml (2 lines changed)

@@ -25,8 +25,6 @@ jobs:

     - name: Test Dependencies
       uses: ./.github/actions/test-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

     - name: Run Tests
       run: |
.github/workflows/common-tests.yml (2 lines changed)

@@ -21,8 +21,6 @@ jobs:

     - name: Build Dependencies
       uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

     - name: Run Tests
       run: |
.github/workflows/coordinator-tests.yml (8 lines changed)

@@ -9,9 +9,8 @@ on:
       - "crypto/**"
       - "coins/**"
       - "message-queue/**"
-      - "orchestration/message-queue/**"
       - "coordinator/**"
-      - "orchestration/coordinator/**"
+      - "orchestration/**"
       - "tests/docker/**"
       - "tests/coordinator/**"

@@ -21,9 +20,8 @@ on:
       - "crypto/**"
       - "coins/**"
       - "message-queue/**"
-      - "orchestration/message-queue/**"
       - "coordinator/**"
-      - "orchestration/coordinator/**"
+      - "orchestration/**"
       - "tests/docker/**"
       - "tests/coordinator/**"

@@ -37,8 +35,6 @@ jobs:

     - name: Install Build Dependencies
       uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

     - name: Run coordinator Docker tests
       run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
.github/workflows/crypto-tests.yml (2 lines changed)

@@ -23,8 +23,6 @@ jobs:

     - name: Build Dependencies
       uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

     - name: Run Tests
       run: |
.github/workflows/daily-deny.yml (2 lines changed)

@@ -12,7 +12,7 @@ jobs:
     - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

     - name: Advisory Cache
-      uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
+      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
       with:
         path: ~/.cargo/advisory-db
         key: rust-advisory-db
.github/workflows/full-stack-tests.yml (2 lines changed)

@@ -17,8 +17,6 @@ jobs:

     - name: Install Build Dependencies
       uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

     - name: Run Full Stack Docker tests
       run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
.github/workflows/lint.yml (25 lines changed)

@@ -9,21 +9,24 @@ on:

jobs:
  clippy:
-    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
+    runs-on: ${{ matrix.os }}

    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Get nightly version to use
        id: nightly
+        shell: bash
        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies
-        with:
-          github-token: ${{ secrets.GITHUB_TOKEN }}

      - name: Install nightly rust
-        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c rust-src -c clippy
+        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy

      - name: Run Clippy
        run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module

@@ -34,7 +37,8 @@ jobs:
      # The above clippy run will cause it to be updated, so checking there's
      # no differences present now performs the desired check
      - name: Verify lockfile
-        run: git diff | wc -l | grep -x "0"
+        shell: bash
+        run: git diff | wc -l | LC_ALL="en_US.utf8" grep -x -e "^[ ]*0"

  deny:
    runs-on: ubuntu-latest

@@ -42,7 +46,7 @@ jobs:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Advisory Cache
-        uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84
+        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
        with:
          path: ~/.cargo/advisory-db
          key: rust-advisory-db

@@ -60,6 +64,7 @@ jobs:

      - name: Get nightly version to use
        id: nightly
+        shell: bash
        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT

      - name: Install nightly rust

@@ -68,14 +73,6 @@ jobs:
      - name: Run rustfmt
        run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

-  dockerfiles:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-      - name: Verify Dockerfiles are up to date
-        # Runs the file which generates them and checks the diff has no lines
-        run: cd orchestration && ./dockerfiles.sh && git diff | wc -l | grep -x "0"
-
  machete:
    runs-on: ubuntu-latest
    steps:
.github/workflows/message-queue-tests.yml (6 lines changed)

@@ -8,7 +8,7 @@ on:
      - "common/**"
      - "crypto/**"
      - "message-queue/**"
-      - "orchestration/message-queue/**"
+      - "orchestration/**"
      - "tests/docker/**"
      - "tests/message-queue/**"

@@ -17,7 +17,7 @@ on:
      - "common/**"
      - "crypto/**"
      - "message-queue/**"
-      - "orchestration/message-queue/**"
+      - "orchestration/**"
      - "tests/docker/**"
      - "tests/message-queue/**"

@@ -31,8 +31,6 @@ jobs:

    - name: Install Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

    - name: Run message-queue Docker tests
      run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
.github/workflows/mini-tests.yml (2 lines changed)

@@ -21,8 +21,6 @@ jobs:

    - name: Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Run Tests
      run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p mini-serai
.github/workflows/monero-tests.yaml (3 lines changed)

@@ -24,8 +24,6 @@ jobs:

    - name: Test Dependencies
      uses: ./.github/actions/test-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Run Unit Tests Without Features
      run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib

@@ -45,7 +43,6 @@ jobs:
    - name: Test Dependencies
      uses: ./.github/actions/test-dependencies
      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}
        monero-version: ${{ matrix.version }}

    - name: Run Integration Tests Without Features
.github/workflows/no-std.yml (2 lines changed)

@@ -27,8 +27,6 @@ jobs:

    - name: Install Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

    - name: Install RISC-V Toolchain
      run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
.github/workflows/processor-tests.yml (8 lines changed)

@@ -9,9 +9,8 @@ on:
      - "crypto/**"
      - "coins/**"
      - "message-queue/**"
-      - "orchestration/message-queue/**"
      - "processor/**"
-      - "orchestration/processor/**"
+      - "orchestration/**"
      - "tests/docker/**"
      - "tests/processor/**"

@@ -21,9 +20,8 @@ on:
      - "crypto/**"
      - "coins/**"
      - "message-queue/**"
-      - "orchestration/message-queue/**"
      - "processor/**"
-      - "orchestration/processor/**"
+      - "orchestration/**"
      - "tests/docker/**"
      - "tests/processor/**"

@@ -37,8 +35,6 @@ jobs:

    - name: Install Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

    - name: Run processor Docker tests
      run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
.github/workflows/reproducible-runtime.yml (2 lines changed)

@@ -31,8 +31,6 @@ jobs:

    - name: Install Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ inputs.github-token }}

    - name: Run Reproducible Runtime tests
      run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
.github/workflows/tests.yml (6 lines changed)

@@ -33,8 +33,6 @@ jobs:

    - name: Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Run Tests
      run: |

@@ -54,8 +52,6 @@ jobs:

    - name: Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Run Tests
      run: |

@@ -79,8 +75,6 @@ jobs:

    - name: Build Dependencies
      uses: ./.github/actions/build-dependencies
-      with:
-        github-token: ${{ secrets.GITHUB_TOKEN }}

    - name: Run Tests
      run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
.gitignore (6 lines changed)

@@ -1,3 +1,7 @@
 target
-.vscode
+Dockerfile
+Dockerfile.fast-epoch
+!orchestration/runtime/Dockerfile
 .test-logs
+
+.vscode
Cargo.lock (generated, 2289 lines changed)

File diff suppressed because it is too large
Cargo.toml (51 lines changed)

@@ -1,6 +1,18 @@
 [workspace]
 resolver = "2"
 members = [
+  # Version patches
+  "patches/zstd",
+  "patches/proc-macro-crate",
+
+  # std patches
+  "patches/matches",
+  "patches/is-terminal",
+
+  # Rewrites/redirects
+  "patches/option-ext",
+  "patches/directories-next",
+
   "common/std-shims",
   "common/zalloc",
   "common/db",

@@ -36,12 +48,6 @@ members = [
   "coordinator/tributary",
   "coordinator",

-  "substrate/tree-cleanup/is-terminal",
-  "substrate/tree-cleanup/option-ext",
-  "substrate/tree-cleanup/directories-next",
-  "substrate/tree-cleanup/bandersnatch_vrfs",
-  "substrate/tree-cleanup/w3f-bls",
-
   "substrate/primitives",

   "substrate/coins/primitives",

@@ -63,6 +69,8 @@ members = [
   "substrate/client",

+  "orchestration",
+
   "mini",

   "tests/no-std",

@@ -99,35 +107,26 @@ panic = "unwind"
 # https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

-# subxt *can* pull these off crates.io yet there's no benefit to this
-sp-core-hashing = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "experimental" }
-sp-std = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "experimental" }
-
 # Needed due to dockertest's usage of `Rc`s when we need `Arc`s
 dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }

 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
+# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
+proc-macro-crate = { path = "patches/proc-macro-crate" }

 # is-terminal now has an std-based solution with an equivalent API
-is-terminal = { path = "substrate/tree-cleanup/is-terminal" }
+is-terminal = { path = "patches/is-terminal" }
 # So does matches
-matches = { path = "substrate/tree-cleanup/matches" }
+matches = { path = "patches/matches" }

 # directories-next was created because directories was unmaintained
 # directories-next is now unmaintained while directories is maintained
 # The directories author pulls in ridiculously pointless crates and prefers
 # copyleft licenses
 # Serai's polkadot-sdk consolidated to directories-next, as directories-next is
 # acceptable and because we couldn't consolidate to directories without forking
 # wasmtime
 # The following two patches resolve everything
-option-ext = { path = "substrate/tree-cleanup/option-ext" }
-directories-next = { path = "substrate/tree-cleanup/directories-next" }
-
-# mach is unmaintained, so this wraps mach2 as mach
-mach = { path = "substrate/tree-cleanup/mach" }
-
-# cargo believes the following are in-tree despite no features activating them
-# We provide empty crates to not only prove they're unused, yet also clean up
-# our Cargo.lock
-w3f-bls = { path = "substrate/tree-cleanup/w3f-bls" }
-[patch."https://github.com/w3f/ring-vrf"]
-bandersnatch_vrfs = { path = "substrate/tree-cleanup/bandersnatch_vrfs" }
+option-ext = { path = "patches/option-ext" }
+directories-next = { path = "patches/directories-next" }

 [workspace.lints.clippy]
 unwrap_or_default = "allow"
@@ -12,7 +12,7 @@ pub fn SEQUENTIAL() -> &'static Mutex<()> {

 #[allow(dead_code)]
 pub(crate) async fn rpc() -> Rpc {
-  let rpc = Rpc::new("http://serai:seraidex@127.0.0.1:18443".to_string()).await.unwrap();
+  let rpc = Rpc::new("http://serai:seraidex@127.0.0.1:8332".to_string()).await.unwrap();

   // If this node has already been interacted with, clear its chain
   if rpc.get_latest_block_number().await.unwrap() > 0 {
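The test helper now dials 8332, bitcoind's mainnet-default RPC port, instead of the regtest default 18443; the orchestration run script presumably pins the RPC port explicitly. As a standalone illustration of the same credentials and port (hypothetical, not the repo's Rpc type), a raw getblockcount call over JSON-RPC, assuming the reqwest (with the blocking and json features) and serde_json crates:

use serde_json::{json, Value};

fn main() -> Result<(), Box<dyn std::error::Error>> {
  // Same credentials and port as the updated test helper above
  let res: Value = reqwest::blocking::Client::new()
    .post("http://127.0.0.1:8332")
    .basic_auth("serai", Some("seraidex"))
    .json(&json!({ "jsonrpc": "2.0", "id": 0, "method": "getblockcount", "params": [] }))
    .send()?
    .json()?;
  println!("block count: {}", res["result"]);
  Ok(())
}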
@@ -26,6 +26,9 @@ curve25519-dalek = { version = "4", default-features = false, features = ["alloc
 group = { version = "0.13", default-features = false }
 dalek-ff-group = { path = "../../../crypto/dalek-ff-group", version = "0.4", default-features = false }

+[dev-dependencies]
+hex = "0.4"
+
 [features]
 std = ["std-shims/std", "subtle/std", "sha3/std", "dalek-ff-group/std"]
 default = ["std"]
@@ -7,7 +7,16 @@ use dalek_ff_group::FieldElement;

 use crate::hash;

-/// Monero's hash to point function, as named `ge_fromfe_frombytes_vartime`.
+/// Decompress canonically encoded ed25519 point
+/// It does not check if the point is in the prime order subgroup
+pub fn decompress_point(bytes: [u8; 32]) -> Option<EdwardsPoint> {
+  CompressedEdwardsY(bytes)
+    .decompress()
+    // Ban points which are either unreduced or -0
+    .filter(|point| point.compress().to_bytes() == bytes)
+}
+
+/// Monero's hash to point function, as named `hash_to_ec`.
 pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
   #[allow(non_snake_case)]
   let A = FieldElement::from(486662u64);

@@ -47,5 +56,5 @@ pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
   let mut bytes = Y.to_repr();
   bytes[31] |= sign.unwrap_u8() << 7;

-  CompressedEdwardsY(bytes).decompress().unwrap().mul_by_cofactor()
+  decompress_point(bytes).unwrap().mul_by_cofactor()
 }
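The `.filter` round-trip is what enforces canonicality: curve25519-dalek's decompress accepts some encodings (unreduced y coordinates, and a negated zero x) that re-compress to different bytes. A minimal sketch of that behaviour, using the curve25519-dalek 4.x API directly and the identity-point vectors that also appear in tests.txt below:

use curve25519_dalek::edwards::CompressedEdwardsY;

// Same check as decompress_point above: decompress, then require the
// re-compressed bytes to match the input exactly
fn is_canonical(bytes: [u8; 32]) -> bool {
  CompressedEdwardsY(bytes)
    .decompress()
    .map(|point| point.compress().to_bytes() == bytes)
    .unwrap_or(false)
}

fn main() {
  // 0100...00 is the canonical encoding of the identity (y = 1, x = 0)
  let mut identity = [0u8; 32];
  identity[0] = 1;
  assert!(is_canonical(identity));

  // 0100...80 flips the x sign bit; it still decompresses (x = -0 = 0) yet
  // re-compresses without the sign bit, so the filter rejects it, matching
  // the `check_key 0100...80 false` vector in tests.txt
  let mut negative_zero_x = identity;
  negative_zero_x[31] |= 1 << 7;
  assert!(!is_canonical(negative_zero_x));
}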
@@ -9,7 +9,7 @@ use std_shims::{sync::OnceLock, vec::Vec};

 use sha3::{Digest, Keccak256};

-use curve25519_dalek::edwards::{EdwardsPoint as DalekPoint, CompressedEdwardsY};
+use curve25519_dalek::edwards::{EdwardsPoint as DalekPoint};

 use group::{Group, GroupEncoding};
 use dalek_ff_group::EdwardsPoint;

@@ -18,7 +18,10 @@ mod varint;
 use varint::write_varint;

 mod hash_to_point;
-pub use hash_to_point::hash_to_point;
+pub use hash_to_point::{hash_to_point, decompress_point};
+
+#[cfg(test)]
+mod tests;

 fn hash(data: &[u8]) -> [u8; 32] {
   Keccak256::digest(data).into()

@@ -29,10 +32,7 @@ static H_CELL: OnceLock<DalekPoint> = OnceLock::new();
 #[allow(non_snake_case)]
 pub fn H() -> DalekPoint {
   *H_CELL.get_or_init(|| {
-    CompressedEdwardsY(hash(&EdwardsPoint::generator().to_bytes()))
-      .decompress()
-      .unwrap()
-      .mul_by_cofactor()
+    decompress_point(hash(&EdwardsPoint::generator().to_bytes())).unwrap().mul_by_cofactor()
   })
 }
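For reference, the H() constant derived above is Monero's alternate generator: the Keccak-256 hash of G's compressed bytes, interpreted as a compressed point and multiplied by the cofactor to land in the prime-order subgroup. A self-contained sketch of the derivation, using curve25519-dalek, sha3, and hex directly rather than this crate's wrappers:

use sha3::{Digest, Keccak256};
use curve25519_dalek::{constants::ED25519_BASEPOINT_POINT, edwards::CompressedEdwardsY};

fn main() {
  // Keccak-256 of the compressed basepoint G
  let hashed: [u8; 32] = Keccak256::digest(ED25519_BASEPOINT_POINT.compress().to_bytes()).into();
  // Decompress and clear the cofactor, as H() does above
  let h = CompressedEdwardsY(hashed).decompress().unwrap().mul_by_cofactor();
  println!("H = {}", hex::encode(h.compress().to_bytes()));
}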
coins/monero/generators/src/tests/hash_to_point.rs (new file, 38 lines)

use crate::{decompress_point, hash_to_point};

#[test]
fn crypto_tests() {
  // tests.txt file copied from monero repo
  // https://github.com/monero-project/monero/
  // blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/tests/crypto/tests.txt
  let reader = include_str!("./tests.txt");

  for line in reader.lines() {
    let mut words = line.split_whitespace();
    let command = words.next().unwrap();

    match command {
      "check_key" => {
        let key = words.next().unwrap();
        let expected = match words.next().unwrap() {
          "true" => true,
          "false" => false,
          _ => unreachable!("invalid result"),
        };

        let actual = decompress_point(hex::decode(key).unwrap().try_into().unwrap());

        assert_eq!(actual.is_some(), expected);
      }
      "hash_to_ec" => {
        let bytes = words.next().unwrap();
        let expected = words.next().unwrap();

        let actual = hash_to_point(hex::decode(bytes).unwrap().try_into().unwrap());

        assert_eq!(hex::encode(actual.compress().to_bytes()), expected);
      }
      _ => unreachable!("unknown command"),
    }
  }
}
coins/monero/generators/src/tests/mod.rs (new file, 1 line)

mod hash_to_point;
628
coins/monero/generators/src/tests/tests.txt
Normal file
628
coins/monero/generators/src/tests/tests.txt
Normal file
@@ -0,0 +1,628 @@
|
||||
check_key c2cb3cf3840aa9893e00ec77093d3d44dba7da840b51c48462072d58d8efd183 false
|
||||
check_key bd85a61bae0c101d826cbed54b1290f941d26e70607a07fc6f0ad611eb8f70a6 true
|
||||
check_key 328f81cad4eba24ab2bad7c0e56b1e2e7346e625bcb06ae649aef3ffa0b8bef3 false
|
||||
check_key 6016a5463b9e5a58c3410d3f892b76278883473c3f0b69459172d3de49e85abe true
|
||||
check_key 4c71282b2add07cdc6898a2622553f1ca4eb851e5cb121181628be5f3814c5b1 false
|
||||
check_key 69393c25c3b50e177f81f20f852dd604e768eb30052e23108b3cfa1a73f2736e true
|
||||
check_key 3d5a89b676cb84c2be3428d20a660dc6a37cae13912e127888a5132e8bac2163 true
|
||||
check_key 78cd665deb28cebc6208f307734c56fccdf5fa7e2933fadfcdd2b6246e9ae95c false
|
||||
check_key e03b2414e260580f86ee294cd4c636a5b153e617f704e81dad248fbf715b2ee4 true
|
||||
check_key 28c3503ce82d7cdc8e0d96c4553bcf0352bbcfc73925495dbe541e7e1df105fc false
|
||||
check_key 06855c3c3e0d03fec354059bda319b39916bdc10b6581e3f41b335ee7b014fd5 false
|
||||
check_key 556381485df0d7d5a268ab5ecfb2984b060acc63471183fcf538bf273b0c0cb5 true
|
||||
check_key c7f76d82ac64b1e7fdc32761ff00d6f0f7ada4cf223aa5a11187e3a02e1d5319 true
|
||||
check_key cfa85d8bdb6f633fcf031adee3a299ac42eeb6bd707744049f652f6322f5aa47 true
|
||||
check_key 91e9b63ced2b08979fee713365464cc3417c4f238f9bdd3396efbb3c58e195ee true
|
||||
check_key 7b56e76fe94bd30b3b2f2c4ba5fe4c504821753a8965eb1cbcf8896e2d6aba19 true
|
||||
check_key 7338df494bc416cf5edcc02069e067f39cb269ce67bd9faba956021ce3b3de3a false
|
||||
check_key f9a1f27b1618342a558379f4815fa5039a8fe9d98a09f45c1af857ba99231dc1 false
|
||||
check_key b2a1f37718180d4448a7fcb5f788048b1a7132dde1cfd25f0b9b01776a21c687 true
|
||||
check_key 0d3a0f9443a8b24510ad1e76a8117cca03bce416edfe35e3c2a2c2712454f8dc false
|
||||
check_key d8d3d806a76f120c4027dc9c9d741ad32e06861b9cfbc4ce39289c04e251bb3c false
|
||||
check_key 1e9e3ba7bc536cd113606842835d1f05b4b9e65875742f3a35bfb2d63164b5d5 true
|
||||
check_key 5c52d0087997a2cdf1d01ed0560d94b4bfd328cb741cb9a8d46ff50374b35a57 true
|
||||
check_key bb669d4d7ffc4b91a14defedcdbd96b330108b01adc63aa685e2165284c0033b false
|
||||
check_key d2709ae751a0a6fd796c98456fa95a7b64b75a3434f1caa3496eeaf5c14109b4 true
|
||||
check_key e0c238cba781684e655b10a7d4af04ab7ff2e7022182d7ed2279d6adf36b3e7a false
|
||||
check_key 34ebb4bf871572cee5c6935716fab8c8ec28feef4f039763d8f039b84a50bf4c false
|
||||
check_key 4730d4f38ec3f3b83e32e6335d2506df4ee39858848842c5a0184417fcc639e4 true
|
||||
check_key d42cf7fdf5e17e0a8a7f88505a2b7a3d297113bd93d3c20fa87e11509ec905a2 true
|
||||
check_key b757c95059cefabb0080d3a8ebca82e46efecfd29881be3121857f9d915e388c false
|
||||
check_key bbe777aaf04d02b96c0632f4b1c6f35f1c7bcbc5f22af192f92c077709a2b50b false
|
||||
check_key 73518522aabd28566f858c33fccb34b7a4de0e283f6f783f625604ee647afad9 true
|
||||
check_key f230622c4a8f6e516590466bd10f86b64fbef61695f6a054d37604e0b024d5af false
|
||||
check_key bc6b9a8379fd6c369f7c3bd9ddce58db6b78f27a41d798bb865c3920824d0943 false
|
||||
check_key 45a4f87c25898cd6be105fa1602b85c4d862782adaac8b85c996c4a2bcd8af47 true
|
||||
check_key eb4ad3561d21c4311affbd7cc2c7ff5fd509f72f88ba67dc097a75c31fdbd990 false
|
||||
check_key 2f34f4630c09a23b7ecc19f02b4190a26df69e07e13de8069ae5ff80d23762fc true
|
||||
check_key 2ea4e4fb5085eb5c8adee0d5ab7d35c67d74d343bd816cd13924536cffc2527c true
|
||||
check_key 5d35467ee6705a0d35818aa9ae94e4603c3e5500bfc4cf4c4f77a7160a597aa6 true
|
||||
check_key 8ff42bc76796e20c99b6e879369bd4b46a256db1366416291de9166e39d5a093 true
|
||||
check_key 0262ba718850df6c621e8a24cd9e4831c047e38818a89e15c7a06a489a4558e1 false
|
||||
check_key 58b29b2ba238b534b08fb46f05f430e61cb77dc251b0bb50afec1b6061fd9247 false
|
||||
check_key 153170e3dc2b0e1b368fc0d0e31053e872f094cdace9a2846367f0d9245a109b false
|
||||
check_key 40419d309d07522d493bb047ca9b5fb6c401aae226eefae6fd395f5bb9114200 true
|
||||
check_key 713068818d256ef69c78cd6082492013fbd48de3c9e7e076415dd0a692994504 true
|
||||
check_key a7218ee08e50781b0c87312d5e0031467e863c10081668e3792d96cbcee4e474 true
|
||||
check_key 356ce516b00e674ef1729c75b0a68090e7265cef675bbf32bf809495b67e9342 false
|
||||
check_key 52a5c053293675e3efd2c585047002ea6d77931cbf38f541b9070d319dc0d237 false
|
||||
check_key 77c0080bf157e069b18c4c604cc9505c5ec6f0f9930e087592d70507ca1b5534 false
|
||||
check_key e733bc41f880a4cfb1ca6f397916504130807289cacfca10b15f5b8d058ed1bf false
|
||||
check_key c4f1d3c884908a574ecea8be10e02277de35ef84a1d10f105f2be996f285161f true
|
||||
check_key aed677f7f69e146aa0863606ac580fc0bbdc22a88c4b4386abaa4bdfff66bcc9 false
|
||||
check_key 6ad0edf59769599af8caa986f502afc67aecbebb8107aaf5e7d3ae51d5cf8dd8 false
|
||||
check_key 64a0a70e99be1f775c222ee9cd6f1bee6f632cb9417899af398ff9aff70661c6 true
|
||||
check_key c63afaa03bb5c4ed7bc77aac175dbfb73f904440b2e3056a65850ac1bd261332 false
|
||||
check_key a4e89cd2471c26951513b1cfbdcf053a86575e095af52495276aa56ede8ce344 false
|
||||
check_key 2ce935d97f7c3ddb973de685d20f58ee39938fe557216328045ec2b83f3132be true
|
||||
check_key 3e3d38b1fca93c1559ac030d586616354c668aa76245a09e3fa6de55ac730973 true
|
||||
check_key 8b81b9681f76a4254007fd07ed1ded25fc675973ccb23afd06074805194733a4 false
|
||||
check_key 26d1c15dfc371489439e29bcef2afcf7ed01fac24960fdc2e7c20847a8067588 true
|
||||
check_key 85c1199b5a4591fc4cc36d23660648c1b9cfbb0e9c47199fa3eea33299a3dcec false
|
||||
check_key 60830ba5449c1f04ac54675dfc7cac7510106c4b7549852551f8fe65971123e2 false
|
||||
check_key 3e43c28c024597b3b836e4bc16905047cbf6e841b80e0b8cd6a325049070c2a5 false
|
||||
check_key 474792c16a0032343a6f28f4cb564747c3b1ea0b6a6b9a42f7c71d7cc3dd3b44 true
|
||||
check_key c8ec5e67cb5786673085191881950a3ca20dde88f46851b01dd91c695cfbad16 true
|
||||
check_key 861c4b24b24a87b8559e0bb665f84dcc506c147a909f335ae4573b92299f042f false
|
||||
check_key 2c9e0fe3e4983d79f86c8c36928528f1bc90d94352ce427032cdef6906d84d0b true
|
||||
check_key 9293742822c2dff63fdc1bf6645c864fd527cea2ddba6d4f3048d202fc340c9a true
|
||||
check_key 3956422ad380ef19cb9fe360ef09cc7aaec7163eea4114392a7a0b2e2671914e true
|
||||
check_key 5ae8e72cadda85e525922fec11bd53a261cf26ee230fe85a1187f831b1b2c258 false
|
||||
check_key 973feca43a0baf450c30ace5dc19015e19400f0898316e28d9f3c631da31f99a true
|
||||
check_key dd946c91a2077f45c5c16939e53859d9beabaf065e7b1b993d5e5cd385f8716e true
|
||||
check_key b3928f2d67e47f6bd6da81f72e64908d8ff391af5689f0202c4c6fec7666ffe8 true
|
||||
check_key 313382e82083697d7f9d256c3b3800b099b56c3ef33cacdccbd40a65622e25fc false
|
||||
check_key 7d65380c12144802d39ed9306eed79fe165854273700437c0b4b50559800c058 true
|
||||
check_key 4db5c20a49422fd27739c9ca80e2271a8a125dfcead22cb8f035d0e1b7b163be true
|
||||
check_key dd76a9f565ef0e44d1531349ec4c5f7c3c387c2f5823e693b4952f4b0b70808c true
|
||||
check_key 66430bf628eae23918c3ed17b42138db1f98c24819e55fc4a07452d0c85603eb true
|
||||
check_key 9f0b677830c3f089c27daf724bb10be848537f8285de83ab0292d35afb617f77 false
|
||||
check_key cbf98287391fb00b1e68ad64e9fb10198025864c099b8b9334d840457e673874 true
|
||||
check_key a42552e9446e49a83aed9e3370506671216b2d1471392293b8fc2b81c81a73ee false
|
||||
check_key fb3de55ac81a923d506a514602d65d004ec9d13e8b47e82d73af06da73006673 false
|
||||
check_key e17abb78e58a4b72ff4ad7387b290f2811be880b394b8bcaae7748ac09930169 false
|
||||
check_key 9ffbda7ace69753761cdb5eb01f75433efa5cdb6a4f1b664874182c6a95adcba true
|
||||
check_key 507123c979179ea0a3f7f67fb485f71c8636ec4ec70aa47b92f3c707e7541a54 false
|
||||
check_key f1d0b156571994ef578c61cb6545d34f834eb30e4357539a5633c862d4dffa91 false
|
||||
check_key 3de62311ec14f9ee95828c190b2dc3f03059d6119e8dfccb7323efc640e07c75 false
|
||||
check_key 5e50bb48bc9f6dd11d52c1f0d10d8ae5674d7a4af89cbbce178dafc8a562e5fe false
|
||||
check_key 20b2c16497be101995391ceefb979814b0ea76f1ed5b6987985bcdcd17b36a81 false
|
||||
check_key d63bff73b914ce791c840e99bfae0d47afdb99c2375e33c8f149d0df03d97873 false
|
||||
check_key 3f24b3d94b5ddd244e4c4e67a6d9f533f0396ca30454aa0ca799f21328b81d47 true
|
||||
check_key 6a44c016f09225a6d2e830290719d33eb29b53b553eea7737ed3a6e297b2e7d2 true
|
||||
check_key ff0f34df0c76c207b8340be2009db72f730c69c2bbfeea2013105eaccf1d1f8e true
|
||||
check_key 4baf559869fe4e915e219c3c8d9a2330fc91e542a5a2a7311d4d59fee996f807 true
|
||||
check_key 1632207dfef26e97d13b0d0035ea9468fc5a8a89b0990fce77bb143c9d7f3b67 true
|
||||
check_key fcb3dee3993d1a47630f29410903dd03706bd5e81c5802e6f1b9095cbdb404d3 true
|
||||
check_key fb527092b9809e3d27d7588c7ef89915a769b99c1e03e7f72bbead9ed837daae false
|
||||
check_key 902b118d27d40ab9cbd55edd375801ce302cdb59e09c8659a3ea1401918d8bba false
|
||||
check_key 4d6fbf25ca51e263a700f1abf84f758dde3d11b632e908b3093d64fe2e70ea0a true
|
||||
check_key f4c3211ec70affc1c9a94a6589460ee8360dad5f8c679152f16994038532e3fc true
|
||||
check_key c2b3d73ac14956d7fdf12fa92235af1bb09e1566a6a6ffd0025682c750abdd69 false
|
||||
check_key b7e68c12207d2e2104fb2ca224829b6fccc1c0e2154e8a931e3c837a945f4430 false
|
||||
check_key 56ca0ca227708f1099bda1463db9559541c8c11ffad7b3d95c717471f25a01bf true
|
||||
check_key 3eef3a46833e4d851671182a682e344e36bea7211a001f3b8af1093a9c83f1b2 true
|
||||
check_key bd1f4a4f26cab7c1cbc0e17049b90854d6d28d2d55181e1b5f7a8045fcdfa06e true
|
||||
check_key 8537b01c87e7c184d9555e8d93363dcd9b60a8acc94cd3e41eb7525fd3e1d35a false
|
||||
check_key 68ace49179d549bad391d98ab2cc8afee65f98ce14955c3c1b16e850fabec231 true
|
||||
check_key f9922f8a660e7c3e4f3735a817d18b72f59166a0be2d99795f953cf233a27e24 true
|
||||
check_key 036b6be3da26e80508d5a5a6a5999a1fe0db1ac4e9ade8f1ea2eaf2ea9b1a70e true
|
||||
check_key 5e595e886ce16b5ea31f53bcb619f16c8437276618c595739fece6339731feb0 false
|
||||
check_key 4ee2cebae3476ed2eeb7efef9d20958538b3642f938403302682a04115c0f8ed false
|
||||
check_key 519eedbd0da8676063ce7d5a605b3fc27afeecded857afa24b894ad248c87b5d false
|
||||
check_key ce2b627c0accf4a3105796680c37792b30c6337d2d4fea11678282455ff82ff7 false
|
||||
check_key aa26ed99071a8416215e8e7ded784aa7c2b303aab67e66f7539905d7e922eb4d false
|
||||
check_key 435ae49c9ca26758aa103bdcca8d51393b1906fe27a61c5245361e554f335ec2 true
|
||||
check_key 42568af395bd30024f6ccc95205c0e11a6ad1a7ee100f0ec46fcdf0af88e91fb false
|
||||
check_key 0b4a78d1fde56181445f04ca4780f0725daa9c375b496fab6c037d6b2c2275db true
|
||||
check_key 2f82d2a3c8ce801e1ad334f9e074a4fbf76ffac4080a7331dc1359c2b4f674a4 false
|
||||
check_key 24297d8832d733ed052dd102d4c40e813f702006f325644ccf0cb2c31f77953f false
|
||||
check_key 5231a53f6bea7c75b273bde4a9f673044ed87796f20e0909978f29d98fc8d4f0 true
|
||||
check_key 94b5affcf78be5cf62765c32a0794bc06b4900e8a47ddba0e166ec20cec05935 true
|
||||
check_key c14b4d846ea52ffbbb36aa62f059453af3cfae306280dada185d2d385ef8f317 true
|
||||
check_key cceb34fddf01a6182deb79c6000a998742d4800d23d1d8472e3f43cd61f94508 true
|
||||
check_key 1faffa33407fba1634d4136cf9447896776c16293b033c6794f06774b514744c true
|
||||
check_key faaac98f644a2b77fb09ba0ebf5fcddf3ff55f6604c0e9e77f0278063e25113a true
|
||||
check_key 09e8525b00bea395978279ca979247a76f38f86dce4465eb76c140a7f904c109 true
|
||||
check_key 2d797fc725e7fb6d3b412694e7386040effe4823cdf01f6ec7edea4bc0e77e20 false
|
||||
check_key bbb74dabee651a65f46bca472df6a8a749cc4ba5ca35078df5f6d27a772f922a false
|
||||
check_key 77513ca00f3866607c3eff5c2c011beffa775c0022c5a4e7de1120a27e6687fd true
|
||||
check_key 10064c14ace2a998fc2843eeeb62884fe3f7ab331ca70613d6a978f44d9868eb false
|
||||
check_key 026ae84beb5e54c62629a7b63702e85044e38cadfc9a1fcabee6099ba185005c false
|
||||
check_key aef91536292b7ba34a3e787fb019523c2fa7a0d56fca069cc82ccb6b02a45b14 false
|
||||
check_key 147bb1a82c623c722540feaad82b7adf4b85c6ec0cbcef3ca52906f3e85617ac true
|
||||
check_key fc9fb281a0847d58dc9340ef35ef02f7d20671142f12bdd1bfb324ab61d03911 false
|
||||
check_key b739801b9455ac617ca4a7190e2806669f638d4b2f9288171afb55e1542c8d71 false
|
||||
check_key 494cc1e2ee997eb1eb051f83c4c89968116714ddf74e460d4fa1c6e7c72e3eb3 true
|
||||
check_key ed2fbdf2b727ed9284db90ec900a942224787a880bc41d95c4bc4cf136260fd7 true
|
||||
check_key 02843d3e6fc6835ad03983670a592361a26948eb3e31648d572416a944d4909e true
|
||||
check_key c14fea556a7e1b6b6c3d4e2e38a4e7e95d834220ff0140d3f7f561a34e460801 true
|
||||
check_key 5f8f82a35452d0b0d09ffb40a1154641916c31e161ad1a6ab8cfddc2004efdf6 false
|
||||
check_key 7b93d72429fab07b49956007eba335bb8c5629fbf9e7a601eaa030f196934a56 true
|
||||
check_key 6a63ed96d2e46c2874beaf82344065d94b1e5c04406997f94caf4ccd97cfbab9 false
|
||||
check_key c915f409e1e0f776d1f440aa6969cfec97559ef864b07d8c0d7c1163871b4603 true
|
||||
check_key d06bc33630fc94303c2c369481308f805f5ce53c40141160aa4a1f072967617e false
|
||||
check_key 1aafb14ca15043c2589bcd32c7c5f29479216a1980e127e9536729faf1c40266 true
|
||||
check_key 58c115624a20f4b0c152ccd048c54a28a938556863ab8521b154d3165d3649cd false
|
||||
check_key 9001ba086e8aa8a67e128f36d700cc641071556306db7ec9b8ac12a6256b27b7 false
|
||||
check_key 898c468541634fb0def11f82c781341fce0def7b15695af4e642e397218c730c true
|
||||
check_key 47ea6539e65b7b611b0e1ae9ee170adf7c31581ca9f78796d8ebbcc5cd74b712 false
|
||||
check_key 0c60952a64eeac446652f5d3c136fd36966cf66310c15ee6ab2ecbf981461257 false
|
||||
check_key 682264c4686dc7736b6e46bdc8ab231239bc5dac3f5cb9681a1e97a527945e8e true
|
||||
check_key 276006845ca0ea4238b231434e20ad8b8b2a36876effbe1d1e3ffb1f14973397 true
|
||||
check_key eecd3a49e55e32446f86c045dce123ef6fe2e5c57db1d850644b3c56ec689fce true
|
||||
check_key a4dced63589118db3d5aebf6b5670e71250f07485ca4bb6dddf9cce3e4c227a1 false
|
||||
check_key b8ade608ba43d55db7ab481da88b74a9be513fca651c03e04d30cc79f50e0276 false
|
||||
check_key 0d91de88d007a03fe782f904808b036ff63dec6b73ce080c55231afd4ed261c3 true
|
||||
check_key 87c59becb52dd16501edadbb0e06b0406d69541c4d46115351e79951a8dd9c28 true
|
||||
check_key 9aee723be2265171fe10a86d1d3e9cf5a4e46178e859db83f86d1c6db104a247 false
|
||||
check_key 509d34ae5bf56db011845b8cdf0cc7729ed602fce765e9564cb433b4d4421a43 false
|
||||
check_key 06e766d9a6640558767c2aab29f73199130bfdc07fd858a73e6ae8e7b7ba23ba false
|
||||
check_key 801c4fe5ab3e7cf13f7aa2ca3bc57cc8eba587d21f8bc4cd40b1e98db7aec8d9 false
|
||||
check_key d85ad63aeb7d2faa22e5c9b87cd27f45b01e6d0fdc4c3ddf105584ac0a021465 false
|
||||
check_key a7ca13051eb2baeb5befa5e236e482e0bb71803ad06a6eae3ae48742393329d2 true
|
||||
check_key 5a9ba3ec20f116173d933bf5cf35c320ed3751432f3ab453e4a6c51c1d243257 false
|
||||
check_key a4091add8a6710c03285a422d6e67863a48b818f61c62e989b1e9b2ace240a87 false
|
||||
check_key bdee0c6442e6808f25bb18e21b19032cf93a55a5f5c6426fba2227a41c748684 true
|
||||
check_key d4aeb6cdad9667ec3b65c7fbc5bfd1b82bba1939c6bb448a86e40aec42be5f25 false
|
||||
check_key 73525b30a77f1212f7e339ec11f48c453e476f3669e6e70bebabc2fe9e37c160 true
|
||||
check_key 45501f2dc4d0a3131f9e0fe37a51c14869ab610abd8bf0158111617924953629 false
|
||||
check_key 07d0e4c592aa3676adf81cca31a95d50c8c269d995a78cde27b2a9a7a93083a6 false
|
||||
check_key a1797d6178c18add443d22fdbf45ca5e49ead2f78b70bdf1500f570ee90adca5 true
|
||||
check_key 0961e82e6e7855d7b7bf96777e14ae729f91c5bbd20f805bd7daac5ccbec4bab false
|
||||
check_key 57f5ba0ad36e997a4fb585cd2fc81b9cc5418db702c4d1e366639bb432d37c73 true
|
||||
check_key 82b005be61580856841e042ee8be74ae4ca66bb6733478e81ca1e56213de5c05 false
|
||||
check_key d7733dcae1874c93e9a2bd46385f720801f913744d60479930dad7d56c767cdc false
|
||||
check_key b8b8b698609ac3f1bd8f4965151b43b362e6c5e3d1c1feae312c1d43976d59ab true
|
||||
check_key 4bba7815a9a1b86a5b80b17ac0b514e2faa7a24024f269b330e5b7032ae8c04e true
|
||||
check_key 0f70da8f8266b58acda259935ef1a947c923f8698622c5503520ff31162e877b false
|
||||
check_key 233eaa3db80f314c6c895d1328a658a9175158fa2483ed216670c288a04b27bc false
|
||||
check_key a889f124fabfd7a1e2d176f485be0cbd8b3eeaafeee4f40e99e2a56befb665be true
|
||||
check_key 2b7b8abc198b11cf7efa21bc63ec436f790fe1f9b8c044440f183ab291af61d6 true
|
||||
check_key 2491804714f7938cf501fb2adf07597b4899b919cabbaab49518b8f8767fdc6a true
|
||||
check_key 52744a54fcb00dc930a5d7c2bc866cbfc1e75dd38b38021fd792bb0ca9f43164 true
|
||||
check_key e42cbf70b81ba318419104dffbb0cdc3b7e7d4698e422206b753a4e2e6fc69bb false
|
||||
check_key 2faff73e4fed62965f3dbf2e6446b5fea0364666cc8c9450b6ed63bbb6f5f0e7 true
|
||||
check_key 8b963928d75be661c3c18ddd4f4d1f37ebc095ce1edc13fe8b23784c8f416dfd false
|
||||
check_key b1162f952808434e4d2562ffda98bd311613d655d8cf85dc86e0a6c59f7158bc true
|
||||
check_key 5a69adcd9e4f5b0020467e968d85877cb3aa04fa86088d4499b57ca65a665836 true
|
||||
check_key 61ab47da432c829d0bc9d4fdb59520b135428eec665ad509678188b81c7adf49 false
|
||||
check_key 154bb547f22f65a87c0c3f56294f5791d04a3c14c8125d256aeed8ec54c4a06e true
|
||||
check_key 0a78197861c30fd3547b5f2eabd96d3ac22ac0632f03b7afd9d5d2bfc2db352f true
|
||||
check_key 8bdeadcca1f1f8a4a67b01ed2f10ef31aba7b034e8d1df3a69fe9aebf32454e0 false
|
||||
check_key f4b17dfca559be7d5cea500ac01e834624fed9befae3af746b39073d5f63190d true
|
||||
check_key 622c52821e16ddc63b58f3ec2b959fe8c6ea6b1a596d9a58fd81178963f41c01 true
|
||||
check_key 07bedd5d55c937ef5e23a56c6e58f31adb91224d985285d7fef39ede3a9efb17 false
|
||||
check_key 5179bf3b7458648e57dc20f003c6bbfd55e8cd7c0a6e90df6ef8e8183b46f99d true
|
||||
check_key 683c80c3f304f10fdd53a84813b5c25b1627ebd14eb29b258b41cd14396ef41f true
|
||||
check_key c266244ed597c438170875fe7874f81258a830105ca1108131e6b8fea95eb8ba true
|
||||
check_key 0c1cdc693df29c2d1e66b2ce3747e34a30287d5eb6c302495634ec856593fe8e true
|
||||
check_key 28950f508f6a0d4c20ab5e4d55b80565a6a539092e72b7eb0ed9fa5017ecef88 false
|
||||
check_key 8328a2a5fcfc4433b1c283539a8943e6eb8cc16c59f29dedc3af2c77cfd56f25 true
|
||||
check_key 5d0f82319676d4d3636ff5dc2a38ea5ec8aeaac4835fdcab983ab35d76b7967b false
|
||||
check_key cafcc75e94a014115f25c23aaae86e67352f928f468d4312b92240ff0f3a4481 false
|
||||
check_key 3e5fdd8072574218f389d018e959669e8ca4ef20b114ea7dce7bfb32339f9f42 true
|
||||
check_key 591763e3390a78ccb529ceea3d3a97165878b179ad2edaa166fd3c78ec69d391 true
|
||||
check_key 7a0a196935bf79dc2b1c3050e8f2bf0665f7773fc07511b828ec1c4b1451d317 false
|
||||
check_key 9cf0c034162131fbaa94a608f58546d0acbcc2e67b62a0b2be2ce75fc8c25b9a false
|
||||
check_key e3840846e3d32644d45654b96def09a5d6968caca9048c13fcaab7ae8851c316 false
|
||||
check_key a4e330253739af588d70fbda23543f6df7d76d894a486d169e5fedf7ed32d2e2 false
|
||||
check_key cfb41db7091223865f7ecbdda92b9a6fb08887827831451de5bcb3165395d95d true
|
||||
check_key 3d10bd023cef8ae30229fdbfa7446a3c218423d00f330857ff6adde080749015 false
|
||||
check_key 4403b53b8d4112bb1727bb8b5fd63d1f79f107705ffe17867704e70a61875328 false
|
||||
check_key 121ef0813a9f76b7a9c045058557c5072de6a102f06a9b103ead6af079420c29 true
|
||||
check_key 386204cf473caf3854351dda55844a41162eb9ce4740e1e31cfef037b41bc56e false
|
||||
check_key eb5872300dc658161df469364283e4658f37f6a1349976f8973bd6b5d1d57a39 true
|
||||
check_key b8f32188f0fc62eeb38a561ff7b7f3c94440e6d366a05ef7636958bc97834d02 false
|
||||
check_key a817f129a8292df79eef8531736fdebb2e985304653e7ef286574d0703b40fb4 false
|
||||
check_key 2c06595bc103447b9c20a71cd358c704cb43b0b34c23fb768e6730ac9494f39e true
|
||||
check_key dd84bc4c366ced4f65c50c26beb8a9bc26c88b7d4a77effbb0f7af1b28e25734 false
|
||||
check_key 76b4d33810eed637f90d49a530ac5415df97cafdac6f17eda1ba7eb9a14e5886 true
|
||||
check_key 926ce5161c4c92d90ec4efc58e5f449a2c385766c42d2e60af16b7362097aef5 false
|
||||
check_key 20c661f1e95e94a745eb9ec7a4fa719eff2f64052968e448d4734f90952aefee false
|
||||
check_key 671b50abbd119c756010416e15fcdcc9a8e92eed0f67cbca240c3a9154db55c0 false
|
||||
check_key df7aeee8458433e5c68253b8ef006a1c74ce3aef8951056f1fa918a8eb855213 false
|
||||
check_key 70c81a38b92849cf547e3d5a6570d78e5228d4eaf9c8fdd15959edc9eb750daf false
|
||||
check_key 55a512100b72d4ae0cfc16c75566fcaa3a7bb9116840db1559c71fd0e961cc36 false
|
||||
check_key dbfbec4d0d2433a794ad40dc0aea965b6582875805c9a7351b47377403296acd true
|
||||
check_key 0a7fe09eb9342214f98b38964f72ae3c787c19e5d7e256af9216f108f88b00a3 true
|
||||
check_key a82e54681475f53ced9730ee9e3a607e341014d9403f5a42f3dbdbe8fc52e842 true
|
||||
check_key 4d1f90059f7895a3f89abf16162e8d69b399c417f515ccb43b83144bbe8105f6 true
|
||||
check_key 94e5c5b8486b1f2ff4e98ddf3b9295787eb252ba9b408ca4d7724595861da834 false
|
||||
check_key d16e3e8dfa6d33d1d2db21c651006ccddbf4ce2e556594de5a22ae433e774ae6 false
|
||||
check_key a1b203ec5e36098a3af08d6077068fec57eab3a754cbb5f8192983f37191c2df false
|
||||
check_key 5378bb3ec8b4e49849bd7477356ed86f40757dd1ea3cee1e5183c7e7be4c3406 false
|
||||
check_key 541a4162edeb57130295441dc1cb604072d7323b6c7dffa02ea5e4fed1d2ee9e true
|
||||
check_key d8e86e189edcc4b5c262c26004691edd7bd909090997f886b00ed4b6af64d547 false
|
||||
check_key 18a8731d1983d1df2ce2703b4c85e7357b6356634ac1412e6c2ac33ad35f8364 false
|
||||
check_key b21212eac1eb11e811022514c5041233c4a07083a5b20acd7d632a938dc627de true
|
||||
check_key 50efcfac1a55e9829d89334513d6d921abeb237594174015d154512054e4f9d1 true
|
||||
check_key 9c44e8bcba31ddb4e67808422e42062540742ebd73439da0ba7837bf26649ec4 true
|
||||
check_key b068a4f90d5bd78fd350daa129de35e5297b0ad6be9c85c7a6f129e3760a1482 false
|
||||
check_key e9df93932f0096fcf2055564457c6dc685051673a4a6cd87779924be5c4abead true
|
||||
check_key eddab2fc52dac8ed12914d1eb5b0da9978662c4d35b388d64ddf8f065606acaf true
|
||||
check_key 54d3e6b3f2143d9083b4c98e4c22d98f99d274228050b2dc11695bf86631e89f true
|
||||
check_key 6da1d5ef1827de8bbf886623561b058032e196d17f983cbc52199b31b2acc75b true
|
||||
check_key e2a2df18e2235ebd743c9714e334f415d4ca4baf7ad1b335fb45021353d5117f true
|
||||
check_key f34cb7d6e861c8bfe6e15ac19de68e74ccc9b345a7b751a10a5c7f85a99dfeb6 false
|
||||
check_key f36e2f5967eb56244f9e4981a831f4d19c805e31983662641fe384e68176604a true
|
||||
check_key c7e2dc9e8aa6f9c23d379e0f5e3057a69b931b886bbb74ded9f660c06d457463 true
|
||||
check_key b97324364941e06f2ab4f5153a368f9b07c524a89e246720099042ad9e8c1c5b false
|
||||
check_key eff75c70d425f5bba0eef426e116a4697e54feefac870660d9cf24c685078d75 false
|
||||
check_key 161f3cd1a5873788755437e399136bcbf51ff5534700b3a8064f822995a15d24 false
|
||||
check_key 63d6d3d2c21e88b06c9ff856809572024d86c85d85d6d62a52105c0672d92e66 false
|
||||
check_key 1dc19b610b293de602f43dca6c204ce304702e6dc15d2a9337da55961bd26834 false
|
||||
check_key 28a16d02405f509e1cfef5236c0c5f73c3bcadcd23c8eff377253941f82769db true
|
||||
check_key 682d9cc3b65d149b8c2e54d6e20101e12b7cf96be90c9458e7a69699ec0c8ed7 false
|
||||
check_key 0000000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0000000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0100000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0100000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0200000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0200000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0300000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0300000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0400000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0400000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0500000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0500000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0600000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0600000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0700000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0700000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0800000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0800000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0900000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0900000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0a00000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0a00000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0b00000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0b00000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0c00000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0c00000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0d00000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 0d00000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 0e00000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0e00000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 0f00000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 0f00000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 1000000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 1000000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 1100000000000000000000000000000000000000000000000000000000000000 false
|
||||
check_key 1100000000000000000000000000000000000000000000000000000000000080 false
|
||||
check_key 1200000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 1200000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key 1300000000000000000000000000000000000000000000000000000000000000 true
|
||||
check_key 1300000000000000000000000000000000000000000000000000000000000080 true
|
||||
check_key daffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
|
||||
check_key daffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
|
||||
check_key dbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
|
||||
check_key dbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key dcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key dcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key ddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key ddffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key deffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key deffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key dfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key dfffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key e0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key e0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key e1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key e1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key e2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key e2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key e3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key e4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key e4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key e5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key e5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key e6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key e6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key e7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key e7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key e8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key e8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key e9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key e9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key eaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key eaffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff true
check_key ebffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key ebffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f true
check_key ecffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key edffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key eeffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key efffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f0ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f2ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f3ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f4ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f5ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f6ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f7ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f8ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key f9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key f9ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key faffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key faffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key fbffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key fcffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key fdffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key feffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key feffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
check_key ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff7f false
check_key ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff false
hash_to_ec da66e9ba613919dec28ef367a125bb310d6d83fb9052e71034164b6dc4f392d0 52b3f38753b4e13b74624862e253072cf12f745d43fcfafbe8c217701a6e5875
hash_to_ec a7fbdeeccb597c2d5fdaf2ea2e10cbfcd26b5740903e7f6d46bcbf9a90384fc6 f055ba2d0d9828ce2e203d9896bfda494d7830e7e3a27fa27d5eaa825a79a19c
hash_to_ec ed6e6579368caba2cc4851672972e949c0ee586fee4d6d6a9476d4a908f64070 da3ceda9a2ef6316bf9272566e6dffd785ac71f57855c0202f422bbb86af4ec0
hash_to_ec 9ae78e5620f1c4e6b29d03da006869465b3b16dae87ab0a51f4e1b74bc8aa48b 72d8720da66f797f55fbb7fa538af0b4a4f5930c8289c991472c37dc5ec16853
hash_to_ec ab49eb4834d24db7f479753217b763f70604ecb79ed37e6c788528720f424e5b 45914ba926a1a22c8146459c7f050a51ef5f560f5b74bae436b93a379866e6b8
hash_to_ec 5b79158ef2341180b8327b976efddbf364620b7e88d2e0707fa56f3b902c34b3 eac991dcbba39cb3bd166906ab48e2c3c3f4cd289a05e1c188486d348ede7c2e
hash_to_ec f21daa7896c81d3a7a2e9df721035d3c3902fe546c9d739d0c334ed894fb1d21 a6bedc5ffcc867d0c13a88a03360c8c83a9e4ddf339851bd3768c53a124378ec
hash_to_ec 3dae79aaca1abe6aecea7b0d38646c6b013d40053c7cdde2bed094497d925d2b 1a442546a35860a4ab697a36b158ded8e001bbfe20aef1c63e2840e87485c613
hash_to_ec 3d219463a55c24ac6f55706a6e46ade3fcd1edc87bade7b967129372036aca63 b252922ab64e32968735b8ade861445aa8dc02b763bd249bff121d10829f7c52
hash_to_ec bc5db69aced2b3197398eaf7cf60fd782379874b5ca27cb21bd23692c3c885cc ae072a43f78a0f29dc9822ae5e70865bbd151236a6d7fe4ae3e8f8961e19b0e5
hash_to_ec 98a6ed760b225976f8ada0579540e35da643089656695b5d0b8c7265a37e2342 6a99dbfa8ead6228910498cc3ff3fb18cb8627c5735e4b8657da846c16d2dcad
hash_to_ec e9cdc9fd9425a4a2389a5d60f76a2d839f0afbf66330f079a88fe23d73eae930 8aa518d091928668f3ca40e71e14b2698f6cae097b8120d7f6ae9afba8fd3d60
hash_to_ec a50c026c0af2f9f9884c2e9b8464724ac83bef546fec2c86b7de0880980d24fb b07433f8df39da2453a1e13fd413123a158feae602d822b724d42ef6c8e443bf
hash_to_ec bf180e20d160fa23ccfa6993febe22b920160efc5a9614245f1a3a360076e87a 9d6454ff69779ce978ea5fb3be88576dc8feaedf151e93b70065f92505f2e800
hash_to_ec b2b64dfeb1d58c6afbf5a56d8c0c42012175ebb4b7df30f26a67b66be8c34614 0523b22e7f220c939b604a15780abc5816709b91b81d9ee1541d44bd2586bbd8
hash_to_ec 463fc877f4279740020d10652c950f088ebdebeae34aa7a366c92c9c8773f63a daa5fa72e70c4d3af407b8f2f3364708029b2d4863bbdde54bd67bd08db0fcad
hash_to_ec 721842f3809982e7b96a806ae1f162d98ae6911d476307ad1e4f24522fd26f55 4397c300a8cfcb42e7cc310bc975dc975ec2d191eaa7e0462998eb2830c34126
hash_to_ec 384da8d9b83972af8cbefc2da5efc744037c8ef40efa4b3bacc3238a6232963d 3c80f107e6868f73ef600ab9229a3f4bbe24f4adce52e6ab3a66d5d510e0670d
hash_to_ec e26f8adef5b6fe5bb01466bff0455ca23fda07e200133697b3b6430ca3332bde e262a58bcc1f8baf1980e00d5d40ba00803690174d14fb4c0f608429ce3df773
hash_to_ec 6e275b4ea4f085a5d3151aa08cf16a8c60b078e70be7ce5dac75b5d7b0eebe7c cb21b5a7744b4fcdc92ead4be0b04bcb9145e7bb4b06eff3bb2f0fe429b85108
hash_to_ec a0dde4561ad9daa796d9cd8a3c34fd41687cee76d128bf2e2252466e3ef3b068 79a2eb06bb7647f5d0aae5da7cf2e2b2d2ce890f25f2b1f81bfc5fef8c87a7d3
hash_to_ec dbaf63830e037b4c329969d1d85e58cb6c4f56014fd08eb38219bd20031ae27c 079c93ae27cd98075a487fd3f7457ad2fb57cdf12ec8651fedd944d765d07549
hash_to_ec 1e87ba8a9acf96948bc199ae55c83ab3277be152c6d0b1d68a07955768d81171 5c6339f834116791f9ea22fcc3970346aaeddacf13fbd0a7d4005fbd469492ca
hash_to_ec 5a544088e63ddf5b9f444ed75a75bc9315c4c50439522f06b4823ecaf5e8a08d e95ca0730d57c6469be3a0f3c94382f8490257e2e546de86c650bdbc6482eaee
hash_to_ec e4e06d92ebb036a5e4bb547dbaa43fd70db3929eef2702649455c86d7e59aa46 e26210ff8ee28e24ef2613df40aa8a874b5e3c1d07ae14acc59220615aa334dc
hash_to_ec 5793b8b32dcc0f204501647f2976493c4f8f1fa5132315226f99f29a5a6fdfce 656e390086906d99852c9696e831f62cb56fc8f85f9a5c936c327f23c7faf4fe
hash_to_ec 84f56fa4d7f12e0efd48b1f7c81c15d6e3843ebb419f4a27ec97028d4f9da19e 0cbd4f0cd288e1e071cce800877de6aef97b63fff867424a4f2b2bab25602608
hash_to_ec 242683ddf0a9fc55f6585de3aa64ea17c9c544896ff7677cd82c98f833bdf2ca 38c36d52314549213df7c7201ab7749a4724cbea92812f583bb48cabc20816ad
hash_to_ec a93ee320dc030aa382168c2eb6d75fce6e5a63a81f15632d514c6de8a7cfa5ee bd0a2facaa95bc95215a94be21996e46f789ee8beb38e75a1173b75fc686c505
hash_to_ec e36136601d84475d25c3f14efe030363d646658937a8a8a19a812d5e6deb5944 2fb93d78fae299c9f6b22346acfb829796ee7a47ec71db5456d8201bec6c35a3
hash_to_ec ba4b67d3d387c66baa4a32ec8b1db7681087e85076e71bab10036388c3aeb011 cc01329ce56f963bf444a124751c45b2c779ccb6dea16ca05251baca246b5401
hash_to_ec 3fbc91896a2585154d6f7094c5ab9c487e29a27951c226eec1235f618e44946b 7d983acbb901bf5497d0708392e5e742ec8c8036cbb0d03403e9929da8cc85a7
hash_to_ec a2da289fed650e9901f69a5f33535eb47c6bd07798633cbf6c00ce3172df76ac dca8a4d30ec2d657fefd0dba9c1c5fd45a79f665048b3cf72ac2c3b7363da1ac
hash_to_ec 99025d2d493f768e273ed66cacd3a5b392761e6bd158ca09c8fba84631ea1534 7ef5af79ab155ab7e1770a47fcd7f194aca43d79ec6e303c7ce18c6a20279b04
hash_to_ec 3cf1d01d0b70fb31f2a2f979c1bae812381430f474247d0b018167f2a2cd9a9f 7c53d799ec938a21bb305a6b5ca0a7a355fa9a68b01d289c4f22b36ce3738f95
hash_to_ec 639c421b49636b2a1f8416c5d6e64425fe51e3b52584c265502379189895668e 0b47216ae5e6e03667143a6cf8894d9d73e3152c64fb455631d81a424410e871
hash_to_ec 4ccf2c973348b7cc4b14f846f9bfcdcb959b7429accf6dede96248946841d990 7fd41f5b97ba42ed03947dd953f8e69770c92cc34b16236edad7ab3c78cbbb2e
hash_to_ec f76ae09fff537f8919fd1a43ff9b8922b6a77e9e30791c82cf2c4b8acb51363e 8e2c6bf86461ad2c230c496ee3896da33c11cc020fd4c70faa3645b329049234
hash_to_ec 98932da7450f15db6c1eef78359904915c31c2aa7572366ec8855180edb81e3a 86180adddfac0b4d1fb41d58e98445dde1da605b380d392e9386bd445f1d821c
hash_to_ec ab26a1660988ec7aba91fc01f7aa9a157bbc12927f5b197062b922a5c0c7f8dd 2c44a43eda0d0aad055f18333e761f2f2ec11c585ec7339081c19266af918e4f
hash_to_ec 4465d0c1b4930cc718252efd87d11d04162d2a321b9b850c4a19a6acdfca24f4 b03806287d804188a4d679a0ecee66f399d7bdc3bd1494f9b2b0772bbb5a034f
hash_to_ec 0f2a7867864ed00e5c40082df0a0b031c89fa5f978d9beb2fde75153f51cfb75 5c471e1b118ef9d76c93aec70e0578f46e8db1d55affd447c1f64c0ad9a5caa5
hash_to_ec 5c2808c07d8175f332cae050ce13bec4254870d76abff68faf34b0b8d3ad5000 eeff1d9a5aa428b7aecc575e63dde17294072eb246568493e1ed88ce5c95b779
hash_to_ec 36300a21601fad00d00da45e27b36c11923b857f97e50303bd01f21998eaef95 b33b077871e6f5dad8ff6bc621c1b6dedcf700777d996c8c02d73f7297108b7e
hash_to_ec 9e1afb76d6c480816d2cedd7f2ab08a36c309efaa3764dcdb51bad6049683805 4cd96ba7b543b1a224b8670bf20b3733e3910711d32456d3e58e920215788adf
hash_to_ec 685f152704664495459b76c81567a4b571e8b307dd0e3c9b08ee95651a006047 80dd6b637580cb3be76025867f1525852b65a7a66066993fda3af7eb187dc1a5
hash_to_ec 0b216444391a1163c14f7b27f9135e9747978c0e426dce1fa65c657f3e9146be 021259695a6854a4a03e8c74d09ab9630a401bfca06172a733fe122f01af90b4
hash_to_ec cfcb35e98f71226c3558eaa9cf620db5ae207ece081ab13ddea4b1f122850a5a 46763d2742e2cdffe80bb3d056f4d3a1565aa83f19aab0a1f89e54ad81ae0814
hash_to_ec 07e7292da8cdcdb58ee30c3fa16f1d609e9b3b1110dd6fa9b2cc18f4103a1c12 fe949ca251ac66f13a8925ae624a09cdbf6696d3c110442338d37700536e8ec7
hash_to_ec 813bc7e3749e658190cf2a4e358bc07a6671f262e2c4eef9f44c66066a72e6a7 6b92fbda984bd0e6f4af7a5e04c2b66b6f0f9d197a9694362a8556e5b7439f8a
hash_to_ec 89c50a1e5497156e0fae20d99f5e33e330362b962c9ca00eaf084fe91aaec71d ef36cb75eb95fb761a8fa8c376e9c4447bcd61421250f7a711bd289e6ed78a9b
hash_to_ec d9bd9ff2dd807eb25de7c5de865dbc43cce2466389cedbc92b90aab0eb014f81 30104771ff961cd1861cd053689feab888c57b8a4a2e3989646ea7dea40f3c04
hash_to_ec b8c837501b6ca3e118db9848717c847c062bf0ebeca5a7c211726c1426878af5 19a1e204b4a32ce9cccf5d96a541eb76a78789dceaf4fe69964e58ff96c29b63
hash_to_ec 84376c5350a42c07ac9f96e8d5c35a8c7f62c639a1834b09e4331b5962ecace8 ba1e4437d5048bd1294eadc502092eafc470b99fde82649e84a52225e68e88f2
hash_to_ec a3345e4a4cfc369bf0e7d11f49aed0d2a6ded00e3ff8c7605db9a919cf730640 0d318705c16e943c0fdcde134aaf6e4ccce9f3d9161d001861656fc7ea77a0b1
hash_to_ec 3c994dfb9c71e4f401e65fd552dc9f49885f88b8b3588e24e1d2e9b8870ffab1 984157de5d7c2c4b43b2bffea171809165d7bb442baea88e83b27f839ebdb939
hash_to_ec 153674c1c1b18a646f564af77c5bd7de452dc3f3e1e2326bfe9c57745b69ec5c e9a4a1e225ae472d1b3168c99f8ba1943ad2ed84ef29598f3f96314f22db9ef2
hash_to_ec 2d46a705d4fe5d8b5a1f4e9ef46d9e06467450eb357b6d39faa000995314e871 b9d1aec540bf6a9c0e1b325ab87d4fbe66b1df48986dde3cb62e66e136eba107
hash_to_ec 6764c3767f16ec8faecc62f9f76735f76b11d7556aeb61066aeaeaad4fc9042f 3a5c68fb94b023488fb5940e07d1005e7c18328e7a84f673ccd536c07560a57b
hash_to_ec c99c6ee5804d4b13a445bc03eaa07a6ef5bcb2fff0f71678dd3bd66b822f8be8 a9e1ce91deed4136e6e53e143d1c0af106abde9d77c066c78ebbf5d227f9dde0
hash_to_ec 3009182e1efac085c7eba24a7d9ef28ace98ebafa72211e73a41c935c37e6768 e55431a4c89d38bd95f8092cdf6e44d164ad5855677aba17ec262abc8c217c86
hash_to_ec e7153acd114a7636a207be0b67fa86fee56dd318f2808a81e35dd13d4251b2d0 ff2b98d257e4d4ff7379e8871441ca7d26e73f78f3f5afcf421d78c9799ba677
hash_to_ec 6378586744b721c5003976e3e18351c49cd28154c821bc45338892e5efedd197 3d765fb7bb4e165a3fa6ea00b5b5e22250f3861f0db0099626d9a9020443dda2
hash_to_ec 5be49aba389b7e3ad6def3ba3c7dbec0a11a3c36fc9d441130ef370b8a8d29c2 2d61faf38062dc98ae1aaafec05e90a925c9769df5b8b8f7090d9e91b2a11151
hash_to_ec f7bc382178d38e1b9a1a995bd8347c1283d8a2e8d150379faa53fd125e903d2b 544c815da65c3c5994b0ac7d6455578d03a2bc7cf558b788bcdb3430e231635a
hash_to_ec c28b5c4b6662eebb3ec358600644849ebeb59d827ed589c161d900ca18715fa8 a2d64db3c0e0353c257aadf9abc12ac779654d364f348b9f8e429aa7571203db
hash_to_ec 3a4792e5df9b2416a785739b9cf4e0d68aef600fa756a399cc949dd1fff5033a 4b54591bd79c30640b700dfb7f20158f692f467b6af70bd8a4e739c14a66c86a
hash_to_ec 002e70f25e1ceaf35cc14b2c6975a4c777b284a695550541e6f5424b962c19f5 73987e9342e338eb57a7a9e03bd33144db37c1091e952a10bd243c5bb295c18a
hash_to_ec 7eb671319f212c9cae0975571b6af109124724ba182937a9066546c92bdeff0c 49b46da3be0df1d141d2a323d5af82202afa2947a95b9f3df47722337f0d5798
hash_to_ec ca093712559c8edd5c51689e2ddcb8641c2960e5d9c8b03a44926bb798a0c8dc b9ef9cf0f8e4a3d123db565afafb1102338bfb75498444ac0a25c5ed70d615da
hash_to_ec cfea0a08a72777ff3aa7be0d8934587fa4127cd49a1a938232815dc3fd8b23ac b4de604b3d712f1ef578195fb0e53c865d41e2dfe425202c6cfe6f10e4404eb5
hash_to_ec aa0122ae258d6db21a26a31c0c92d8a0e3fdb46594aed41d561e069687dedcd6 5247eaec346de1c6cddf0ab04c12cd1d85cdb6d3a2fba2a5f9a5fe461abef5eb
hash_to_ec b3941734f4d3ba34ccaf03c4c737ac5a1e036eb74309300ce44d73aca24fef08 535938985c936e3780c61fe29a4121d6cb89a05080b6c2147031ea0c2b5b9829
hash_to_ec 8c2ee1041a2743b30dcbf413cc9232099b9268f82a5a21a09b63e7aff750882f 6ad0d4b3a65b522dfad0e9ac814b1fb939bc4910bd780943c72f57f362754cca
hash_to_ec 4b6829a2a2d46c8f0d0c23db0f735fcf976524bf39ccb623b919dd3b28ad5193 2e0097d7f92993bc45ba06baf4ca63d64899d86760adc4eb5eeefb4a78561050
hash_to_ec 9c1407cb6bba11e7b4c1d274d772f074f410d6fe9a1ee7a22cddf379257877d9 692261c7d6a9a7031c67d033f6d82a68ef3c27bd51a5666e55972238769821cd
hash_to_ec 638c42e4997abf8a4a9bffd040e31bd695d590cde8afbd7efd16ffdbae63bf66 793024c8ce196a2419f761dde8734734af6bd9eb772b30cc78f2cb89598dce97
hash_to_ec 1fb60d79600de151a1cf8a2334deb5828632cbd91cb5b3d45ae06e08187ae23d ff2542cde5bc2562e69471a31cfc3d0c26e2f6ccc1891a633b07a3968e42521c
hash_to_ec d2fdbbae4e38a1b734151c3df52540feb2d3ff74edfef2f740e49a5c363406ee 344c83ba6ff4e38b257077623d298d2f2b52002645021241bc9389f81b29ad12
hash_to_ec 836c27a6ddfe1a24aba3d6022dff6dfe970f142d8b4ac6afb8efcba5a051942f b8af481d33726b3f875268282d621e4c63f891a09f920b8f2f49080f3a507387
hash_to_ec 46281153ddcdf2e79d459693b6fe318c1969538dd59a750b790bfff6e9481abf 8eaf534919ab6573ba4e0fbde0e370ae01eae0763335177aa429f61c4295e9d4
hash_to_ec d57b789e050bf3db462b79a997dac76aa048d4be05f133c66edee56afd3dbe66 0c5a294cb2cbb6d9d1c0a1d57d938278f674867f612ed89dcbe4533449f1a131
hash_to_ec 548d524d03ac22da18ff4201ce8dbee83ad9af54ee4e26791d26ed2ab8f9bfc7 c6609d9e7d9fd982dec8a166ff4fb6f7d195b413aad2df85f73d555349134f3b
hash_to_ec cc920690422e307357f573b87a6e0e65f432c6ec12a604eb718b66ba18897a56 6f11c466d1c72fccd81e51d9bda03b6e8d6a395e1d931b2a84e392dc9a3efa18
hash_to_ec c7fb8a51f5fcd8824fc0875d4eb57ab4917cb97090a6e2288f852f2bb449edd9 45543fea6eed461016e48598b521f18ff70178afea18032b188deea3e56052fc
hash_to_ec c681bb1b829e24b1c52cb890036b89f0029d261c6a15e5b2c684ee7dfe91e746 263006fe2c6b08f1ab29cdf442472c298e2faf225bbf5c32399d3745cd3904bd
hash_to_ec e06411c542312fdd305e17e46be14c63bab5836dc8751da06164b1ae22d4e20f 901871be7a7ff5aecade2acff869846f3c50de69307ac155f2aa3a74d5472ef2
hash_to_ec 9c725a2acb80fa712f9781da510e5163b1b30f4e1c064c26b5185e537f0614ea 02420d49257846eb39fddd196d3171679f6be21d9adac667786b65a6e90f57b1
hash_to_ec 22792772820feafa85c5cb3fa8f876105251bef08617d389619697f47dff54f2 a3ad444e7811693687f3925e7c315ae55d08d9f4b0a29876bc2a891ab941c1c3
hash_to_ec 0587b790121395d0f4f39093d10b4817f58a1e80621a24eea22b3c127d6ac5a2 86c417c695c64c7becaad0d59ddbb2bca4cb2b409a21253d680aac1a08617095
hash_to_ec fa0b5f28399bef0cd87bfe6b8a2b69e9c5506fb4bacd22deba8049615a5db526 ede0ea240036ff75d075258a053f3ce5d6f77925d358dbe33c06509fc9b12111
hash_to_ec 62a3274fc0bed109d5057b865c2ba6b6a5a417cb90a3425674102fcd457ede2d ff7e46751bb4dcd1e800a8feab7cf6771f42dc0cfed7084c23b8a5d255a6f34e
hash_to_ec a6fcd4aecaaaf281563b9b7cd6fbc7b1829654f644f4165942669a2ef632b2bf 28f136be0eb957a5b36f8ec294399c9f73ad3a3c9bb953ad191758ced554a233
hash_to_ec 01baa4c06d6676c9b286cda76ed949fd80a408b3309500ba84a5bb7e3dce58e2 a943d1afa2efce284740e7db21ea02db70b124808be2ff80cbf9b9cb96c7b73e
hash_to_ec dd9aff9c006ba514cef8fae665657bc9813fe2715467cf479643ea4c4e365d6d 68de2f7d49de4004286ce0989a06a686b15d0f463a02ffd448a18914e1ddf713
hash_to_ec 3df3513d5e539161761ce7992ab9935f649bc934bed0da3c5e1095344b733bb9 e9c2dd747d7b2482474325943cd850102b8093164678362c7621993a790e2a8a
hash_to_ec 7680cfb244dc8ef37c671fff176be1a3dad00e5d283f93145d0cbee74cca2df4 a0fd8c3cca16a130eaa5864cbe8152b7adfbf09e8cf72244b2fc8364c3b20bf4
hash_to_ec 8a547c38bd6b219ea0d612d4a155eba9c56034a1405dcf4b608de787f37e0fd8 76bf0dc40fd0a5508c5e091d8bb7eccfa28b331e72c6a0d4ac0e05a3d651850b
hash_to_ec dd93901621f58465e9791012afa76908f1e80ad80e52b809dc7fc32bb004f0a8 09a0b7ecfe8058b1e9ee01c9b523826867ca97a32efad29ac8ceebca67a4ea00
hash_to_ec b643010220f1f4ee6c7565f6e1b3dc84c18274ede363ac36b6af3707e69a1542 233c9ff8de59e5f96c2f91892a71d9d93fa7316319f30d1615f10ac1e01f9285
hash_to_ec c2637b2299dfc1fd7e953e39a582bafd19e6e7fff3642978eb092b900dbfea80 339587ba1c05e2cba44196a4be1fd218b772199e2c61c3c0ff21dcd54b570c43
hash_to_ec 1f36d3a7e7c468eb000937de138809e381ad2e23414cbbaac49b7f33533ed486 7e5b0a96051c77237a027a79764c2763487af88121c7774645e97827fb744888
hash_to_ec 8c142a55f60b2edbe03335b7f90aa2bd63e567048a65d61c70cb28779c5200af d3d6d5563b3d81c8c91cf9806bb13b2850fb7c162c610fd2f5b83c464add8182
hash_to_ec 99e7b98293c9de1f81aff1376485a990014b8b176521b2a68cdbde6300190398 119cbc01a1d9b9fb4759031d3a70685aebea0f01bc5ee082ce824265fd21b3b4
hash_to_ec 9753bd38be072b51490290be6207ca4545e3541bdf194e0850ae0a9f9e64b8ba 1ad3aa759863153606fa6570f0e1290baded4c8c1f2ba0f67c1911bfc8ccd7a0
hash_to_ec 322703864ceee19b7f17cec2a822f310f0c4da3ff98b0be61a6fd30ac4db649c 89d9e7a5947e1cde874e4030de278070aae363063cd3592ce5411821474f0816
hash_to_ec c1acd01e1e535fad273a8b757d981470f43dd7d95af732901fbba16b6e245761 57e80445248111150da5e63c706b4abbf3eef2cc508bd0347ff6b81e8c59f5bc
hash_to_ec 492473559f181bbe78f60215bc6d3a5168435ea2fc0a508372d6f5ca126e9767 df3965f137cf6f60c56ebd7c8f246281fd6dc92ce23a37e9f846f8452c884e01
hash_to_ec afa9d6e0e2fb972ee806beb450c2c0165e58234b0676a4ec0ca19b6e710d7c35 669a57e69dd2845a5e50ed8e5d8423ac9ae792a43c7738554d6c5e765a7b088a
hash_to_ec 094de050bdadef3b7dbaeeca29381c667e63e71220970149d97b95db8f4db61b 0cf5d03530c5e97850d0964c6a394de9cde1e8e498f8c0e173c518242c07f99a
hash_to_ec 2ce583724bc699ad800b33176a1d983512fe3cb3afa65d99224b23dae223efb7 e1548fd563c75ae5b5366dbab4cb73c54e7d5e087c9e5453125ff8fbe6c83a5c
hash_to_ec 8064974b976ff5ef6adaade6196ab69cda6970cd74f7f5899181805f691ad970 98ae63c47331a4ac433cb2f17230c525982d89d21e2838515a36ec5744ec2d15
hash_to_ec 384911047de609c6ae8438c745897357989363885cef2381a8a00a090cf04a58 4692ec3a0a03263620841c108538d584322fdd24d221a74bf1e1f407f83828af
hash_to_ec 0e1b1ced5ae997ef9c10b72cfc6d8c36d7433c01fc04f4083447f87243282528 6ee443ab0637702b7340bd4a908b9e2e63df0cc423c409fb320eb3f383118b80
hash_to_ec 5a7aea70c85c040af6ff3384bcaa63ec45c015b55b44fffa37ab982a00dc57c5 2df2e20137cefd166c767646ecd2e386d28f405aebe43d739aa55beba04ed407
hash_to_ec 3e878a3567487f20f7c98ea0488a40b87f1ba99e50bbfe9f00a423f927cbd898 697c7e60e4bf8c429ba7ac22b11a4b248d7465fc6abe597ec6d1e1c973330688
hash_to_ec c0bb08350d8a4bb6bf8745f6440e9bd254653102a81c79d6528da2810da758e4 396a872ac9147a69b27223bf4ec4198345b26576b3690f233b832395f2598235
hash_to_ec 6c3026a9284053a4ddb754818f9ae306ffa96eb7003bd03826eeccc9a0cf656e bef73da51d3ba9972a33d1afb7d263094b66ab6dbe3988161b08c17f8c69c2d5
hash_to_ec f80b7d8f5a80d321af3a42130db199d9edcb8f5a82507d8bfca6d002d65458b6 aa59c167ea60ee024421bfbd00adbb3cbfc20e16bd3c9b172a6bef4d47ca7f57
hash_to_ec bc0ffc24615aa02fafef447f17e7b776489cd2cc909f71e8344e01cad9f1610d 5c4195cc8dc3518143f06a9c228ae59ec9a6425a8fab89bfc638ad997cf35220
hash_to_ec b15fad558737229f8816fcba8fbef805bd420c03e392d118c69bdf01890c4924 f5810477e37554728837f097e1b170d1d8c95351c7fff8abbbfc624e1a50c1b9
hash_to_ec ec8c1f10d8e9da9cf0d57c4a1f2c402771bed7970109f3cf21ad32111f1f198f a697e0a3f09827b0cf3a4ffb6386388feda80d30ffffcbd54443dafcba162b28
hash_to_ec a989647bf0d70fdb7533b8c303a2a07f5e42e26a45ffc4e48cff5ba88643a201 450fd73e636f94d0d232600dd39031386b0e2ecde4105124fc451341da9803db
hash_to_ec 7159971b03c365480d91d625a0fadc8e3a632c518acf0dbec87dd659da70e168 377bc43c038ac46cf6565aa0a6d6bf39968c0c1142755dba3141eeebf0acdf5d
hash_to_ec e39089a64fedac4b2c25e36312b33f79d02bf75a883f450f910915b8560a3b06 77efa7db1be020e77596f550de45626824a8268095d56a0991696b211cb329cc
hash_to_ec 2056b3c6347611bb0929dad00ec932a4d9bec0f06b2d57f17e01ffa1528a719e b6072c2be2ce928e8cbbb87e8eb7e06975c0f93b309dd3b6a29edaad2b56f99b
hash_to_ec 2c026793146e81b889fc741d62e06c341ce263560d57cd46d0376f5b29174489 8f1f64b67762aa784969e954c196a2c6610addc3604aa3291eb0b80304dfe9ef
hash_to_ec be6026d6704379c489fa7749832b58bdb1a9685a5ffb68c438537f2f76e0011f 0072569a4090a9ad383a205bb092196c9de871c22506e3bb63d6b9d1b2357c96
hash_to_ec f4db802d5c6b7d7b53663b03d988b4cd0c7cad6c26612c5307754a93ebdc9710 f21bc9be4cb28761f6fe1d0a555ad5e9748375a2e9faea25a1df75cc8d273e18
hash_to_ec c27d79a564c56b00956a55090481e85fbc837fd5fb5e8311ecb436e300c07e3a 1b1891e6abec74621501450cd68bb1eeaa5b2fffff4ec441a55d1235ff3a0842
hash_to_ec a1e2f93c717cad32af386efa624198973df5a710963dd19d4c3ac40032a3a286 69c60571e3f9f63d2bfb359386ae3b8cd9e49a2e9127753002866e85c0443573
hash_to_ec 76920d7b1763474bc94a16433c3c28241a9acdee3ff2b2cb0e6757ba415310aa c1b409169f102b696fc7fa1aa9c48631e58e08b5132b6aadf43407627bb1b499
hash_to_ec 57ac654b29fa227c181fff2121491fcb283af6cbe932c8199c946862c0e90cb2 a204e8d327ea93b0b1bd74a78ffc370b20cea6455e209f2bc258114baa16d728
hash_to_ec 88e66cfaef6432b759c50efce885097d1752252b479dac5ed822fa6c85d56427 6fb84790d3749a5c1088209ee3823848d9c19bf1524215c44031143dd8080d70
hash_to_ec c1e55da929c4f8f793696fc77ff4e1c317c34852d98403bfd15dd388ee7df0df 2f41e76f15c5b480665bd84067e3b543b85ce6de02be9da7a550b5e1ead94d34
hash_to_ec 29e9ace5aa3c5a572b13f4b62b738a764d90c8c293ccb062ad798acbab7c5ef4 bce791aba1edc2a66079628fd838799489ab16b0a475ce7fe62e24cc56fe131c
hash_to_ec f25b2340689dadacaa9a0ef08aee8447d80b982e8a1ea42cf0500a1b9d85b37d f7f53aa117e6772a9abc452b3931b0a99405ac45147e7c550ac9fcf7ffe377b5
hash_to_ec 0cb6c47fc8478063b33f5aed615a05bcc84d782c497b6cc8e76ec1fa11edbfdb 7a0b58b03147e7c9be1d98de49ead2ce738d0071b0af8ca03cc92ceb26fc2246
hash_to_ec 7bd7287d7c4b596fe46fe57a6982c959653487bea843a77dd47d40986200d576 343084618c58284c64a5ff076f891be64885dc2ac73fa1567f7b39fde6b91542
hash_to_ec e4984bf330708152254fb18ecef12d546afd24898a3cf00fba866957b6ee1b82 c70e88b061656181fbd6ff12aca578fb66de5553c756ea4698a248b177185bc6
hash_to_ec cefd6c3cb9754ea632d6aea140af017de5ea12e5184f868936b74d9aa349d603 4b476502a8a483aadd50667f262f95351901628dd3a2aac1a5a41c4ea03f1647
hash_to_ec da5d0f33344ee7f3345204badf183491b9452b84bccc907602c7bad43e5cf43e 9561b9e61241625e028361494d4fa5cd78df4c7219fa64c8fede6d8421b8904a
hash_to_ec d6f0a4f8c770a1274a76fd7ae4e5faf7779249263e1aaecc6f815cf376f5c302 cd5c55820be10f0d38feb81363ede3716a9168601a0dd1ce3109aab81367d698
hash_to_ec b6bf32491d12a41c275d8518fc534d9a0d17aade509e7e8b8409a95c86167307 4aae534abbd67a9a8f2974154606c0e9be8932e920c7a5e931b46a92859acf82
hash_to_ec 0f930beaad041f9cefd867bc194027dd651fb3c9bda5944ececdba8a7136b6d3 521708f8149891b418d0920369569a9d578029c78f8e41c68a0bb68d3ad5df60
hash_to_ec 49b1fe0f97be74b81e0b047027b3e9f726fa5e90a67dafa877309397291c06c5 0852e59dfae5ec32cce606c119376597bce5cd4d04879d329f74e3ec66414cd3
hash_to_ec 4d57647d03f2cfbd4782fcc933e0683b52d35fc8d37283e6c7de522ddfa7e698 cbeb9ebfbbc49ec81fac3b7b063fecac1bb40ea686d3ffb08f82b291715cd87f
hash_to_ec 4ea3238c06fc9346c7421ff85bc0244b893860b94bc437378472814d09b2e99f a1fbae941adc344031bbdf53385dfdc012311490a4eb5e9a2749a21b27ce917a
hash_to_ec 0cd3609f5c78b318cb853d189b73b1ee2d00edd4e5fce2812027daa3fcb1fed1 0c7a7241b16e3c47d41f5abbf205797bd4b63fc425a7120cb2a4bf324e08ae74
hash_to_ec d74ab71428e36943c9868f70d3243469babd27988a1666a06f499a5741a52e3e 65b7c259f3b4547c082b2a7669b2b363668c4d87ac14e80471317b03b34e5216
hash_to_ec f6b151998365e7d69bcbce383dd2e8b5bf93b8b72f029ff942588208c1619591 6ce840ce5dfbca238665c1e6eddb8b045aa85c69b5976fc55ab57e66d3d0a791
hash_to_ec 207751de234b2bd7ec20bdd8326210c23aa68f04875c94ad7e256a96520f25d6 fc8f79ab3af317c38bfb88f40fb84422995a0479cfa6b03fa6df7f4e5f2813fb
hash_to_ec 62291e2873f38c0a234b77d1964205f3f91905c261d3c06f81051a9b0cb787cb 076d1d767457518e6777cb3bd4df22c8a19eb617e4bbccd1b0bd37522d6597a5
hash_to_ec 4b060df2d2854036751d00190ee821cb0066d256d4172539fdfa6fbd1cdfe1f9 59866e927c69e7de5df00dc46c0d2a1ddf799d901128ff040cebb8fd61b95da4
hash_to_ec ac8daf73f9c609bb36bce4fdeec1e50be5f22de38c3904fabcf758f0fc180bc7 7d8dc4e956363b652468a5fecafd7c08d48a2297e93b8edcb38e595fdd5a1fde
hash_to_ec fef7b6563fd27f3aab1d659806b26b8f2ec38bc8feefad50288383c001d1c20f e6e42547f12df431439d45103d2c5a583248f44554a98a3a433cf8c38b11805d
hash_to_ec 40a3d6871c76ecc6bb7b28324478733e196cc11d062dd4c9265cf31be5cf5a97 8c55a3811c241a020b1be202a58d5defbc4c8945d73b132570b47dd7c019ccf0
hash_to_ec 0cd71e7e562b2b47f4bc8640caf20e69d3a62f10231b4c7a372c9691cff9ac3c fb8e4e3de479b3bf1f4f13b4ed5507df1e80bd9250567b9d021b03339d6e7197
hash_to_ec 40a4e62800a99b7a26e0b507ffb29592e5bdba25284dc473048f24b27d25b40a 90ae131d29ee4a71cd764ab26f1ca4e6d09a40db98f8692b345c3a0e130dc860
hash_to_ec 1ddf35193cf52860bfe3e41060a7f44281241c6ae49cd541d24c1aca679b7501 3b4f50013895c522776ced456329c4e727de03575f6b99ae7d238a9f70862121
hash_to_ec 014e0fa8ce9d5df262b9a1765725fde354a855de8aef3fc23684e05dd1ba8d34 3857f57776a3cb68721bcb7f1533a5f9fb416a1dc8824d719399b63a142d24de
hash_to_ec 09987979b0e98d1d5355df8a8698b8f54d3a037d12745c0a4317fe519c3df9cc 32a181e2b754aeced214c73ac459c97d99e63317be3eb923344c64a396173bca
hash_to_ec 51e9e8ec4413e92dbaaba067824c32b018487a8d16412ed310507b4741e18eed 0356b209156b4993fd5d5630308298429a1b0021c19bedecb7719ac607cfa644
hash_to_ec 14d91313dfe46e353310e6a4a23ee15d7a4e1f431700a444be8520e6043d08d9 6f345f4018b5d178d9f61894d9f46ac09ff639483727b0d113943507cee88cfd
hash_to_ec 0d5af9ace87382acfffb9ab1a34b6e921881aa015d4f6d9c73171b2b0a97600d a8dbf36c85bebe6a7b3733e70cd3cd9ed0eb282ca470f344e5fcf9fe959f2e6e
hash_to_ec 996690caac7328b19d20ed28eb0003d675b1a9ff79055ab530e3bf170eb22a94 14340d7d935cffce74b8b2f325c9d92ce0238b51807ef2c1512935bb843194ce
hash_to_ec ad839c4b4c278c8ebe16ff137a558255a1f74646aa87c6cd99e994c7bb97ce8a d4f2da327ffded913b50577be0e583db2b237b5ca74da648e9b985c247073b76
hash_to_ec 26fc2eeeee983e1300d72362fdff42edf08038e4eee277a6e2dbd1bd8c9d6560 3468b8269728c2c0bfc2e53b1575415124798bc0f59b60ea2f14967fc0ca19ce
hash_to_ec db33cecaf4ee6f0ceba338cc5fabfb7462cd952a9c9007357ff3f0ca8336f8bc 0bab38f58686d0ff770f770a297971510bc83e2ff2dfead34823d1c4d67f11af
hash_to_ec a0ee84b3c646526fb8787d26dcd9b7fe9dc713c8a6c1a4ea640465a9f36a64df 4d7a638f6759d3ec45339cd1300e1239cca5f0f658ca3cd29bc9bdb32f44faf0
hash_to_ec 6a702e7899fcf3988e2b6b55654c22e54f43d3fa29de19177bdff5b2295fe27f 145d5748d6054fb586568e276f6925aef593a5b9c8249ad3dbef510af99b4307
hash_to_ec 30ce0fd4f1fac8b62d613b8ee4a66deef6eb7094bd8466531050b837460f6971 f3aa850d593ba7cef01389f7e1916e57617f1d75cd42f64ce8f5f272384b148c
hash_to_ec 3aa31d4ad7046ad13d83eb11c9a6e90eb8483a374a77a9a7b2a7cc0978fefa76 2fe0827dc080d9c1e7ec475a78aa7ae3c86d1a35f4c3f25f4a1f7299cacf018a
hash_to_ec 8562a5a91e763b98014523ebb6e49120979098f89c31df1fde9eb3a49a15b20f ae223bf85e2009a9daf5fd8a14685e2e1e625fc88818b2fd437dd7e109a48f59
hash_to_ec ccf9c313a47b8dbf7ce42c94b785818bc24134d95b6d22acc53c1ec2be29cf27 3e79fce6fe5aa14251b6560df4b76e811d7739eec097f27052c4403a283be71d
hash_to_ec d1e33cd6f8918618d5fb6d67ad8de939db8beaec4f115551eac64479b739b773 613fffcbe1bf48bb2d7bfd64fd97790a06025f8f2429edddb9ac145707847ecf
hash_to_ec 81eaeced34dd44e448d5dafa5715225e4956c90911c964a96ff7aa5b86b969bc 8f81177495d120a1357380164d677509b167f2958eb8b962b616c3951d426d8c
hash_to_ec 2bc001a29f8eab1c7377de69957ba365fb5bdaf9c2c220889709af920dfe27d3 9bcb3010038f366fa4c280eed6e914a23bfc402594d0b83d0e66730a465a565b
hash_to_ec 6feeb703c05e86c58d9fc5623f1af8657ecd1e75a14d18c4eedb642a8a393d16 6544628ba67ed0e14854961739c4d467fcf49d6361e39d32ea73dabeae51e6c3
hash_to_ec e8ff145a7c26897f2c1639edd333a5412f87752f110079f581ccdc87fcce208c d4b5a6e06069c7e012e32119f8eda08ff04a8dfa784e1cf1bced455a4d41d905
hash_to_ec 80488131dcb2018527908dbf8cdf4b823ef0806dc1d360f4da671004ef7ff74d 9984a79d9fd4f317768b442161116eef84e2ca49e938642b268fd64312d59a27
hash_to_ec d8c4ca60446849a784d1462aa26a3b93073ff6841cb2da3ef52ab9785b00b1fd da5ec1562e7de2382d35728312f4eea3608d4dba775c1c108de510e1ce97d059
hash_to_ec 68645728dfc6b9358dfb426493238ba38f24a2f46a3e89edb47d212549939cb7 d3253aa7235113dcc1b577d3bb80be34f528398815a653dbdbacbcbdfd5887a1
hash_to_ec 4e8eb97ba2d1046e1b42e67530a61441e31c84e5e5e448d8e8dbe75d104eaccb de94f73e83222aa0e39b559d4fef70387b0815b9b2f6beff5da67262d8f0eb3e
hash_to_ec 104ff03122ffdf59b22b8c0fe3d8f2ef67d02328e4d5181916d3d2a92f9a0bb7 1517ccf69c0328327e1cf581f16944ff66bc91c37e1cd68a99525415e00b7c9f
hash_to_ec 80f23aae7356ae9a2f9f7504495a731214d26f870fb7df68fdc00b233494156f 7aef046b0a70f84e8d239aa95e192b5a3fffa0fae5090c91273e8996beca9e38
hash_to_ec 2424b33235955a737ebddbf1c6c59cd8778af74da3bd3e658447666a2ab2f557 d19e2be8d482950fbdae429618da7a9daedb8c5944dea19cd1b6b274e792231b
hash_to_ec 0adc839d2b8f099e4341a4763b074c06318d6bcbd1ec558d20a9820c4a426463 cea5da12a84e5c20011726d9224a9930bec30f9571762dd7ca857b86bd37d056
hash_to_ec 46c84d53951f1ba23c46a23d5d96bf019c559aa5d2d79e4535cfcdb36f38ce25 2a913a01a6f7dd78a43cdd5354d1160d9a5f0d824c489a892c80eba798a77567
hash_to_ec 99bdaaf68555ccdc93d97c3a0fb4c126a1aa8b1202194a1a753401a6cae21055 1f645efe173577a092f2d847cc966e28ba3b36397fe84c96dfa4724ed4fcfdf9
hash_to_ec c540ff78f1e063ad26ffa69febb8818c9f2a325072c566091ad816e40fe39af4 de7a762262c91ab4beccc0713233cb91163aec43e34de0dbcfad0c431e8a9722
hash_to_ec de8b1ff8978cd5e02681521542b7b6c3c2f8f4602065059f83594809d04e3dda 290601e75207085bff3e016746e55a80310a76dea9ef566c24181079c76da11c
hash_to_ec d555994c8a022e52602d2a8bdd01fc1bfa6b9ab6734ff72a1bd5f937de4627f8 5f6794e874f48c4b362d0a24207374c2d274e28de86351afc6ddb95d8cc2fd62
hash_to_ec 19db72f703fe6f1b73f21b6ba133ae6b111ae8cc496d3aa32e02411e34c0d8d7 42f159f43d2d62b8cf8a47d5f1340c5cf070e9860fc60de647c55d50fe9f5607
hash_to_ec 23a87a258c2a5d1353aa2d5946f9e5749b92f85e3c58e1d177c3b6c3dcac809c e5685016f79d5e87d1fecb3e2a0fe64e4875f7accd2f6649d7f6b16317549cb1
hash_to_ec 43e1738d7d1b5b565f5fc78e81480f7edf9a4dc18f104fc4be95135b98931b17 650f5b682e45f2d0c5d5e8bcfd9e0cda7d9071b55ecbfaf5e3b59941cd7479f2
hash_to_ec a9d644de0804edf62dee613efa2547e510990a9b7a987ebe55ec74c23873a878 52ad329f88499a4f110e6a6cba1f820012d8db6ccb8f6495ab1e3eb5a24786e1
hash_to_ec 11f2b5d89a0350d7c8727becf0f4dd19bd90f8c94ff207132ab13282dd9b94e6 b798a47bb98dc2a8f99deaf64d27638e33a0d504c5d2fbee477a2bc9b89e2838
hash_to_ec 5e206e3190b3b715d125f1a11fff424fb33e36e534c99ddde2a3517068b7dcc4 2738e9571c96b2ddf93cb5f4a72b1ea78d3731d9555b830494513c0683c950ca
hash_to_ec efc3d65a43d4f10795c7265a76671348f80173e0f507c812f7ae76793b99c529 cf4434d18ce8167b51f117fe930860143c46e1739a8db1fba73b6b0de830d707
hash_to_ec 81f00469788aad6631cf75b585ae06d43ec81c20479925a2009afac9687dff60 c335b5889b36ba4b4175bb0d986807e8eedb6f6b7329b70b922e2ab729c4202a
hash_to_ec 9ef5ff329b525ee8f5c3ac38e1dba7cb19985617341d356707c67ff273aed02d bef9f9e051ba0e24d1fdf72099cf43ecdd250d047fb329855b5372d5c422db9e
hash_to_ec 3fa1401bd63132cf8b385c0fa65f0715ba1fe6161e41d59f8033ae2b22f63fa1 8289a1cb3c2dae48879bb8913fafe2d196cc2fdab5f2a77607910efd33eae6df
hash_to_ec 6559836fd0081fa38a3f8d8408b564e5698b9797cf5e15f7f12a7d2c84511989 28d405a6687d2ecc90c1c66bf0454d58f3fa38835743075e1db58c658e15a104
hash_to_ec 8e0882d45f0e4c2fb2839d3be86ff699d4b2242f5b25ac5a3c2f65297c7d2032 2771fdcf9135a62007adb5f0004d8222f0e42f819c81710aa4dc3ab2042bebf3
hash_to_ec 1d91dc4dd9bd82646029d13aca1af96830c1d8a0400ddebeb14b00c93501c039 7792c62e897f32cbc9c4229f0d28f7882ceeae120329a1cd35f76a75ac704e93
hash_to_ec 09527f9052acbbdd7676cbbd9534780865f04a27aaadad2b7d4f1dac68883cf0 b934220cde1327f2dc6af67bcb4124bf424d5084ef4da945e4daad1717cd0bb8
hash_to_ec 2362e1abe73e64cdd2ca7f6c5ea9f467213747dd3f2b7c6e5df9cb21e03307d7 676b7122b96564358bbaaf77e3a5a4db1767e4f9a50f6ddd1c69df4566755af9
hash_to_ec 26c2dd2356e9b6c68a415b25f91d18614dc8500c66f346d28489da543ee75a94 0f4fd7086acd68eb7c9fa2410e2ecf18e34654eb44e979bc03ce436e992d5feb
hash_to_ec 422dc0a09d6a45a8e0b563eeb6a5ee84b08abd3a8cb34ff93f77ba3b163f4042 631f1b412ff5a0fccbe53a02b4a3deaa93a0418ed9874df401eb698ef75d7441
hash_to_ec ceecdf46f57ef3f36ff30a1a3579b609340282d1b26ab5ddef2f53514e91bab1 9bc6f981fe98d14a2fc5b01a8134b6d35e123ec9ab8a3f303e0a5abb28150e2e
hash_to_ec 024a9e6e0d73f28aa6207fb1e02ce86d444d2d46f8211e8aaab54f459db91a5a 5fb0c1d2c3b30f399102104ea1874099fa83110b3d9c1fcfffb2981c98bf8cdf
hash_to_ec 5b8e45e269c9ccac4c68e532a72b29346d218f4606f37a14064826a62050e3a8 c7be46a871b77fc05ce891d24bd6bd54d9775b7ef573c6bc2d92b67f3604c1d1
hash_to_ec 9a6593a385c266389eef14237874b97bdcd1823c3199311667d4853c2d12aa81 9f55ee9d94102d2b9c5670f30586cf9823bf205b4d4fe088c323e87c4e10f26f
hash_to_ec 27377e2811598c3569b92990865d39b72c7a5533e1be30f77330863187c11875 abd82bc726f2710a8b87e4c1cf5a069f0ae800de614468d3ff35639983020197
hash_to_ec 7cacfaa135fb7d568b8dce8ea9136498b1b28c6d1020af45d376288d78d411f0 229fccd49744c0692508af329224553d21561ee6062b2b8a21f080f73da5bd97
hash_to_ec 52abd90a5542d6496b8dec9567b020f30058e29458d64f2d4f3ad6f3bfc1a5a0 874e82ced7cf77577b3374087fb08a2300b7f403de628310c26bdb3be869d309
hash_to_ec 5c8eebe9d12309187afa8d0d5191de3fdb84e5a05485d7cd62e8804ce7fdc0bc 12b7537643488aa8b9dcc4bae040cd491f8b466163b7988157b0502fb6c9177f
hash_to_ec 6ca3dd5c7a21a6bf65d6eefbe20a66e9b1d6b64196344be0c075f47aea48e3aa 5e1d0705ee24675238293b73ab1d98359119d4b328275be2460cc6ee4d19cc88
hash_to_ec d7e6cd0d39b4308c2a5ee547c4569c8bb3887e49cedece62d218d7c3c5277797 793dc4397112dfd9a8f4e061f457eb6d6fbb1d7a58c40bad5f16002c64914186
hash_to_ec 9cb6de8ba967cca0f0f861c6e20546f8958446595c01c28dae7ba6cfa09d6b14 ba1a2f7502b58fee3499c20e35fa01bb932e7a7c4a925dc04fbf5d90f33cfb5e
hash_to_ec 8ef9c7366733a1edcd116238cdbd177d61222d5c3e05b30ef6b85014cbcb6b79 8fc89664722947164ac9b77086aed319897612068f56ecd57f47029f14671603
hash_to_ec 7f317a34e4fb7de9f69cb107ffc0e57fd9f5c85b85ccb5319d05cebfc169924a 4b71c42339c73db7d710cd63f374d478a6c13bdc352cff40e967282268965ba7
hash_to_ec 15beef8d9687b92918a903b01d594859db4e7128263c8db0cae9d423ff962c1e cd75e6323952f6ac88f138f391b69f38c46d70b7eda61f9e431725b6f1d514a5
hash_to_ec 7a1c04c9af8fc6649833fe81e96f0199fcfe94959256cbe1490075fc5be0904e 0368270cd979439ae0a9552a5d6c9f959e4247fcf920d9e071464582e79c04b1
hash_to_ec c854c583d338615f85f69061e0fa9c9d7c5bbbfe562e8774fef3be556fe8bb63 061620171d7320f64bee98414ff7200a1f481521d202fb281cab06be73b80402
hash_to_ec 0fb8af5aba05ad2503edf1cfad5a451da088e7e974772057cd991a4e0601a3eb d3cbc20384a4420143fcce2cb763b0c15bec4f3267d1bdad3c34c1ee6b790f5e
hash_to_ec 9a251cf59e84a9da5630642f9671c732440caa8fcf4c92446a7e5f5ef99da46c 9b9679086a433f2077f40bcd4c7545fb5cc87e7dbb8bba468d53cb04a74361a0
hash_to_ec 8c632e357cef00e0911eb566f8cc809136b3f5ac1e82d183e4d645cef89fa155 5e06b0f4f278fa1ccb5431866e0b35171cdb814e2e82b9189ce01d8d8a1b2408
hash_to_ec 4aa4c31463475086a5d96b3ff550340567ab3b4a86fa3f01cfe9be18bc4dcb54 76a2916cfc093f27992e1f07b50f431d61d58e255507e208cd29ea4d3bc56623
hash_to_ec 1d33d9aadb949346e3c78d065a0f5262374524f4cb97a7390c8cdaede7ca6578 9ad2f757f499359903031adea6126c577469c4e834a2959e3ac08ee74b13783c
hash_to_ec d9217b9a070df20c4d2f0db42ff0bb36bfba9f51b0b6df8fdfe150405dce4934 65a843c522b4b8ec081a696a0d2dd8dfdfea45db201de7a5889a1446c6dff8c7
hash_to_ec b665b2ca8a285e44ba84e785533b56496a5319730dbb95bc14d3bdfece7544dc 8a804cd13457497b0a29eeca2cecfaa858766ec1d270a0e0c6785b43fd49b824
hash_to_ec 43b5cbcc21b3404bca97fa9a661940fe64d40f3ca569310e50b1bb0173c4d5ee 6c12fffb540d536060bb8b96cf635c1b2cbaa4d875a8d2fb0bf79a690363df19
hash_to_ec 11c58f20562c00dec5bb4456be07cd98186837e9af38d50d45f5e7b6f0f9000d cee76b567586f66dadd38c01213bfc1a17d38e96a495efb4c26063dc498ba209
hash_to_ec b069a980b51d8e030262db0b30069e660f4a3f6f8075d1790c153ba12b879f8b 262391b00bdee71d1d827b2cfe50b46c29e265934dc91959bd369aca0cc6444e
hash_to_ec 75274bfd79bf33eb2f9ab046d34528af9a71811e7e3d55c20eb049c81ac692d8 cb93c850e36896fe6626e97c53652af6736ec3ba0641c7765d0cca2bad2352de
hash_to_ec 5cdb6a24d9736a00f197d9707949fedc5405f367744fe8c83b7cff650302b589 8b4ac03123fab9275dcf340345a1b11fba48ef106d410ba2e0e6f6457037a419
hash_to_ec 07fdc85f809f95a07b59b084402bf91c512ebbe05c7657d6ba27a9e7e121e3e2 61182b3def063630e11de648a278032bcb75949f3a24ef5a133da87830ae5c4e
hash_to_ec a4188ca634cbb796f9927822e343d7b267e0a609c1a0ffa4dcf3726b9ffcc8a2 a911e4899fda28fd6337d708d34553ac5e810ee4938f6f7d9d6e521cab069edb
hash_to_ec 3c128ec5c955ea189a5789df2c892e94193a534a9d5801b8f75df870bc492a69 59eef5ee9df0f681df5b5c67ead1f06b059a8a843837b67f20cce15779608170
hash_to_ec 51a4cc7ec4a14a98c0731e9de7f3ce0779123222d95455e940f2014a23729ec8 105863ccda076af7290d1bf9ec828651dc5811159839044d23f1c3e31a11c5e2
hash_to_ec 1b901a31acbb7807c3309facdc7d04bc3b5a4aa714e6e346bd1c6ad4634e6534 01b3c0000b6c6b471c67c6ab3f9c7a500beaea5edb5c8f2b34df91b69ff67f21
hash_to_ec d2f2c8d79cfa2e7cb2db80568ba62ca0576741acfbe5e2baa0d9b3c424a7c84d 7df9d9088022bd1ce6814d6f8051eef27a650ee38e789b184da2691efd27139d
hash_to_ec 04dcb7644fdfc12d8e34d6e57d7769db939b4a149ed2b81aa51a74ee90babe19 6cff0ab2dd3b32ba1bd1a78e3661722f3f10003a01ce83e430970557decedb2c
hash_to_ec 222798c6841eeaa07e7b7e29686942d7c7f9afc38d09360c8e1f52f2b7debd12 133e3a04ec82aa9b8dbbec18cadbafff446d1270bf7c6f3f97ddd3906dae2468
hash_to_ec 4f7277c3ef247a0689b486ad965f969c433fc63e95d7310e789c4708418ccabc 7e0f2c984dd3cffb35458938c95fe92acf2e697aed060b0e3377c7a07e53c494
hash_to_ec 359b4d6709413243ae2c5409ea02714a9f8961bbbb64a91e81daf01e18c981bf eab69af2cb7f113ad6a27035c0399853d10bd0b99291fad37794d100f7530431
hash_to_ec 6cea3c6a9eb38f60329537170aa4db8dbb869af2040061e53b10c267daf6568c da9a97f4fa96bd05dade5e2704a6a633ba4dbe5080a1e831cda888e9d4f86615
hash_to_ec 3dddecb954ef0209bcf61fd5b46b6c94f2384ef281c48a20ffee74f90788172d af9899c31f944617af54712f93d1a2b4944e48867f480d0d1aec61f3b713e32d
hash_to_ec 9605247462f50bdf7ff57fe966abbefe8b6efa0b65b5116252f0ec723717013f fc8f10904d42a74e09310ccf63db31a90f1dab88b278f15e3364a2356810f7e9
hash_to_ec a005143c4d299933f866db41d0a0b8c67264f5d4ea840dd243cb10c3526bc077 928df1fe9404ffa9c1f4a1c8b2d43ab9b81c5615c8330d2dc2074ac66d4d5200
hash_to_ec f45ce88065c34a163f8e77b6fb583502ed0eb1f490f63f76065a9d97e214e3a9 41bd6784270af4154f2f24f118617e2d7f5b7771a409f08b0f2b7bbcb5e3d666
hash_to_ec 7b40ac30ed02b12ff592a5479c80cf5a7673abfdd4dd38810e40e63275bc2eed 6c6bf5961d83851c9728801093d9af04e5a693bc6cbad237b9ac4b0ed580a771
hash_to_ec 9f985005794d3052a63361413a9820d2ce903198d6d5195b3f20a68f146c6d5c 88bcac53ba5b1c5b44730a24b4cc2cd782298fc70dc9d777b577a2b33b256449
hash_to_ec 31b8e37d01fd5669de4ebf78889d749bc44ffe997186ace56f1fb3e60b8742d2 776366b44170efb130a5045597db5675c6c0b56f3def84863c6b6358aa8dcf40
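The vectors above pair a 32-byte encoding with either a validity bit (check_key) or the expected hash-to-point result (hash_to_ec). A minimal sketch of a harness which could consume the check_key lines follows; the equivalence it asserts is an assumption (not stated by the vectors themselves), namely that validity corresponds to whether decompress_point, re-exported by this crate in a later hunk, accepts the encoding.

use monero_generators::decompress_point;

// Hypothetical helper for illustration: parse one `check_key <hex> <bool>` line.
fn parse_check_key(line: &str) -> ([u8; 32], bool) {
  let mut words = line.split_whitespace();
  assert_eq!(words.next(), Some("check_key"));
  let key: [u8; 32] = hex::decode(words.next().unwrap()).unwrap().try_into().unwrap();
  (key, words.next().unwrap().parse::<bool>().unwrap())
}

// Assumed equivalence under test: a key is valid iff it decompresses.
fn run_check_key(line: &str) {
  let (key, expected) = parse_check_key(line);
  assert_eq!(decompress_point(key).is_some(), expected);
}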
@@ -2,10 +2,7 @@
 mod binaries {
   pub(crate) use std::sync::Arc;

-  pub(crate) use curve25519_dalek::{
-    scalar::Scalar,
-    edwards::{CompressedEdwardsY, EdwardsPoint},
-  };
+  pub(crate) use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

   pub(crate) use multiexp::BatchVerifier;

@@ -20,6 +17,8 @@ mod binaries {
     rpc::{RpcError, Rpc, HttpRpc},
   };

+  pub(crate) use monero_generators::decompress_point;
+
   pub(crate) use tokio::task::JoinHandle;

   pub(crate) async fn check_block(rpc: Arc<Rpc<HttpRpc>>, block_i: usize) {
@@ -201,13 +200,12 @@ mod binaries {
     };

     let rpc_point = |point: &str| {
-      CompressedEdwardsY(
+      decompress_point(
         hex::decode(point)
           .expect("invalid hex for ring member")
           .try_into()
           .expect("invalid point len for ring member"),
       )
-      .decompress()
      .expect("invalid point for ring member")
     };

@@ -58,10 +58,10 @@ pub struct Block {
 }

 impl Block {
-  pub fn number(&self) -> usize {
+  pub fn number(&self) -> Option<u64> {
     match self.miner_tx.prefix.inputs.first() {
-      Some(Input::Gen(number)) => (*number).try_into().unwrap(),
-      _ => panic!("invalid block, miner TX didn't have a Input::Gen"),
+      Some(Input::Gen(number)) => Some(*number),
+      _ => None,
     }
   }

@@ -114,9 +114,16 @@ impl Block {
   }

   pub fn read<R: Read>(r: &mut R) -> io::Result<Block> {
+    let header = BlockHeader::read(r)?;
+
+    let miner_tx = Transaction::read(r)?;
+    if !matches!(miner_tx.prefix.inputs.as_slice(), &[Input::Gen(_)]) {
+      Err(io::Error::other("Miner transaction has incorrect input type."))?;
+    }
+
     Ok(Block {
-      header: BlockHeader::read(r)?,
-      miner_tx: Transaction::read(r)?,
+      header,
+      miner_tx,
       txs: (0_usize .. read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
     })
   }

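With number() now returning Option<u64>, a malformed miner transaction surfaces as a recoverable error at the call site rather than a panic. A minimal sketch of the adjustment callers make:

// Sketch only: turn the absent Input::Gen case into a caller-visible error.
fn block_height(block: &Block) -> Result<u64, &'static str> {
  block.number().ok_or("miner transaction did not start with Input::Gen")
}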
@@ -16,7 +16,7 @@ use sha3::{Digest, Keccak256};

 use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};

-pub use monero_generators::H;
+pub use monero_generators::{H, decompress_point};

 mod merkle;

@@ -46,6 +46,10 @@ pub mod wallet;
 #[cfg(test)]
 mod tests;

+pub const DEFAULT_LOCK_WINDOW: usize = 10;
+pub const COINBASE_LOCK_WINDOW: usize = 60;
+pub const BLOCK_TIME: usize = 120;
+
 static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
 #[allow(non_snake_case)]
 pub(crate) fn INV_EIGHT() -> Scalar {

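These constants expose the consensus lock windows (in blocks) to library users. As a hedged sketch, and glossing over exact boundary conventions, the height at which an output becomes spendable could be derived from them as:

// Illustrative only; the precise off-by-one convention should be checked
// against consensus rules before relying on this.
fn earliest_spend_height(mined_at: usize, coinbase: bool) -> usize {
  mined_at + if coinbase { COINBASE_LOCK_WINDOW } else { DEFAULT_LOCK_WINDOW }
}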
@@ -33,7 +33,10 @@ pub struct RingMatrix {

 impl RingMatrix {
   pub fn new(matrix: Vec<Vec<EdwardsPoint>>) -> Result<Self, MlsagError> {
-    if matrix.is_empty() {
+    // Monero requires that there is more than one ring member for MLSAG signatures:
+    // https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
+    // src/ringct/rctSigs.cpp#L462
+    if matrix.len() < 2 {
       Err(MlsagError::InvalidRing)?;
     }
     for member in &matrix {

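The check now matches Monero's rule that an MLSAG ring has more than one member, rejecting single-member rings which the old is_empty() check let past this point. A test-style sketch of the behavior this enforces:

#[test]
fn ring_matrix_rejects_trivial_rings() {
  // Both an empty matrix and a single-member ring must be refused
  assert!(matches!(RingMatrix::new(vec![]), Err(MlsagError::InvalidRing)));
  assert!(matches!(RingMatrix::new(vec![vec![]]), Err(MlsagError::InvalidRing)));
}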
@@ -257,7 +257,8 @@ impl RctPrunable {

   pub fn read<R: Read>(
     rct_type: RctType,
-    decoys: &[usize],
+    ring_length: usize,
+    inputs: usize,
     outputs: usize,
     r: &mut R,
   ) -> io::Result<RctPrunable> {
@@ -268,7 +269,7 @@ impl RctPrunable {
     // src/ringct/rctSigs.cpp#L609
     // And then for RctNull, that's only allowed for miner TXs which require one input of
     // Input::Gen
-    if decoys.is_empty() {
+    if inputs == 0 {
       Err(io::Error::other("transaction had no inputs"))?;
     }

@@ -276,11 +277,11 @@ impl RctPrunable {
       RctType::Null => RctPrunable::Null,
       RctType::MlsagAggregate => RctPrunable::AggregateMlsagBorromean {
         borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
-        mlsag: Mlsag::read(decoys[0], decoys.len() + 1, r)?,
+        mlsag: Mlsag::read(ring_length, inputs + 1, r)?,
       },
       RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
         borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
-        mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::<Result<_, _>>()?,
+        mlsags: (0 .. inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
       },
       RctType::Bulletproofs | RctType::BulletproofsCompactAmount => {
         RctPrunable::MlsagBulletproofs {
@@ -295,8 +296,10 @@
           }
           Bulletproofs::read(r)?
         },
-        mlsags: decoys.iter().map(|d| Mlsag::read(*d, 2, r)).collect::<Result<_, _>>()?,
-        pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?,
+        mlsags: (0 .. inputs)
+          .map(|_| Mlsag::read(ring_length, 2, r))
+          .collect::<Result<_, _>>()?,
+        pseudo_outs: read_raw_vec(read_point, inputs, r)?,
         }
       }
       RctType::Clsag | RctType::BulletproofsPlus => RctPrunable::Clsag {
@@ -308,8 +311,8 @@
           r,
         )?
       },
-      clsags: (0 .. decoys.len()).map(|o| Clsag::read(decoys[o], r)).collect::<Result<_, _>>()?,
-      pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?,
+      clsags: (0 .. inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
+      pseudo_outs: read_raw_vec(read_point, inputs, r)?,
       },
     })
   }
@@ -382,8 +385,16 @@ impl RctSignatures {
     serialized
   }

-  pub fn read<R: Read>(decoys: &[usize], outputs: usize, r: &mut R) -> io::Result<RctSignatures> {
-    let base = RctBase::read(decoys.len(), outputs, r)?;
-    Ok(RctSignatures { base: base.0, prunable: RctPrunable::read(base.1, decoys, outputs, r)? })
+  pub fn read<R: Read>(
+    ring_length: usize,
+    inputs: usize,
+    outputs: usize,
+    r: &mut R,
+  ) -> io::Result<RctSignatures> {
+    let base = RctBase::read(inputs, outputs, r)?;
+    Ok(RctSignatures {
+      base: base.0,
+      prunable: RctPrunable::read(base.1, ring_length, inputs, outputs, r)?,
+    })
   }
 }

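Since the ring size, input count, and output count are all recoverable from the already-parsed transaction prefix, the deserializer now takes plain counts instead of threading a per-input decoy-length slice through. A sketch of the new call shape (the count parameters are assumptions standing in for values read from the prefix):

fn read_rct(
  serialized: &[u8],
  ring_length: usize, // ring size shared by each input
  inputs: usize,      // prefix.inputs.len()
  outputs: usize,     // prefix.outputs.len()
) -> std::io::Result<RctSignatures> {
  let mut cursor = serialized;
  RctSignatures::read(ring_length, inputs, outputs, &mut cursor)
}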
@@ -9,7 +9,9 @@ use std_shims::{

 use async_trait::async_trait;

-use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};
+use curve25519_dalek::edwards::EdwardsPoint;
+
+use monero_generators::decompress_point;

 use serde::{Serialize, Deserialize, de::DeserializeOwned};
 use serde_json::{Value, json};
@@ -52,6 +54,15 @@ struct TransactionsResponse {
   txs: Vec<TransactionResponse>,
 }

+#[derive(Deserialize, Debug)]
+pub struct OutputResponse {
+  pub height: usize,
+  pub unlocked: bool,
+  key: String,
+  mask: String,
+  txid: String,
+}
+
 #[derive(Clone, PartialEq, Eq, Debug)]
 #[cfg_attr(feature = "std", derive(thiserror::Error))]
 pub enum RpcError {
@@ -86,10 +97,9 @@ fn hash_hex(hash: &str) -> Result<[u8; 32], RpcError> {
 }

 fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
-  CompressedEdwardsY(
+  decompress_point(
     rpc_hex(point)?.try_into().map_err(|_| RpcError::InvalidPoint(point.to_string()))?,
   )
-  .decompress()
   .ok_or_else(|| RpcError::InvalidPoint(point.to_string()))
 }

@@ -533,29 +543,15 @@ impl<R: RpcConnection> Rpc<R> {
     Ok(distributions.distributions.swap_remove(0).distribution)
   }

-  /// Get the specified outputs from the RingCT (zero-amount) pool, but only return them if their
-  /// timelock has been satisfied.
-  ///
-  /// The timelock being satisfied is distinct from being free of the 10-block lock applied to all
-  /// Monero transactions.
-  pub async fn get_unlocked_outputs(
-    &self,
-    indexes: &[u64],
-    height: usize,
-  ) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
+  /// Get the specified outputs from the RingCT (zero-amount) pool
+  pub async fn get_outs(&self, indexes: &[u64]) -> Result<Vec<OutputResponse>, RpcError> {
     #[derive(Deserialize, Debug)]
-    struct Out {
-      key: String,
-      mask: String,
-      txid: String,
+    struct OutsResponse {
+      status: String,
+      outs: Vec<OutputResponse>,
     }

-    #[derive(Deserialize, Debug)]
-    struct Outs {
-      outs: Vec<Out>,
-    }
-
-    let outs: Outs = self
+    let res: OutsResponse = self
       .rpc_call(
         "get_outs",
         Some(json!({
@@ -568,15 +564,39 @@ impl<R: RpcConnection> Rpc<R> {
       )
       .await?;

-    let txs = self
-      .get_transactions(
-        &outs.outs.iter().map(|out| hash_hex(&out.txid)).collect::<Result<Vec<_>, _>>()?,
-      )
-      .await?;
+    if res.status != "OK" {
+      Err(RpcError::InvalidNode("bad response to get_outs".to_string()))?;
+    }
+
+    Ok(res.outs)
+  }
+
+  /// Get the specified outputs from the RingCT (zero-amount) pool, but only return them if their
+  /// timelock has been satisfied.
+  ///
+  /// The timelock being satisfied is distinct from being free of the 10-block lock applied to all
+  /// Monero transactions.
+  pub async fn get_unlocked_outputs(
+    &self,
+    indexes: &[u64],
+    height: usize,
+    fingerprintable_canonical: bool,
+  ) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
+    let outs: Vec<OutputResponse> = self.get_outs(indexes).await?;
+
+    // Only need to fetch txs to do canonical check on timelock
+    let txs = if fingerprintable_canonical {
+      self
+        .get_transactions(
+          &outs.iter().map(|out| hash_hex(&out.txid)).collect::<Result<Vec<_>, _>>()?,
+        )
+        .await?
+    } else {
+      Vec::new()
+    };

     // TODO: https://github.com/serai-dex/serai/issues/104
     outs
-      .outs
       .iter()
       .enumerate()
       .map(|(i, out)| {
@@ -585,18 +605,20 @@ impl<R: RpcConnection> Rpc<R> {
         // Only valid keys can be used in CLSAG proofs, hence the need for re-selection, yet
         // invalid keys may honestly exist on the blockchain
         // Only a recent hard fork checked output keys were valid points
-        let Some(key) = CompressedEdwardsY(
+        let Some(key) = decompress_point(
           rpc_hex(&out.key)?
             .try_into()
            .map_err(|_| RpcError::InvalidNode("non-32-byte point".to_string()))?,
-        )
-        .decompress() else {
+        ) else {
           return Ok(None);
         };
-        Ok(
-          Some([key, rpc_point(&out.mask)?])
-            .filter(|_| Timelock::Block(height) >= txs[i].prefix.timelock),
-        )
+        Ok(Some([key, rpc_point(&out.mask)?]).filter(|_| {
+          if fingerprintable_canonical {
+            Timelock::Block(height) >= txs[i].prefix.timelock
+          } else {
+            out.unlocked
+          }
+        }))
       })
       .collect()
   }
@@ -713,13 +735,14 @@ impl<R: RpcConnection> Rpc<R> {
     &self,
     address: &str,
     block_count: usize,
-  ) -> Result<Vec<[u8; 32]>, RpcError> {
+  ) -> Result<(Vec<[u8; 32]>, usize), RpcError> {
     #[derive(Debug, Deserialize)]
     struct BlocksResponse {
       blocks: Vec<String>,
+      height: usize,
     }

-    let block_strs = self
+    let res = self
       .json_rpc_call::<BlocksResponse>(
         "generateblocks",
         Some(json!({
@@ -727,13 +750,12 @@ impl<R: RpcConnection> Rpc<R> {
         "amount_of_blocks": block_count
       })),
     )
-    .await?
-    .blocks;
+    .await?;

-    let mut blocks = Vec::with_capacity(block_strs.len());
-    for block in block_strs {
+    let mut blocks = Vec::with_capacity(res.blocks.len());
+    for block in res.blocks {
       blocks.push(hash_hex(&block)?);
     }
-    Ok(blocks)
+    Ok((blocks, res.height))
   }
 }

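get_outs now surfaces the daemon's raw response, including its unlocked flag, while get_unlocked_outputs chooses between trusting that flag and recomputing the timelock check from refetched transactions (the flag's name suggests the canonical path is considered fingerprintable). A hedged sketch of the trade-off, with the RPC construction elided:

async fn fetch_ring_members<R: RpcConnection>(
  rpc: &Rpc<R>,
  indexes: &[u64],
  height: usize,
) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
  // Cheap path: the daemon's own `unlocked` flag is trusted as-is
  let _trusting = rpc.get_unlocked_outputs(indexes, height, false).await?;
  // Canonical path: refetch each transaction and verify the timelock locally,
  // at the cost of extra RPC round trips
  rpc.get_unlocked_outputs(indexes, height, true).await
}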
@@ -4,19 +4,28 @@ use std_shims::{
   io::{self, Read, Write},
 };

-use curve25519_dalek::{
-  scalar::Scalar,
-  edwards::{EdwardsPoint, CompressedEdwardsY},
-};
+use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};
+
+use monero_generators::decompress_point;

 const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;

 mod sealed {
-  pub trait VarInt: TryInto<u64> + TryFrom<u64> + Copy {}
-  impl VarInt for u8 {}
-  impl VarInt for u32 {}
-  impl VarInt for u64 {}
-  impl VarInt for usize {}
+  pub trait VarInt: TryInto<u64> + TryFrom<u64> + Copy {
+    const BITS: usize;
+  }
+  impl VarInt for u8 {
+    const BITS: usize = 8;
+  }
+  impl VarInt for u32 {
+    const BITS: usize = 32;
+  }
+  impl VarInt for u64 {
+    const BITS: usize = 64;
+  }
+  impl VarInt for usize {
+    const BITS: usize = core::mem::size_of::<usize>() * 8;
+  }
 }

 // This will panic if the VarInt exceeds u64::MAX
@@ -102,7 +111,7 @@ pub(crate) fn read_varint<R: Read, U: sealed::VarInt>(r: &mut R) -> io::Result<U> {
   if (bits != 0) && (b == 0) {
     Err(io::Error::other("non-canonical varint"))?;
   }
-  if ((bits + 7) > 64) && (b >= (1 << (64 - bits))) {
+  if ((bits + 7) >= U::BITS) && (b >= (1 << (U::BITS - bits))) {
     Err(io::Error::other("varint overflow"))?;
   }

@@ -126,11 +135,7 @@ pub(crate) fn read_scalar<R: Read>(r: &mut R) -> io::Result<Scalar> {

 pub(crate) fn read_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
   let bytes = read_bytes(r)?;
-  CompressedEdwardsY(bytes)
-    .decompress()
-    // Ban points which are either unreduced or -0
-    .filter(|point| point.compress().to_bytes() == bytes)
-    .ok_or_else(|| io::Error::other("invalid point"))
+  decompress_point(bytes).ok_or_else(|| io::Error::other("invalid point"))
 }

 pub(crate) fn read_torsion_free_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {

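The overflow bound is now derived from the destination type's width rather than hard-coded to 64 bits, so narrow integers reject encodings they cannot hold. A crate-internal test sketch (read_varint is pub(crate), so this would live inside the crate):

#[test]
fn varint_overflow_is_type_aware() {
  // 256 encodes as the two varint bytes [0x80, 0x02]
  let encoding = [0b1000_0000u8, 0b0000_0010];
  assert_eq!(read_varint::<_, u64>(&mut encoding.as_slice()).unwrap(), 256);
  // Fits in a u64, but must now overflow when read as a u8
  assert!(read_varint::<_, u8>(&mut encoding.as_slice()).is_err());
}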
@@ -2,7 +2,9 @@ use hex_literal::hex;

 use rand_core::{RngCore, OsRng};

-use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, edwards::CompressedEdwardsY};
+use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
+
+use monero_generators::decompress_point;

 use crate::{
   random_scalar,
@@ -142,14 +144,8 @@ fn featured_vectors() {
       }
       _ => panic!("Unknown network"),
     };
-    let spend = CompressedEdwardsY::from_slice(&hex::decode(vector.spend).unwrap())
-      .unwrap()
-      .decompress()
-      .unwrap();
-    let view = CompressedEdwardsY::from_slice(&hex::decode(vector.view).unwrap())
-      .unwrap()
-      .decompress()
-      .unwrap();
+    let spend = decompress_point(hex::decode(vector.spend).unwrap().try_into().unwrap()).unwrap();
+    let view = decompress_point(hex::decode(vector.view).unwrap().try_into().unwrap()).unwrap();

     let addr = MoneroAddress::from_str(network, &vector.address).unwrap();
     assert_eq!(addr.spend, spend);

@@ -1,7 +1,8 @@
 use hex_literal::hex;
 use rand_core::OsRng;

-use curve25519_dalek::{scalar::Scalar, edwards::CompressedEdwardsY};
+use curve25519_dalek::scalar::Scalar;
+use monero_generators::decompress_point;
 use multiexp::BatchVerifier;

 use crate::{
@@ -14,7 +15,7 @@ mod plus;
 #[test]
 fn bulletproofs_vector() {
   let scalar = |scalar| Scalar::from_canonical_bytes(scalar).unwrap();
-  let point = |point| CompressedEdwardsY(point).decompress().unwrap();
+  let point = |point| decompress_point(point).unwrap();

   // Generated from Monero
   assert!(Bulletproofs::Original(OriginalStruct {

158
coins/monero/src/tests/extra.rs
Normal file
@@ -0,0 +1,158 @@
|
||||
use crate::{
|
||||
wallet::{ExtraField, Extra, extra::MAX_TX_EXTRA_PADDING_COUNT},
|
||||
serialize::write_varint,
|
||||
};
|
||||
|
||||
use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};
|
||||
|
||||
// Borrowed tests from
|
||||
// https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
|
||||
// tests/unit_tests/test_tx_utils.cpp
|
||||
|
||||
const PUB_KEY_BYTES: [u8; 33] = [
|
||||
1, 30, 208, 98, 162, 133, 64, 85, 83, 112, 91, 188, 89, 211, 24, 131, 39, 154, 22, 228, 80, 63,
|
||||
198, 141, 173, 111, 244, 183, 4, 149, 186, 140, 230,
|
||||
];
|
||||
|
||||
fn pub_key() -> EdwardsPoint {
|
||||
CompressedEdwardsY(PUB_KEY_BYTES[1 .. PUB_KEY_BYTES.len()].try_into().expect("invalid pub key"))
|
||||
.decompress()
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn test_write_buf(extra: &Extra, buf: &[u8]) {
|
||||
let mut w: Vec<u8> = vec![];
|
||||
Extra::write(extra, &mut w).unwrap();
|
||||
assert_eq!(buf, w);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn empty_extra() {
|
||||
let buf: Vec<u8> = vec![];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert!(extra.0.is_empty());
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn padding_only_size_1() {
|
||||
let buf: Vec<u8> = vec![0];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::Padding(1)]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn padding_only_size_2() {
|
||||
let buf: Vec<u8> = vec![0, 0];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::Padding(2)]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn padding_only_max_size() {
|
||||
let buf: Vec<u8> = vec![0; MAX_TX_EXTRA_PADDING_COUNT];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::Padding(MAX_TX_EXTRA_PADDING_COUNT)]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn padding_only_exceed_max_size() {
|
||||
let buf: Vec<u8> = vec![0; MAX_TX_EXTRA_PADDING_COUNT + 1];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert!(extra.0.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn invalid_padding_only() {
|
||||
let buf: Vec<u8> = vec![0, 42];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert!(extra.0.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pub_key_only() {
|
||||
let buf: Vec<u8> = PUB_KEY_BYTES.to_vec();
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::PublicKey(pub_key())]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_nonce_only() {
|
||||
let buf: Vec<u8> = vec![2, 1, 42];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::Nonce(vec![42])]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_nonce_only_wrong_size() {
|
||||
let mut buf: Vec<u8> = vec![0; 20];
|
||||
buf[0] = 2;
|
||||
buf[1] = 255;
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert!(extra.0.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pub_key_and_padding() {
|
||||
let mut buf: Vec<u8> = PUB_KEY_BYTES.to_vec();
|
||||
buf.extend([
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
|
||||
]);
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::PublicKey(pub_key()), ExtraField::Padding(76)]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn pub_key_and_invalid_padding() {
|
||||
let mut buf: Vec<u8> = PUB_KEY_BYTES.to_vec();
|
||||
buf.extend([0, 1]);
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::PublicKey(pub_key())]);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_mysterious_minergate_only() {
|
||||
let buf: Vec<u8> = vec![222, 1, 42];
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::MysteriousMinergate(vec![42])]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_mysterious_minergate_only_large() {
|
||||
let mut buf: Vec<u8> = vec![222];
|
||||
write_varint(&512u64, &mut buf).unwrap();
|
||||
buf.extend_from_slice(&vec![0; 512]);
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(extra.0, vec![ExtraField::MysteriousMinergate(vec![0; 512])]);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_mysterious_minergate_only_wrong_size() {
|
||||
let mut buf: Vec<u8> = vec![0; 20];
|
||||
buf[0] = 222;
|
||||
buf[1] = 255;
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert!(extra.0.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn extra_mysterious_minergate_and_pub_key() {
|
||||
let mut buf: Vec<u8> = vec![222, 1, 42];
|
||||
buf.extend(PUB_KEY_BYTES.to_vec());
|
||||
let extra = Extra::read::<&[u8]>(&mut buf.as_ref()).unwrap();
|
||||
assert_eq!(
|
||||
extra.0,
|
||||
vec![ExtraField::MysteriousMinergate(vec![42]), ExtraField::PublicKey(pub_key())]
|
||||
);
|
||||
test_write_buf(&extra, &buf);
|
||||
}
|
||||
@@ -3,3 +3,4 @@ mod clsag;
|
||||
mod bulletproofs;
|
||||
mod address;
|
||||
mod seed;
|
||||
mod extra;
|
||||
|
||||
@@ -7,7 +7,7 @@ use curve25519_dalek::scalar::Scalar;
|
||||
use crate::{
|
||||
hash,
|
||||
wallet::seed::{
|
||||
Seed, SeedType,
|
||||
Seed, SeedType, SeedError,
|
||||
classic::{self, trim_by_lang},
|
||||
polyseed,
|
||||
},
|
||||
@@ -137,6 +137,53 @@ fn test_classic_seed() {
|
||||
spend: "647f4765b66b636ff07170ab6280a9a6804dfbaf19db2ad37d23be024a18730b".into(),
|
||||
view: "045da65316a906a8c30046053119c18020b07a7a3a6ef5c01ab2a8755416bd02".into(),
|
||||
},
|
||||
// The following seeds require the language specification in order to calculate
|
||||
// a single valid checksum
|
||||
Vector {
|
||||
language: classic::Language::Spanish,
|
||||
seed: "pluma laico atraer pintor peor cerca balde buscar \
|
||||
lancha batir nulo reloj resto gemelo nevera poder columna gol \
|
||||
oveja latir amplio bolero feliz fuerza nevera"
|
||||
.into(),
|
||||
spend: "30303983fc8d215dd020cc6b8223793318d55c466a86e4390954f373fdc7200a".into(),
|
||||
view: "97c649143f3c147ba59aa5506cc09c7992c5c219bb26964442142bf97980800e".into(),
|
||||
},
|
||||
Vector {
|
||||
language: classic::Language::Spanish,
|
||||
seed: "pluma pluma pluma pluma pluma pluma pluma pluma \
|
||||
pluma pluma pluma pluma pluma pluma pluma pluma \
|
||||
pluma pluma pluma pluma pluma pluma pluma pluma pluma"
|
||||
.into(),
|
||||
spend: "b4050000b4050000b4050000b4050000b4050000b4050000b4050000b4050000".into(),
|
||||
view: "d73534f7912b395eb70ef911791a2814eb6df7ce56528eaaa83ff2b72d9f5e0f".into(),
|
||||
},
|
||||
Vector {
|
||||
language: classic::Language::English,
|
||||
seed: "plus plus plus plus plus plus plus plus \
|
||||
plus plus plus plus plus plus plus plus \
|
||||
plus plus plus plus plus plus plus plus plus"
|
||||
.into(),
|
||||
spend: "3b0400003b0400003b0400003b0400003b0400003b0400003b0400003b040000".into(),
|
||||
view: "43a8a7715eed11eff145a2024ddcc39740255156da7bbd736ee66a0838053a02".into(),
|
||||
},
|
||||
Vector {
|
||||
language: classic::Language::Spanish,
|
||||
seed: "audio audio audio audio audio audio audio audio \
|
||||
audio audio audio audio audio audio audio audio \
|
||||
audio audio audio audio audio audio audio audio audio"
|
||||
.into(),
|
||||
spend: "ba000000ba000000ba000000ba000000ba000000ba000000ba000000ba000000".into(),
|
||||
view: "1437256da2c85d029b293d8c6b1d625d9374969301869b12f37186e3f906c708".into(),
|
||||
},
|
||||
Vector {
|
||||
language: classic::Language::English,
|
||||
seed: "audio audio audio audio audio audio audio audio \
|
||||
audio audio audio audio audio audio audio audio \
|
||||
audio audio audio audio audio audio audio audio audio"
|
||||
.into(),
|
||||
spend: "7900000079000000790000007900000079000000790000007900000079000000".into(),
|
||||
view: "20bec797ab96780ae6a045dd816676ca7ed1d7c6773f7022d03ad234b581d600".into(),
|
||||
},
|
||||
];
|
||||
|
||||
for vector in vectors {
|
||||
@@ -150,15 +197,15 @@ fn test_classic_seed() {
|
||||
|
||||
// Test against Monero
|
||||
{
|
||||
let seed = Seed::from_string(Zeroizing::new(vector.seed.clone())).unwrap();
|
||||
println!("{}. language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone());
|
||||
let seed =
|
||||
Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(vector.seed.clone()))
|
||||
.unwrap();
|
||||
let trim = trim_seed(&vector.seed);
|
||||
println!(
|
||||
"{}. seed: {}, entropy: {:?}, trim: {trim}",
|
||||
line!(),
|
||||
*seed.to_string(),
|
||||
*seed.entropy()
|
||||
assert_eq!(
|
||||
seed,
|
||||
Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap()
|
||||
);
|
||||
assert_eq!(seed, Seed::from_string(Zeroizing::new(trim)).unwrap());
|
||||
|
||||
let spend: [u8; 32] = hex::decode(vector.spend).unwrap().try_into().unwrap();
|
||||
// For classical seeds, Monero directly uses the entropy as a spend key
|
||||
@@ -184,19 +231,20 @@ fn test_classic_seed() {
|
||||
// Test against ourselves
|
||||
{
|
||||
let seed = Seed::new(&mut OsRng, SeedType::Classic(vector.language));
|
||||
println!("{}. seed: {}", line!(), *seed.to_string());
|
||||
let trim = trim_seed(&seed.to_string());
|
||||
println!(
|
||||
"{}. seed: {}, entropy: {:?}, trim: {trim}",
|
||||
line!(),
|
||||
*seed.to_string(),
|
||||
*seed.entropy()
|
||||
assert_eq!(
|
||||
seed,
|
||||
Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap()
|
||||
);
|
||||
assert_eq!(seed, Seed::from_string(Zeroizing::new(trim)).unwrap());
|
||||
assert_eq!(
|
||||
seed,
|
||||
Seed::from_entropy(SeedType::Classic(vector.language), seed.entropy(), None).unwrap()
|
||||
);
|
||||
assert_eq!(seed, Seed::from_string(seed.to_string()).unwrap());
|
||||
assert_eq!(
|
||||
seed,
|
||||
Seed::from_string(SeedType::Classic(vector.language), seed.to_string()).unwrap()
|
||||
);
|
||||
}
|
||||
}
|
||||
}
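
The rewritten assertions track an API change: `Seed::from_string` now takes an explicit `SeedType`, because (as the new "pluma"/"audio" vectors show) one word sequence may be valid in several word lists, each yielding different entropy and a different checksum. A sketch of the new call shape, with module paths assumed from the test's imports:

```rust
use zeroize::Zeroizing;
use monero_serai::wallet::seed::{Seed, SeedType, SeedError, classic};

fn parse_classic_spanish(words: &str) -> Result<Seed, SeedError> {
  // The caller now names the word list; there is no cross-language autodetection
  Seed::from_string(
    SeedType::Classic(classic::Language::Spanish),
    Zeroizing::new(words.to_string()),
  )
}
```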
@@ -309,6 +357,18 @@ fn test_polyseed() {
has_prefix: false,
has_accent: false,
},
// The following seed requires the language specification in order to calculate
// a single valid checksum
Vector {
language: polyseed::Language::Spanish,
seed: "impo sort usua cabi venu nobl oliv clim \
cont barr marc auto prod vaca torn fati"
.into(),
entropy: "dbfce25fe09b68a340e01c62417eeef43ad51800000000000000000000000000".into(),
birthday: 1701511650,
has_prefix: true,
has_accent: true,
},
];

for vector in vectors {
@@ -350,31 +410,32 @@ fn test_polyseed() {
};

// String -> Seed
let seed = Seed::from_string(Zeroizing::new(vector.seed.clone())).unwrap();
println!("{}. language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone());
let seed =
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(vector.seed.clone()))
.unwrap();
let trim = trim_seed(&vector.seed);
let add_whitespace = add_whitespace(vector.seed.clone());
let seed_without_accents = seed_without_accents(&vector.seed);
println!(
"{}. seed: {}, entropy: {:?}, trim: {}, add_whitespace: {}, seed_without_accents: {}",
line!(),
*seed.to_string(),
*seed.entropy(),
trim,
add_whitespace,
seed_without_accents,
);

// Make sure a version with added whitespace still works
let whitespaced_seed = Seed::from_string(Zeroizing::new(add_whitespace)).unwrap();
let whitespaced_seed =
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(add_whitespace))
.unwrap();
assert_eq!(seed, whitespaced_seed);
// Check trimmed versions works
if vector.has_prefix {
let trimmed_seed = Seed::from_string(Zeroizing::new(trim)).unwrap();
let trimmed_seed =
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(trim)).unwrap();
assert_eq!(seed, trimmed_seed);
}
// Check versions without accents work
if vector.has_accent {
let seed_without_accents = Seed::from_string(Zeroizing::new(seed_without_accents)).unwrap();
let seed_without_accents = Seed::from_string(
SeedType::Polyseed(vector.language),
Zeroizing::new(seed_without_accents),
)
.unwrap();
assert_eq!(seed, seed_without_accents);
}

@@ -391,8 +452,11 @@ fn test_polyseed() {
// Check against ourselves
{
let seed = Seed::new(&mut OsRng, SeedType::Polyseed(vector.language));
println!("{}. seed: {}, key: {:?}", line!(), *seed.to_string(), *seed.key());
assert_eq!(seed, Seed::from_string(seed.to_string()).unwrap());
println!("{}. seed: {}", line!(), *seed.to_string());
assert_eq!(
seed,
Seed::from_string(SeedType::Polyseed(vector.language), seed.to_string()).unwrap()
);
assert_eq!(
seed,
Seed::from_entropy(
@@ -405,3 +469,14 @@ fn test_polyseed() {
}
}
}

#[test]
fn test_invalid_polyseed() {
// This seed includes unsupported features bits and should error on decode
let seed = "include domain claim resemble urban hire lunch bird \
crucial fire best wife ring warm ignore model"
.into();
let res =
Seed::from_string(SeedType::Polyseed(polyseed::Language::English), Zeroizing::new(seed));
assert_eq!(res, Err(SeedError::UnsupportedFeatures));
}

@@ -161,7 +161,9 @@ impl Timelock {
impl PartialOrd for Timelock {
fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
match (self, other) {
(Timelock::None, Timelock::None) => Some(Ordering::Equal),
(Timelock::None, _) => Some(Ordering::Less),
(_, Timelock::None) => Some(Ordering::Greater),
(Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b),
(Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b),
_ => None,
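
The added arms make `Timelock::None` equal to itself and below any actual lock, while height-based and timestamp-based locks remain incomparable. A standalone mirror of those semantics, using the same match arms:

```rust
use core::cmp::Ordering;

#[derive(PartialEq)]
enum Timelock {
  None,
  Block(usize),
  Time(u64),
}

impl PartialOrd for Timelock {
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    match (self, other) {
      (Timelock::None, Timelock::None) => Some(Ordering::Equal),
      (Timelock::None, _) => Some(Ordering::Less),
      (_, Timelock::None) => Some(Ordering::Greater),
      (Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b),
      (Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b),
      _ => None,
    }
  }
}

fn main() {
  assert!(Timelock::None < Timelock::Block(5));
  // A height lock and a timestamp lock don't order against each other:
  // partial_cmp returns None, so both `<` and `>` are false
  assert_eq!(Timelock::Block(5).partial_cmp(&Timelock::Time(5)), None);
  assert!(!(Timelock::Block(5) < Timelock::Time(5)));
}
```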
@@ -331,14 +333,11 @@ impl Transaction {
}
} else if prefix.version == 2 {
rct_signatures = RctSignatures::read(
&prefix
.inputs
.iter()
.map(|input| match input {
Input::Gen(_) => 0,
Input::ToKey { key_offsets, .. } => key_offsets.len(),
})
.collect::<Vec<_>>(),
prefix.inputs.first().map_or(0, |input| match input {
Input::Gen(_) => 0,
Input::ToKey { key_offsets, .. } => key_offsets.len(),
}),
prefix.inputs.len(),
prefix.outputs.len(),
r,
)?;

@@ -3,7 +3,9 @@ use std_shims::string::{String, ToString};

use zeroize::Zeroize;

use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};
use curve25519_dalek::edwards::EdwardsPoint;

use monero_generators::decompress_point;

use base58_monero::base58::{encode_check, decode_check};

@@ -240,12 +242,10 @@ impl<B: AddressBytes> Address<B> {
}

let mut meta = AddressMeta::from_byte(raw[0])?;
let spend = CompressedEdwardsY(raw[1 .. 33].try_into().unwrap())
.decompress()
.ok_or(AddressError::InvalidKey)?;
let view = CompressedEdwardsY(raw[33 .. 65].try_into().unwrap())
.decompress()
.ok_or(AddressError::InvalidKey)?;
let spend =
decompress_point(raw[1 .. 33].try_into().unwrap()).ok_or(AddressError::InvalidKey)?;
let view =
decompress_point(raw[33 .. 65].try_into().unwrap()).ok_or(AddressError::InvalidKey)?;
let mut read = 65;

if matches!(meta.kind, AddressType::Featured { .. }) {

@@ -21,15 +21,13 @@ use crate::{
serialize::varint_len,
wallet::SpendableOutput,
rpc::{RpcError, RpcConnection, Rpc},
DEFAULT_LOCK_WINDOW, COINBASE_LOCK_WINDOW, BLOCK_TIME,
};

const LOCK_WINDOW: usize = 10;
const MATURITY: u64 = 60;
const RECENT_WINDOW: usize = 15;
const BLOCK_TIME: usize = 120;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
#[allow(clippy::cast_precision_loss)]
const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64;
const TIP_APPLICATION: f64 = (DEFAULT_LOCK_WINDOW * BLOCK_TIME) as f64;

// TODO: Resolve safety of this in case a reorg occurs/the network changes
// TODO: Update this when scanning a block, as possible
@@ -52,8 +50,10 @@ async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
real: &[u64],
used: &mut HashSet<u64>,
count: usize,
fingerprintable_canonical: bool,
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
if height >= rpc.get_height().await? {
// TODO: consider removing this extra RPC and expect the caller to handle it
if fingerprintable_canonical && height > rpc.get_height().await? {
// TODO: Don't use InternalError for the caller's failure
Err(RpcError::InternalError("decoys being requested from too young blocks"))?;
}
@@ -64,6 +64,8 @@ async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
// Retries on failure. Retries are obvious as decoys, yet should be minimal
while confirmed.len() != count {
let remaining = count - confirmed.len();
// TODO: over-request candidates in case some are locked to avoid needing
// round trips to the daemon (and revealing obvious decoys to the daemon)
let mut candidates = Vec::with_capacity(remaining);
while candidates.len() != remaining {
#[cfg(test)]
@@ -117,7 +119,14 @@ async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
}
}

for (i, output) in rpc.get_unlocked_outputs(&candidates, height).await?.iter_mut().enumerate() {
// TODO: make sure that the real output is included in the response, and
// that mask and key are equal to expected
for (i, output) in rpc
.get_unlocked_outputs(&candidates, height, fingerprintable_canonical)
.await?
.iter_mut()
.enumerate()
{
// Don't include the real spend as a decoy, despite requesting it
if real_indexes.contains(&i) {
continue;
@@ -141,6 +150,154 @@ fn offset(ring: &[u64]) -> Vec<u64> {
res
}

async fn select_decoys<R: RngCore + CryptoRng, RPC: RpcConnection>(
rng: &mut R,
rpc: &Rpc<RPC>,
ring_len: usize,
height: usize,
inputs: &[SpendableOutput],
fingerprintable_canonical: bool,
) -> Result<Vec<Decoys>, RpcError> {
#[cfg(feature = "cache-distribution")]
#[cfg(not(feature = "std"))]
let mut distribution = DISTRIBUTION().lock();
#[cfg(feature = "cache-distribution")]
#[cfg(feature = "std")]
let mut distribution = DISTRIBUTION().lock().await;

#[cfg(not(feature = "cache-distribution"))]
let mut distribution = vec![];

let decoy_count = ring_len - 1;

// Convert the inputs in question to the raw output data
let mut real = Vec::with_capacity(inputs.len());
let mut outputs = Vec::with_capacity(inputs.len());
for input in inputs {
real.push(input.global_index);
outputs.push((real[real.len() - 1], [input.key(), input.commitment().calculate()]));
}

if distribution.len() < height {
// TODO: verify distribution elems are strictly increasing
let extension =
rpc.get_output_distribution(distribution.len(), height.saturating_sub(1)).await?;
distribution.extend(extension);
}
// If asked to use an older height than previously asked, truncate to ensure accuracy
// Should never happen, yet risks desyncing if it did
distribution.truncate(height);

if distribution.len() < DEFAULT_LOCK_WINDOW {
Err(RpcError::InternalError("not enough decoy candidates"))?;
}

#[allow(clippy::cast_precision_loss)]
let per_second = {
let blocks = distribution.len().min(BLOCKS_PER_YEAR);
let initial = distribution[distribution.len().saturating_sub(blocks + 1)];
let outputs = distribution[distribution.len() - 1].saturating_sub(initial);
(outputs as f64) / ((blocks * BLOCK_TIME) as f64)
};

let mut used = HashSet::<u64>::new();
for o in &outputs {
used.insert(o.0);
}

// TODO: Create a TX with less than the target amount, as allowed by the protocol
let high = distribution[distribution.len() - DEFAULT_LOCK_WINDOW];
if high.saturating_sub(COINBASE_LOCK_WINDOW as u64) <
u64::try_from(inputs.len() * ring_len).unwrap()
{
Err(RpcError::InternalError("not enough coinbase candidates"))?;
}

// Select all decoys for this transaction, assuming we generate a sane transaction
// We should almost never naturally generate an insane transaction, hence why this doesn't
// bother with an overage
let mut decoys = select_n(
rng,
rpc,
&distribution,
height,
high,
per_second,
&real,
&mut used,
inputs.len() * decoy_count,
fingerprintable_canonical,
)
.await?;
real.zeroize();

let mut res = Vec::with_capacity(inputs.len());
for o in outputs {
// Grab the decoys for this specific output
let mut ring = decoys.drain((decoys.len() - decoy_count) ..).collect::<Vec<_>>();
ring.push(o);
ring.sort_by(|a, b| a.0.cmp(&b.0));

// Sanity checks are only run when 1000 outputs are available in Monero
// We run this check whenever the highest output index, which we acknowledge, is > 500
// This means we assume (for presumably test blockchains) the height being used has not had
// 500 outputs since while itself not being a sufficiently mature blockchain
// Considering Monero's p2p layer doesn't actually check transaction sanity, it should be
// fine for us to not have perfectly matching rules, especially since this code will infinite
// loop if it can't determine sanity, which is possible with sufficient inputs on
// sufficiently small chains
if high > 500 {
// Make sure the TX passes the sanity check that the median output is within the last 40%
let target_median = high * 3 / 5;
while ring[ring_len / 2].0 < target_median {
// If it's not, update the bottom half with new values to ensure the median only moves up
for removed in ring.drain(0 .. (ring_len / 2)).collect::<Vec<_>>() {
// If we removed the real spend, add it back
if removed.0 == o.0 {
ring.push(o);
} else {
// We could not remove this, saving CPU time and removing low values as
// possibilities, yet it'd increase the amount of decoys required to create this
// transaction and some removed outputs may be the best option (as we drop the first
// half, not just the bottom n)
used.remove(&removed.0);
}
}

// Select new outputs until we have a full sized ring again
ring.extend(
select_n(
rng,
rpc,
&distribution,
height,
high,
per_second,
&[],
&mut used,
ring_len - ring.len(),
fingerprintable_canonical,
)
.await?,
);
ring.sort_by(|a, b| a.0.cmp(&b.0));
}

// The other sanity check rule is about duplicates, yet we already enforce unique ring
// members
}

res.push(Decoys {
// Binary searches for the real spend since we don't know where it sorted to
i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
ring: ring.iter().map(|output| output.1).collect(),
});
}

Ok(res)
}

/// Decoy data, containing the actual member as well (at index `i`).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct Decoys {
@@ -159,7 +316,16 @@ impl Decoys {
self.offsets.len()
}

/// Select decoys using the same distribution as Monero.
pub fn indexes(&self) -> Vec<u64> {
let mut res = vec![self.offsets[0]; self.len()];
for m in 1 .. res.len() {
res[m] = res[m - 1] + self.offsets[m];
}
res
}

/// Select decoys using the same distribution as Monero. Relies on the monerod RPC
/// response for an output's unlocked status, minimizing trips to the daemon.
pub async fn select<R: RngCore + CryptoRng, RPC: RpcConnection>(
rng: &mut R,
rpc: &Rpc<RPC>,
@@ -167,132 +333,24 @@ impl Decoys {
height: usize,
inputs: &[SpendableOutput],
) -> Result<Vec<Decoys>, RpcError> {
#[cfg(feature = "cache-distribution")]
#[cfg(not(feature = "std"))]
let mut distribution = DISTRIBUTION().lock();
#[cfg(feature = "cache-distribution")]
#[cfg(feature = "std")]
let mut distribution = DISTRIBUTION().lock().await;
select_decoys(rng, rpc, ring_len, height, inputs, false).await
}

#[cfg(not(feature = "cache-distribution"))]
let mut distribution = vec![];

let decoy_count = ring_len - 1;

// Convert the inputs in question to the raw output data
let mut real = Vec::with_capacity(inputs.len());
let mut outputs = Vec::with_capacity(inputs.len());
for input in inputs {
real.push(input.global_index);
outputs.push((real[real.len() - 1], [input.key(), input.commitment().calculate()]));
}

if distribution.len() <= height {
let extension = rpc.get_output_distribution(distribution.len(), height).await?;
distribution.extend(extension);
}
// If asked to use an older height than previously asked, truncate to ensure accuracy
// Should never happen, yet risks desyncing if it did
distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height

let high = distribution[distribution.len() - 1];
#[allow(clippy::cast_precision_loss)]
let per_second = {
let blocks = distribution.len().min(BLOCKS_PER_YEAR);
let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)];
(outputs as f64) / ((blocks * BLOCK_TIME) as f64)
};

let mut used = HashSet::<u64>::new();
for o in &outputs {
used.insert(o.0);
}

// TODO: Create a TX with less than the target amount, as allowed by the protocol
if (high - MATURITY) < u64::try_from(inputs.len() * ring_len).unwrap() {
Err(RpcError::InternalError("not enough decoy candidates"))?;
}

// Select all decoys for this transaction, assuming we generate a sane transaction
// We should almost never naturally generate an insane transaction, hence why this doesn't
// bother with an overage
let mut decoys = select_n(
rng,
rpc,
&distribution,
height,
high,
per_second,
&real,
&mut used,
inputs.len() * decoy_count,
)
.await?;
real.zeroize();

let mut res = Vec::with_capacity(inputs.len());
for o in outputs {
// Grab the decoys for this specific output
let mut ring = decoys.drain((decoys.len() - decoy_count) ..).collect::<Vec<_>>();
ring.push(o);
ring.sort_by(|a, b| a.0.cmp(&b.0));

// Sanity checks are only run when 1000 outputs are available in Monero
// We run this check whenever the highest output index, which we acknowledge, is > 500
// This means we assume (for presumably test blockchains) the height being used has not had
// 500 outputs since while itself not being a sufficiently mature blockchain
// Considering Monero's p2p layer doesn't actually check transaction sanity, it should be
// fine for us to not have perfectly matching rules, especially since this code will infinite
// loop if it can't determine sanity, which is possible with sufficient inputs on
// sufficiently small chains
if high > 500 {
// Make sure the TX passes the sanity check that the median output is within the last 40%
let target_median = high * 3 / 5;
while ring[ring_len / 2].0 < target_median {
// If it's not, update the bottom half with new values to ensure the median only moves up
for removed in ring.drain(0 .. (ring_len / 2)).collect::<Vec<_>>() {
// If we removed the real spend, add it back
if removed.0 == o.0 {
ring.push(o);
} else {
// We could not remove this, saving CPU time and removing low values as
// possibilities, yet it'd increase the amount of decoys required to create this
// transaction and some removed outputs may be the best option (as we drop the first
// half, not just the bottom n)
used.remove(&removed.0);
}
}

// Select new outputs until we have a full sized ring again
ring.extend(
select_n(
rng,
rpc,
&distribution,
height,
high,
per_second,
&[],
&mut used,
ring_len - ring.len(),
)
.await?,
);
ring.sort_by(|a, b| a.0.cmp(&b.0));
}

// The other sanity check rule is about duplicates, yet we already enforce unique ring
// members
}

res.push(Decoys {
// Binary searches for the real spend since we don't know where it sorted to
i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
ring: ring.iter().map(|output| output.1).collect(),
});
}

Ok(res)
/// If no reorg has occurred and an honest RPC, any caller who passes the same height to this
/// function will use the same distribution to select decoys. It is fingerprintable
/// because a caller using this will not be able to select decoys that are timelocked
/// with a timestamp. Any transaction which includes timestamp timelocked decoys in its
/// rings could not be constructed using this function.
///
/// TODO: upstream change to monerod get_outs RPC to accept a height param for checking
/// output's unlocked status and remove all usage of fingerprintable_canonical
pub async fn fingerprintable_canonical_select<R: RngCore + CryptoRng, RPC: RpcConnection>(
rng: &mut R,
rpc: &Rpc<RPC>,
ring_len: usize,
height: usize,
inputs: &[SpendableOutput],
) -> Result<Vec<Decoys>, RpcError> {
select_decoys(rng, rpc, ring_len, height, inputs, true).await
}
}
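
Both public entry points now delegate to the shared `select_decoys`, differing only in the `fingerprintable_canonical` flag. A hedged usage sketch; `HttpRpc`, the re-export paths, and the ring length of 16 are assumptions, not part of this diff:

```rust
use rand_core::OsRng;
use monero_serai::{
  rpc::{Rpc, RpcError, HttpRpc},
  wallet::{SpendableOutput, Decoys},
};

async fn pick_rings(
  rpc: &Rpc<HttpRpc>,
  height: usize,
  inputs: &[SpendableOutput],
) -> Result<Vec<Decoys>, RpcError> {
  const RING_LEN: usize = 16;
  // Trusts monerod's unlocked flag, minimizing RPC round trips:
  // Decoys::select(&mut OsRng, rpc, RING_LEN, height, inputs).await
  // Deterministic per height, at the cost of never picking timestamp-locked decoys:
  Decoys::fingerprintable_canonical_select(&mut OsRng, rpc, RING_LEN, height, inputs).await
}
```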
@@ -1,7 +1,7 @@
use core::ops::BitXor;
use std_shims::{
vec::Vec,
io::{self, Read, Write},
io::{self, Read, BufRead, Write},
};

use zeroize::Zeroize;
@@ -13,6 +13,7 @@ use crate::serialize::{
write_point, write_vec,
};

pub const MAX_TX_EXTRA_PADDING_COUNT: usize = 255;
pub const MAX_TX_EXTRA_NONCE_SIZE: usize = 255;

pub const PAYMENT_ID_MARKER: u8 = 0;
@@ -70,15 +71,23 @@ impl PaymentId {
// Doesn't bother with padding nor MinerGate
#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub enum ExtraField {
Padding(usize),
PublicKey(EdwardsPoint),
Nonce(Vec<u8>),
MergeMining(usize, [u8; 32]),
PublicKeys(Vec<EdwardsPoint>),
MysteriousMinergate(Vec<u8>),
}

impl ExtraField {
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
match self {
ExtraField::Padding(size) => {
w.write_all(&[0])?;
for _ in 1 .. *size {
write_byte(&0u8, w)?;
}
}
ExtraField::PublicKey(key) => {
w.write_all(&[1])?;
w.write_all(&key.compress().to_bytes())?;
@@ -96,12 +105,39 @@ impl ExtraField {
w.write_all(&[4])?;
write_vec(write_point, keys, w)?;
}
ExtraField::MysteriousMinergate(data) => {
w.write_all(&[0xDE])?;
write_vec(write_byte, data, w)?;
}
}
Ok(())
}

pub fn read<R: Read>(r: &mut R) -> io::Result<ExtraField> {
pub fn read<R: BufRead>(r: &mut R) -> io::Result<ExtraField> {
Ok(match read_byte(r)? {
0 => ExtraField::Padding({
// Read until either non-zero, max padding count, or end of buffer
let mut size: usize = 1;
loop {
let buf = r.fill_buf()?;
let mut n_consume = 0;
for v in buf {
if *v != 0u8 {
Err(io::Error::other("non-zero value after padding"))?
}
n_consume += 1;
size += 1;
if size > MAX_TX_EXTRA_PADDING_COUNT {
Err(io::Error::other("padding exceeded max count"))?
}
}
if n_consume == 0 {
break;
}
r.consume(n_consume);
}
size
}),
1 => ExtraField::PublicKey(read_point(r)?),
2 => ExtraField::Nonce({
let nonce = read_vec(read_byte, r)?;
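
The move from `Read` to `BufRead` lets the padding parser scan whole buffered chunks via `fill_buf`/`consume` instead of issuing a read per zero byte, and gives it a clean end-of-input signal (an empty buffer), which is the only legitimate terminator for padding. A standalone version of that loop under the same cap:

```rust
use std::io::{self, BufRead};

fn read_padding<R: BufRead>(r: &mut R, max: usize) -> io::Result<usize> {
  // The 0x00 tag byte which selected the padding field counts as the first byte
  let mut size: usize = 1;
  loop {
    let buf = r.fill_buf()?;
    let mut n_consume = 0;
    for v in buf {
      if *v != 0u8 {
        return Err(io::Error::other("non-zero value after padding"));
      }
      n_consume += 1;
      size += 1;
      if size > max {
        return Err(io::Error::other("padding exceeded max count"));
      }
    }
    if n_consume == 0 {
      break; // fill_buf returned an empty buffer: end of input
    }
    r.consume(n_consume);
  }
  Ok(size)
}

fn main() {
  // Tag already consumed; three further zero bytes -> four bytes of padding total
  assert_eq!(read_padding(&mut [0u8; 3].as_slice(), 255).unwrap(), 4);
  // A non-zero byte inside the padding run is an error
  assert!(read_padding(&mut [0u8, 42].as_slice(), 255).is_err());
}
```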
@@ -112,20 +148,21 @@ impl ExtraField {
}),
3 => ExtraField::MergeMining(read_varint(r)?, read_bytes(r)?),
4 => ExtraField::PublicKeys(read_vec(read_point, r)?),
0xDE => ExtraField::MysteriousMinergate(read_vec(read_byte, r)?),
_ => Err(io::Error::other("unknown extra field"))?,
})
}
}

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Extra(Vec<ExtraField>);
pub struct Extra(pub(crate) Vec<ExtraField>);
impl Extra {
pub fn keys(&self) -> Option<(EdwardsPoint, Option<Vec<EdwardsPoint>>)> {
let mut key = None;
pub fn keys(&self) -> Option<(Vec<EdwardsPoint>, Option<Vec<EdwardsPoint>>)> {
let mut keys = vec![];
let mut additional = None;
for field in &self.0 {
match field.clone() {
ExtraField::PublicKey(this_key) => key = key.or(Some(this_key)),
ExtraField::PublicKey(this_key) => keys.push(this_key),
ExtraField::PublicKeys(these_additional) => {
additional = additional.or(Some(these_additional))
}
@@ -133,7 +170,11 @@ impl Extra {
}
}
// Don't return any keys if this was non-standard and didn't include the primary key
key.map(|key| (key, additional))
if keys.is_empty() {
None
} else {
Some((keys, additional))
}
}

pub fn payment_id(&self) -> Option<PaymentId> {
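
The scanner-facing consequence of `keys` returning `Vec<EdwardsPoint>`: every `PublicKey` field in extra is surfaced as a candidate transaction key rather than only the first. An illustrative crate-internal test (the tuple field and these imports are `pub(crate)`-level, mirroring the new test file):

```rust
use curve25519_dalek::{
  constants::ED25519_BASEPOINT_POINT, traits::Identity, edwards::EdwardsPoint,
};
use crate::wallet::{Extra, ExtraField};

#[test]
fn keys_returns_all_public_keys() {
  let extra = Extra(vec![
    ExtraField::PublicKey(ED25519_BASEPOINT_POINT),
    ExtraField::PublicKey(EdwardsPoint::identity()),
  ]);
  let (keys, additional) = extra.keys().unwrap();
  assert_eq!(keys.len(), 2); // both candidate tx keys, in order of appearance
  assert!(additional.is_none());
}
```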
@@ -200,7 +241,7 @@ impl Extra {
buf
}

pub fn read<R: Read>(r: &mut R) -> io::Result<Extra> {
pub fn read<R: BufRead>(r: &mut R) -> io::Result<Extra> {
let mut res = Extra(vec![]);
let mut field;
while {

@@ -9,6 +9,8 @@ use zeroize::{Zeroize, ZeroizeOnDrop};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};

use monero_generators::decompress_point;

use crate::{
Commitment,
serialize::{read_byte, read_u32, read_u64, read_bytes, read_scalar, read_point, read_raw_vec},
@@ -99,10 +101,18 @@ pub struct Metadata {
/// The subaddress this output was sent to.
pub subaddress: Option<SubaddressIndex>,
/// The payment ID included with this output.
/// This will be gibberish if the payment ID wasn't intended for the recipient or wasn't included.
// Could be an Option, as extra doesn't necessarily have a payment ID, yet all Monero TXs should
// have this making it simplest for it to be as-is.
pub payment_id: [u8; 8],
/// There are 2 circumstances in which the reference wallet2 ignores the payment ID
/// but the payment ID will be returned here anyway:
///
/// 1) If the payment ID is tied to an output received by a subaddress account
/// that spent Monero in the transaction (the received output is considered
/// "change" and is not considered a "payment" in this case). If there are multiple
/// spending subaddress accounts in a transaction, the highest index spent key image
/// is used to determine the spending subaddress account.
///
/// 2) If the payment ID is the unencrypted variant and the block's hf version is
/// v12 or higher (https://github.com/serai-dex/serai/issues/512)
pub payment_id: Option<PaymentId>,
/// Arbitrary data encoded in TX extra.
pub arbitrary_data: Vec<Vec<u8>>,
}
@@ -112,7 +122,7 @@ impl core::fmt::Debug for Metadata {
fmt
.debug_struct("Metadata")
.field("subaddress", &self.subaddress)
.field("payment_id", &hex::encode(self.payment_id))
.field("payment_id", &self.payment_id)
.field("arbitrary_data", &self.arbitrary_data.iter().map(hex::encode).collect::<Vec<_>>())
.finish()
}
@@ -127,7 +137,13 @@ impl Metadata {
} else {
w.write_all(&[0])?;
}
w.write_all(&self.payment_id)?;

if let Some(payment_id) = self.payment_id {
w.write_all(&[1])?;
payment_id.write(w)?;
} else {
w.write_all(&[0])?;
}

w.write_all(&u32::try_from(self.arbitrary_data.len()).unwrap().to_le_bytes())?;
for part in &self.arbitrary_data {
@@ -155,7 +171,7 @@ impl Metadata {

Ok(Metadata {
subaddress,
payment_id: read_bytes(r)?,
payment_id: if read_byte(r)? == 1 { PaymentId::read(r).ok() } else { None },
arbitrary_data: {
let mut data = vec![];
for _ in 0 .. read_u32(r)? {
@@ -334,7 +350,7 @@ impl Scanner {
return Timelocked(tx.prefix.timelock, vec![]);
};

let Some((tx_key, additional)) = extra.keys() else {
let Some((tx_keys, additional)) = extra.keys() else {
return Timelocked(tx.prefix.timelock, vec![]);
};

@@ -349,13 +365,15 @@ impl Scanner {
}
}

let output_key = output.key.decompress();
let output_key = decompress_point(output.key.to_bytes());
if output_key.is_none() {
continue;
}
let output_key = output_key.unwrap();

for key in [Some(Some(&tx_key)), additional.as_ref().map(|additional| additional.get(o))] {
let additional = additional.as_ref().map(|additional| additional.get(o));

for key in tx_keys.iter().map(|key| Some(Some(key))).chain(core::iter::once(additional)) {
let key = match key {
Some(Some(key)) => key,
Some(None) => {
@@ -363,7 +381,6 @@ impl Scanner {
// https://github.com/monero-project/monero/
// blob/04a1e2875d6e35e27bb21497988a6c822d319c28/
// src/cryptonote_basic/cryptonote_format_utils.cpp#L1062
// TODO: Should this return? Where does Monero set the trap handler for this exception?
continue;
}
None => {
@@ -376,12 +393,7 @@ impl Scanner {
o,
);

let payment_id =
if let Some(PaymentId::Encrypted(id)) = payment_id.map(|id| id ^ payment_id_xor) {
id
} else {
payment_id_xor
};
let payment_id = payment_id.map(|id| id ^ payment_id_xor);

if let Some(actual_view_tag) = output.view_tag {
if actual_view_tag != view_tag {
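
The replaced branch fell back to returning the XOR pad itself whenever extra carried no payment ID; the new `map` keeps `None` as `None`. A minimal sketch of the decryption step over raw 8-byte IDs (the crate's `PaymentId` `BitXor` impl does the equivalent):

```rust
fn decrypt_payment_id(payment_id: Option<[u8; 8]>, payment_id_xor: [u8; 8]) -> Option<[u8; 8]> {
  // No payment ID in extra now scans to None instead of leaking the pad
  payment_id.map(|id| (u64::from_le_bytes(id) ^ u64::from_le_bytes(payment_id_xor)).to_le_bytes())
}

fn main() {
  let xor = [7u8; 8];
  assert_eq!(decrypt_payment_id(None, xor), None);
  assert_eq!(decrypt_payment_id(Some([7u8; 8]), xor), Some([0u8; 8]));
}
```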
@@ -16,7 +16,7 @@ use crate::{random_scalar, wallet::seed::SeedError};
pub(crate) const CLASSIC_SEED_LENGTH: usize = 24;
pub(crate) const CLASSIC_SEED_LENGTH_WITH_CHECKSUM: usize = 25;

#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash)]
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, Zeroize)]
pub enum Language {
Chinese,
English,
@@ -184,63 +184,58 @@ fn key_to_seed(lang: Language, key: Zeroizing<Scalar>) -> ClassicSeed {
}
*res += word;
}
ClassicSeed(res)
ClassicSeed(lang, res)
}

// Convert a seed to bytes
pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32]>), SeedError> {
pub(crate) fn seed_to_bytes(lang: Language, words: &str) -> Result<Zeroizing<[u8; 32]>, SeedError> {
// get seed words
let words = words.split_whitespace().map(|w| Zeroizing::new(w.to_string())).collect::<Vec<_>>();
if (words.len() != CLASSIC_SEED_LENGTH) && (words.len() != CLASSIC_SEED_LENGTH_WITH_CHECKSUM) {
panic!("invalid seed passed to seed_to_bytes");
}

// find the language
let (matched_indices, lang_name, lang) = (|| {
let has_checksum = words.len() == CLASSIC_SEED_LENGTH_WITH_CHECKSUM;
if has_checksum && lang == Language::EnglishOld {
Err(SeedError::EnglishOldWithChecksum)?;
}

// Validate words are in the language word list
let lang_word_list: &WordList = &LANGUAGES()[&lang];
let matched_indices = (|| {
let has_checksum = words.len() == CLASSIC_SEED_LENGTH_WITH_CHECKSUM;
let mut matched_indices = Zeroizing::new(vec![]);

// Iterate through all the languages
'language: for (lang_name, lang) in LANGUAGES() {
matched_indices.zeroize();
matched_indices.clear();
// Iterate through all the words and see if they're all present
for word in &words {
let trimmed = trim(word, lang_word_list.unique_prefix_length);
let word = if has_checksum { &trimmed } else { word };

// Iterate through all the words and see if they're all present
for word in &words {
let trimmed = trim(word, lang.unique_prefix_length);
let word = if has_checksum { &trimmed } else { word };

if let Some(index) = if has_checksum {
lang.trimmed_word_map.get(word.deref())
} else {
lang.word_map.get(&word.as_str())
} {
matched_indices.push(*index);
} else {
continue 'language;
}
if let Some(index) = if has_checksum {
lang_word_list.trimmed_word_map.get(word.deref())
} else {
lang_word_list.word_map.get(&word.as_str())
} {
matched_indices.push(*index);
} else {
Err(SeedError::InvalidSeed)?;
}

if has_checksum {
if lang_name == &Language::EnglishOld {
Err(SeedError::EnglishOldWithChecksum)?;
}

// exclude the last word when calculating a checksum.
let last_word = words.last().unwrap().clone();
let checksum = words[checksum_index(&words[.. words.len() - 1], lang)].clone();

// check the trimmed checksum and trimmed last word line up
if trim(&checksum, lang.unique_prefix_length) != trim(&last_word, lang.unique_prefix_length)
{
Err(SeedError::InvalidChecksum)?;
}
}

return Ok((matched_indices, lang_name, lang));
}

Err(SeedError::UnknownLanguage)?
if has_checksum {
// exclude the last word when calculating a checksum.
let last_word = words.last().unwrap().clone();
let checksum = words[checksum_index(&words[.. words.len() - 1], lang_word_list)].clone();

// check the trimmed checksum and trimmed last word line up
if trim(&checksum, lang_word_list.unique_prefix_length) !=
trim(&last_word, lang_word_list.unique_prefix_length)
{
Err(SeedError::InvalidChecksum)?;
}
}

Ok(matched_indices)
})()?;

// convert to bytes
@@ -254,16 +249,17 @@ pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32]
indices[3] = matched_indices[i3 + 2];

let inner = |i| {
let mut base = (lang.word_list.len() - indices[i] + indices[i + 1]) % lang.word_list.len();
let mut base = (lang_word_list.word_list.len() - indices[i] + indices[i + 1]) %
lang_word_list.word_list.len();
// Shift the index over
for _ in 0 .. i {
base *= lang.word_list.len();
base *= lang_word_list.word_list.len();
}
base
};
// set the last index
indices[0] = indices[1] + inner(1) + inner(2);
if (indices[0] % lang.word_list.len()) != indices[1] {
if (indices[0] % lang_word_list.word_list.len()) != indices[1] {
Err(SeedError::InvalidSeed)?;
}

@@ -273,19 +269,19 @@ pub(crate) fn seed_to_bytes(words: &str) -> Result<(Language, Zeroizing<[u8; 32]
bytes.zeroize();
}

Ok((*lang_name, res))
Ok(res)
}

#[derive(Clone, PartialEq, Eq, Zeroize)]
pub struct ClassicSeed(Zeroizing<String>);
pub struct ClassicSeed(Language, Zeroizing<String>);
impl ClassicSeed {
pub(crate) fn new<R: RngCore + CryptoRng>(rng: &mut R, lang: Language) -> ClassicSeed {
key_to_seed(lang, Zeroizing::new(random_scalar(rng)))
}

#[allow(clippy::needless_pass_by_value)]
pub fn from_string(words: Zeroizing<String>) -> Result<ClassicSeed, SeedError> {
let (lang, entropy) = seed_to_bytes(&words)?;
pub fn from_string(lang: Language, words: Zeroizing<String>) -> Result<ClassicSeed, SeedError> {
let entropy = seed_to_bytes(lang, &words)?;

// Make sure this is a valid scalar
let scalar = Scalar::from_canonical_bytes(*entropy);
@@ -306,10 +302,10 @@ impl ClassicSeed {
}

pub(crate) fn to_string(&self) -> Zeroizing<String> {
self.0.clone()
self.1.clone()
}

pub(crate) fn entropy(&self) -> Zeroizing<[u8; 32]> {
seed_to_bytes(&self.0).unwrap().1
seed_to_bytes(self.0, &self.1).unwrap()
}
}
@@ -61,13 +61,23 @@ impl Seed {
}

/// Parse a seed from a `String`.
pub fn from_string(words: Zeroizing<String>) -> Result<Seed, SeedError> {
match words.split_whitespace().count() {
CLASSIC_SEED_LENGTH | CLASSIC_SEED_LENGTH_WITH_CHECKSUM => {
ClassicSeed::from_string(words).map(Seed::Classic)
pub fn from_string(seed_type: SeedType, words: Zeroizing<String>) -> Result<Seed, SeedError> {
let word_count = words.split_whitespace().count();
match seed_type {
SeedType::Classic(lang) => {
if word_count != CLASSIC_SEED_LENGTH && word_count != CLASSIC_SEED_LENGTH_WITH_CHECKSUM {
Err(SeedError::InvalidSeedLength)?
} else {
ClassicSeed::from_string(lang, words).map(Seed::Classic)
}
}
SeedType::Polyseed(lang) => {
if word_count != POLYSEED_LENGTH {
Err(SeedError::InvalidSeedLength)?
} else {
Polyseed::from_string(lang, words).map(Seed::Polyseed)
}
}
POLYSEED_LENGTH => Polyseed::from_string(words).map(Seed::Polyseed),
_ => Err(SeedError::InvalidSeedLength)?,
}
}

@@ -217,6 +217,31 @@ impl Polyseed {
poly
}

fn from_internal(
language: Language,
masked_features: u8,
encoded_birthday: u16,
entropy: Zeroizing<[u8; 32]>,
) -> Result<Polyseed, SeedError> {
if !polyseed_features_supported(masked_features) {
Err(SeedError::UnsupportedFeatures)?;
}

if !valid_entropy(&entropy) {
Err(SeedError::InvalidEntropy)?;
}

let mut res = Polyseed {
language,
birthday: encoded_birthday,
features: masked_features,
entropy,
checksum: 0,
};
res.checksum = poly_eval(&res.to_poly());
Ok(res)
}

/// Create a new `Polyseed` with specific internals.
///
/// `birthday` is defined in seconds since the Unix epoch.
@@ -226,20 +251,7 @@ impl Polyseed {
birthday: u64,
entropy: Zeroizing<[u8; 32]>,
) -> Result<Polyseed, SeedError> {
let features = user_features(features);
if !polyseed_features_supported(features) {
Err(SeedError::UnsupportedFeatures)?;
}

let birthday = birthday_encode(birthday);

if !valid_entropy(&entropy) {
Err(SeedError::InvalidEntropy)?;
}

let mut res = Polyseed { language, birthday, features, entropy, checksum: 0 };
res.checksum = poly_eval(&res.to_poly());
Ok(res)
Self::from_internal(language, user_features(features), birthday_encode(birthday), entropy)
}

/// Create a new `Polyseed`.
@@ -263,67 +275,62 @@ impl Polyseed {

/// Create a new `Polyseed` from a String.
#[allow(clippy::needless_pass_by_value)]
pub fn from_string(seed: Zeroizing<String>) -> Result<Polyseed, SeedError> {
pub fn from_string(lang: Language, seed: Zeroizing<String>) -> Result<Polyseed, SeedError> {
// Decode the seed into its polynomial coefficients
let mut poly = [0; POLYSEED_LENGTH];
let lang = (|| {
'language: for (name, lang) in LANGUAGES() {
for (i, word) in seed.split_whitespace().enumerate() {
// Find the word's index
fn check_if_matches<S: AsRef<str>, I: Iterator<Item = S>>(
has_prefix: bool,
mut lang_words: I,
word: &str,
) -> Option<usize> {
if has_prefix {
// Get the position of the word within the iterator
// Doesn't use starts_with and some words are substrs of others, leading to false
// positives
let mut get_position = || {
lang_words.position(|lang_word| {
let mut lang_word = lang_word.as_ref().chars();
let mut word = word.chars();

let mut res = true;
for _ in 0 .. PREFIX_LEN {
res &= lang_word.next() == word.next();
}
res
})
};
let res = get_position();
// If another word has this prefix, don't call it a match
if get_position().is_some() {
return None;
// Validate words are in the lang word list
let lang_word_list: &WordList = &LANGUAGES()[&lang];
for (i, word) in seed.split_whitespace().enumerate() {
// Find the word's index
fn check_if_matches<S: AsRef<str>, I: Iterator<Item = S>>(
has_prefix: bool,
mut lang_words: I,
word: &str,
) -> Option<usize> {
if has_prefix {
// Get the position of the word within the iterator
// Doesn't use starts_with and some words are substrs of others, leading to false
// positives
let mut get_position = || {
lang_words.position(|lang_word| {
let mut lang_word = lang_word.as_ref().chars();
let mut word = word.chars();

let mut res = true;
for _ in 0 .. PREFIX_LEN {
res &= lang_word.next() == word.next();
}
res
} else {
lang_words.position(|lang_word| lang_word.as_ref() == word)
}
}

let Some(coeff) = (if lang.has_accent {
let ascii = |word: &str| word.chars().filter(char::is_ascii).collect::<String>();
check_if_matches(
lang.has_prefix,
lang.words.iter().map(|lang_word| ascii(lang_word)),
&ascii(word),
)
} else {
check_if_matches(lang.has_prefix, lang.words.iter(), word)
}) else {
continue 'language;
})
};

// WordList asserts the word list length is less than u16::MAX
poly[i] = u16::try_from(coeff).expect("coeff exceeded u16");
let res = get_position();
// If another word has this prefix, don't call it a match
if get_position().is_some() {
return None;
}
res
} else {
lang_words.position(|lang_word| lang_word.as_ref() == word)
}

return Ok(*name);
}

Err(SeedError::UnknownLanguage)
})()?;
let Some(coeff) = (if lang_word_list.has_accent {
let ascii = |word: &str| word.chars().filter(char::is_ascii).collect::<String>();
check_if_matches(
lang_word_list.has_prefix,
lang_word_list.words.iter().map(|lang_word| ascii(lang_word)),
&ascii(word),
)
} else {
check_if_matches(lang_word_list.has_prefix, lang_word_list.words.iter(), word)
}) else {
Err(SeedError::InvalidSeed)?
};

// WordList asserts the word list length is less than u16::MAX
poly[i] = u16::try_from(coeff).expect("coeff exceeded u16");
}

// xor out the coin
poly[POLY_NUM_CHECK_DIGITS] ^= COIN;
@@ -375,7 +382,7 @@ impl Polyseed {
let features =
u8::try_from(extra >> DATE_BITS).expect("couldn't convert extra >> DATE_BITS to u8");

let res = Polyseed::from(lang, features, birthday_decode(birthday), entropy);
let res = Self::from_internal(lang, features, birthday, entropy);
if let Ok(res) = res.as_ref() {
debug_assert_eq!(res.checksum, checksum);
}
@@ -69,16 +69,23 @@ impl SendOutput {
#[allow(non_snake_case)]
fn internal(
unique: [u8; 32],
output: (usize, (MoneroAddress, u64)),
output: (usize, (MoneroAddress, u64), bool),
ecdh: EdwardsPoint,
R: EdwardsPoint,
) -> (SendOutput, Option<[u8; 8]>) {
let o = output.0;
let need_dummy_payment_id = output.2;
let output = output.1;

let (view_tag, shared_key, payment_id_xor) =
shared_key(Some(unique).filter(|_| output.0.is_guaranteed()), ecdh, o);

let payment_id = output
.0
.payment_id()
.or(if need_dummy_payment_id { Some([0u8; 8]) } else { None })
.map(|id| (u64::from_le_bytes(id) ^ u64::from_le_bytes(payment_id_xor)).to_le_bytes());

(
SendOutput {
R,
@@ -87,17 +94,14 @@ impl SendOutput {
commitment: Commitment::new(commitment_mask(shared_key), output.1),
amount: amount_encryption(output.1, shared_key),
},
output
.0
.payment_id()
.map(|id| (u64::from_le_bytes(id) ^ u64::from_le_bytes(payment_id_xor)).to_le_bytes()),
payment_id,
)
}

fn new(
r: &Zeroizing<Scalar>,
unique: [u8; 32],
output: (usize, (MoneroAddress, u64)),
output: (usize, (MoneroAddress, u64), bool),
) -> (SendOutput, Option<[u8; 8]>) {
let address = output.1 .0;
SendOutput::internal(
@@ -115,7 +119,7 @@ impl SendOutput {
fn change(
ecdh: EdwardsPoint,
unique: [u8; 32],
output: (usize, (MoneroAddress, u64)),
output: (usize, (MoneroAddress, u64), bool),
) -> (SendOutput, Option<[u8; 8]>) {
SendOutput::internal(unique, output, ecdh, ED25519_BASEPOINT_POINT)
}
@@ -270,20 +274,22 @@ impl Fee {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[allow(non_camel_case_types)]
pub enum FeePriority {
Lowest,
Low,
Medium,
High,
Unimportant,
Normal,
Elevated,
Priority,
Custom { priority: u32 },
}

/// https://github.com/monero-project/monero/blob/ac02af92867590ca80b2779a7bbeafa99ff94dcb/
/// src/simplewallet/simplewallet.cpp#L161
impl FeePriority {
pub(crate) fn fee_priority(&self) -> u32 {
match self {
FeePriority::Lowest => 0,
FeePriority::Low => 1,
FeePriority::Medium => 2,
FeePriority::High => 3,
FeePriority::Unimportant => 1,
FeePriority::Normal => 2,
FeePriority::Elevated => 3,
FeePriority::Priority => 4,
FeePriority::Custom { priority, .. } => *priority,
}
}
|
||||
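The rename keeps the integers the daemon expects: Unimportant through Priority encode to 1 through 4, matching the reference CLI wallet's priority table linked above, while the old Lowest (0) is dropped. A standalone mirror of the mapping, with the enum redeclared locally so this runs without monero-serai:

#[derive(Clone, Copy, Debug)]
enum FeePriority {
  Unimportant,
  Normal,
  Elevated,
  Priority,
  Custom { priority: u32 },
}

fn fee_priority(p: FeePriority) -> u32 {
  match p {
    FeePriority::Unimportant => 1,
    FeePriority::Normal => 2,
    FeePriority::Elevated => 3,
    FeePriority::Priority => 4,
    FeePriority::Custom { priority } => priority,
  }
}

fn main() {
  // The old `Low` variant and the new `Unimportant` both encode to 1, so the
  // values sent over RPC are unchanged by the rename
  assert_eq!(fee_priority(FeePriority::Unimportant), 1);
  assert_eq!(fee_priority(FeePriority::Normal), 2);
  assert_eq!(fee_priority(FeePriority::Elevated), 3);
  assert_eq!(fee_priority(FeePriority::Priority), 4);
  assert_eq!(fee_priority(FeePriority::Custom { priority: 7 }), 7);
}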
@@ -291,8 +297,8 @@ impl FeePriority {

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub(crate) enum InternalPayment {
  Payment((MoneroAddress, u64)),
  Change((MoneroAddress, Option<Zeroizing<Scalar>>), u64),
  Payment((MoneroAddress, u64), bool),
  Change((MoneroAddress, u64), Option<Zeroizing<Scalar>>),
}

/// The eventual output of a SignableTransaction.
@@ -352,6 +358,14 @@ impl Change {

  /// Create a fingerprintable change output specification which will harm privacy. Only use this
  /// if you know what you're doing.
  ///
  /// If the change address is None, there are 2 potential fingerprints:
  ///
  /// 1) The change in the tx is shunted to the fee (fingerprintable fee).
  ///
  /// 2) If there are 2 outputs in the tx, there would be no payment ID as is the case when the
  ///    reference wallet creates 2 output txs, since monero-serai doesn't know which output
  ///    to tie the dummy payment ID to.
  pub fn fingerprintable(address: Option<MoneroAddress>) -> Change {
    Change { address, view: None }
  }
@@ -362,9 +376,9 @@ fn need_additional(payments: &[InternalPayment]) -> (bool, bool) {
  let subaddresses = payments
    .iter()
    .filter(|payment| match *payment {
      InternalPayment::Payment(payment) => payment.0.is_subaddress(),
      InternalPayment::Change(change, _) => {
        if change.1.is_some() {
      InternalPayment::Payment(payment, _) => payment.0.is_subaddress(),
      InternalPayment::Change(change, change_view) => {
        if change_view.is_some() {
          has_change_view = true;
          // It should not be possible to construct a change specification to a subaddress with a
          // view key
@@ -391,7 +405,7 @@ fn sanity_check_change_payment_quantity(payments: &[InternalPayment], has_change
    payments
      .iter()
      .filter(|payment| match *payment {
        InternalPayment::Payment(_) => false,
        InternalPayment::Payment(_, _) => false,
        InternalPayment::Change(_, _) => true,
      })
      .count(),
@@ -463,6 +477,13 @@ impl SignableTransaction {
      Err(TransactionError::NoChange)?;
    }

    // All 2 output txs created by the reference wallet have payment IDs to avoid
    // fingerprinting integrated addresses. Note: we won't create a dummy payment
    // ID if we create a 0-change 2-output tx since we don't know which output should
    // receive the payment ID and such a tx is fingerprintable to monero-serai anyway
    let need_dummy_payment_id = !has_payment_id && payments.len() == 1;
    has_payment_id |= need_dummy_payment_id;

    // Get the outgoing amount ignoring fees
    let out_amount = payments.iter().map(|payment| payment.1).sum::<u64>();
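The dummy ID then flows through the same XOR encryption as a real payment ID, so the two are indistinguishable on-chain. A minimal sketch of that step, with the mask derivation from the shared key elided and a fixed stand-in mask used instead:

// The mask (`payment_id_xor`) is derived from the sender-receiver shared key in the
// real code; here it's a hardcoded stand-in purely for illustration.
fn encrypt_payment_id(id: [u8; 8], payment_id_xor: [u8; 8]) -> [u8; 8] {
  (u64::from_le_bytes(id) ^ u64::from_le_bytes(payment_id_xor)).to_le_bytes()
}

fn main() {
  let mask = [0x5e, 0x11, 0x8a, 0x3d, 0x27, 0xc4, 0x90, 0x02]; // stand-in mask
  let dummy = [0u8; 8];
  let encrypted = encrypt_payment_id(dummy, mask);
  // XOR is its own inverse, so the receiver decrypts with the same function
  assert_eq!(encrypt_payment_id(encrypted, mask), dummy);
  // A dummy ID encrypts to the mask itself, indistinguishable from a real ID
  assert_eq!(encrypted, mask);
}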
@@ -472,19 +493,21 @@ impl SignableTransaction {
    }

    // Collect payments in a container that includes a change output if a change address is provided
    let mut payments = payments.into_iter().map(InternalPayment::Payment).collect::<Vec<_>>();
    let mut payments = payments
      .into_iter()
      .map(|payment| InternalPayment::Payment(payment, need_dummy_payment_id))
      .collect::<Vec<_>>();
    debug_assert!(!need_dummy_payment_id || (payments.len() == 1 && change.address.is_some()));

    if let Some(change_address) = change.address.as_ref() {
      // Push a 0 amount change output that we'll use to do fee calculations.
      // We'll modify the change amount after calculating the fee
      payments.push(InternalPayment::Change((*change_address, change.view.clone()), 0));
      payments.push(InternalPayment::Change((*change_address, 0), change.view.clone()));
    }

    // Determine if we'll need additional pub keys in tx extra
    let (_, additional) = need_additional(&payments);

    // Add a dummy payment ID if there's only 2 payments
    has_payment_id |= outputs == 2;

    // Calculate the extra length
    let extra = Extra::fee_weight(outputs, additional, has_payment_id, data.as_ref());
@@ -524,8 +547,8 @@ impl SignableTransaction {
      let change_payment = payments.last_mut().unwrap();
      debug_assert!(matches!(change_payment, InternalPayment::Change(_, _)));
      *change_payment = InternalPayment::Change(
        (*change_address, change.view.clone()),
        in_amount - out_amount - fee,
        (*change_address, in_amount - out_amount - fee),
        change.view.clone(),
      );
    }
@@ -538,8 +561,8 @@ impl SignableTransaction {
      payments
        .iter()
        .map(|payment| match *payment {
          InternalPayment::Payment(payment) => payment.1,
          InternalPayment::Change(_, amount) => amount,
          InternalPayment::Payment(payment, _) => payment.1,
          InternalPayment::Change(change, _) => change.1,
        })
        .sum::<u64>() +
        fee,
@@ -606,7 +629,7 @@ impl SignableTransaction {
    if modified_change_ecdh {
      for payment in &*payments {
        match payment {
          InternalPayment::Payment(payment) => {
          InternalPayment::Payment(payment, _) => {
            // This should be the only payment and it should be a subaddress
            debug_assert!(payment.0.is_subaddress());
            tx_public_key = tx_key.deref() * payment.0.spend;
@@ -625,8 +648,8 @@ impl SignableTransaction {
      // Downcast the change output to a payment output if it doesn't require special handling
      // regarding its view key
      payment = if !modified_change_ecdh {
        if let InternalPayment::Change(change, amount) = &payment {
          InternalPayment::Payment((change.0, *amount))
        if let InternalPayment::Change(change, _) = &payment {
          InternalPayment::Payment(*change, false)
        } else {
          payment
        }
@@ -635,13 +658,14 @@ impl SignableTransaction {
      };

      let (output, payment_id) = match payment {
        InternalPayment::Payment(payment) => {
        InternalPayment::Payment(payment, need_dummy_payment_id) => {
          // If this is a subaddress, generate a dedicated r. Else, reuse the TX key
          let dedicated = Zeroizing::new(random_scalar(&mut rng));
          let use_dedicated = additional && payment.0.is_subaddress();
          let r = if use_dedicated { &dedicated } else { &tx_key };

          let (mut output, payment_id) = SendOutput::new(r, uniqueness, (o, payment));
          let (mut output, payment_id) =
            SendOutput::new(r, uniqueness, (o, payment, need_dummy_payment_id));
          if modified_change_ecdh {
            debug_assert_eq!(tx_public_key, output.R);
          }
@@ -655,11 +679,11 @@ impl SignableTransaction {
          }
          (output, payment_id)
        }
        InternalPayment::Change(change, amount) => {
        InternalPayment::Change(change, change_view) => {
          // Instead of rA, use Ra, where R is r * subaddress_spend_key
          // change.view must be Some as if it's None, this payment would've been downcast
          let ecdh = tx_public_key * change.1.unwrap().deref();
          SendOutput::change(ecdh, uniqueness, (o, (change.0, amount)))
          let ecdh = tx_public_key * change_view.unwrap().deref();
          SendOutput::change(ecdh, uniqueness, (o, change, false))
        }
      };
@@ -667,16 +691,6 @@ impl SignableTransaction {
      id = id.or(payment_id);
    }

    // Include a random payment ID if we don't actually have one
    // It prevents transactions from leaking if they're sending to integrated addresses or not
    // Only do this if we only have two outputs though, as Monero won't add a dummy if there's
    // more than two outputs
    if outputs.len() <= 2 {
      let mut rand = [0; 8];
      rng.fill_bytes(&mut rand);
      id = id.or(Some(rand));
    }

    (tx_public_key, additional_keys, outputs, id)
  }
@@ -949,21 +963,26 @@ impl Eventuality {

  fn write_payment<W: io::Write>(payment: &InternalPayment, w: &mut W) -> io::Result<()> {
    match payment {
      InternalPayment::Payment(payment) => {
      InternalPayment::Payment(payment, need_dummy_payment_id) => {
        w.write_all(&[0])?;
        write_vec(write_byte, payment.0.to_string().as_bytes(), w)?;
        w.write_all(&payment.1.to_le_bytes())
        w.write_all(&payment.1.to_le_bytes())?;
        if *need_dummy_payment_id {
          w.write_all(&[1])
        } else {
          w.write_all(&[0])
        }
      }
      InternalPayment::Change(change, amount) => {
      InternalPayment::Change(change, change_view) => {
        w.write_all(&[1])?;
        write_vec(write_byte, change.0.to_string().as_bytes(), w)?;
        if let Some(view) = change.1.as_ref() {
        w.write_all(&change.1.to_le_bytes())?;
        if let Some(view) = change_view.as_ref() {
          w.write_all(&[1])?;
          write_scalar(view, w)?;
          write_scalar(view, w)
        } else {
          w.write_all(&[0])?;
          w.write_all(&[0])
        }
        w.write_all(&amount.to_le_bytes())
      }
    }
  }
@@ -988,17 +1007,21 @@ impl Eventuality {

  fn read_payment<R: io::Read>(r: &mut R) -> io::Result<InternalPayment> {
    Ok(match read_byte(r)? {
      0 => InternalPayment::Payment((read_address(r)?, read_u64(r)?)),
      0 => InternalPayment::Payment(
        (read_address(r)?, read_u64(r)?),
        match read_byte(r)? {
          0 => false,
          1 => true,
          _ => Err(io::Error::other("invalid need additional"))?,
        },
      ),
      1 => InternalPayment::Change(
        (
          read_address(r)?,
          match read_byte(r)? {
            0 => None,
            1 => Some(Zeroizing::new(read_scalar(r)?)),
            _ => Err(io::Error::other("invalid change payment"))?,
          },
        ),
        read_u64(r)?,
        (read_address(r)?, read_u64(r)?),
        match read_byte(r)? {
          0 => None,
          1 => Some(Zeroizing::new(read_scalar(r)?)),
          _ => Err(io::Error::other("invalid change view"))?,
        },
      ),
      _ => Err(io::Error::other("invalid payment"))?,
    })
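The serialization above follows a simple discipline: a leading tag byte per variant, fields in a fixed order, and booleans/options encoded as a 0 or 1 byte with anything else rejected, which keeps the encoding canonical. A self-contained round-trip sketch of the same discipline over a reduced enum (not the crate's actual types):

use std::io::{self, Read, Write};

#[derive(Debug, PartialEq)]
enum Payment {
  Payment { amount: u64, need_dummy_payment_id: bool },
}

fn write_payment<W: Write>(p: &Payment, w: &mut W) -> io::Result<()> {
  match p {
    Payment::Payment { amount, need_dummy_payment_id } => {
      w.write_all(&[0])?; // variant tag
      w.write_all(&amount.to_le_bytes())?;
      w.write_all(&[u8::from(*need_dummy_payment_id)])
    }
  }
}

fn read_payment<R: Read>(r: &mut R) -> io::Result<Payment> {
  let mut tag = [0u8; 1];
  r.read_exact(&mut tag)?;
  match tag[0] {
    0 => {
      let mut amount = [0u8; 8];
      r.read_exact(&mut amount)?;
      let mut flag = [0u8; 1];
      r.read_exact(&mut flag)?;
      // Only 0 and 1 are valid bool encodings, keeping the format canonical
      let need_dummy_payment_id = match flag[0] {
        0 => false,
        1 => true,
        _ => Err(io::Error::other("invalid bool"))?,
      };
      Ok(Payment::Payment { amount: u64::from_le_bytes(amount), need_dummy_payment_id })
    }
    _ => Err(io::Error::other("invalid payment")),
  }
}

fn main() -> io::Result<()> {
  let payment = Payment::Payment { amount: 1_000_000, need_dummy_payment_id: true };
  let mut buf = vec![];
  write_payment(&payment, &mut buf)?;
  assert_eq!(read_payment(&mut buf.as_slice())?, payment);
  Ok(())
}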
@@ -120,16 +120,20 @@ impl SignableTransaction {

    for payment in &self.payments {
      match payment {
        InternalPayment::Payment(payment) => {
        InternalPayment::Payment(payment, need_dummy_payment_id) => {
          transcript.append_message(b"payment_address", payment.0.to_string().as_bytes());
          transcript.append_message(b"payment_amount", payment.1.to_le_bytes());
          transcript.append_message(
            b"need_dummy_payment_id",
            [if *need_dummy_payment_id { 1u8 } else { 0u8 }],
          );
        }
        InternalPayment::Change(change, amount) => {
        InternalPayment::Change(change, change_view) => {
          transcript.append_message(b"change_address", change.0.to_string().as_bytes());
          if let Some(view) = change.1.as_ref() {
          transcript.append_message(b"change_amount", change.1.to_le_bytes());
          if let Some(view) = change_view.as_ref() {
            transcript.append_message(b"change_view_key", Zeroizing::new(view.to_bytes()));
          }
          transcript.append_message(b"change_amount", amount.to_le_bytes());
        }
      }
    }
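Every field that influences the transaction, including the new need_dummy_payment_id flag, is bound into the transcript, so two SignableTransactions differing in any field transcript differently. A toy illustration using std's DefaultHasher as a stand-in (the real code uses a proper domain-separated transcript; DefaultHasher is not suitable for anything security-relevant):

use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;

// Length-prefix label and value so ("a", "bc") and ("ab", "c") can't collide
fn append(h: &mut DefaultHasher, label: &[u8], value: &[u8]) {
  h.write(&(label.len() as u64).to_le_bytes());
  h.write(label);
  h.write(&(value.len() as u64).to_le_bytes());
  h.write(value);
}

fn main() {
  let transcript = |need_dummy_payment_id: bool| {
    let mut h = DefaultHasher::new();
    append(&mut h, b"payment_amount", &1_000_000u64.to_le_bytes());
    append(&mut h, b"need_dummy_payment_id", &[u8::from(need_dummy_payment_id)]);
    h.finish()
  };
  // Flipping the flag changes the transcript, so any randomness derived from it changes too
  assert_ne!(transcript(false), transcript(true));
}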
coins/monero/tests/decoys.rs (new file, 162 lines)
@@ -0,0 +1,162 @@
use monero_serai::{
  transaction::Transaction,
  wallet::SpendableOutput,
  rpc::{Rpc, OutputResponse},
  Protocol, DEFAULT_LOCK_WINDOW,
};

mod runner;

test!(
  select_latest_output_as_decoy_canonical,
  (
    // First make an initial tx0
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 2000000000000);
      (builder.build().unwrap(), ())
    },
    |rpc: Rpc<_>, tx: Transaction, mut scanner: Scanner, ()| async move {
      let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 2000000000000);
      SpendableOutput::from(&rpc, output).await.unwrap()
    },
  ),
  (
    // Then make a second tx1
    |protocol: Protocol, rpc: Rpc<_>, mut builder: Builder, addr, state: _| async move {
      let output_tx0: SpendableOutput = state;
      let decoys = Decoys::fingerprintable_canonical_select(
        &mut OsRng,
        &rpc,
        protocol.ring_len(),
        rpc.get_height().await.unwrap(),
        &[output_tx0.clone()],
      )
      .await
      .unwrap();

      let inputs = [output_tx0.clone()].into_iter().zip(decoys).collect::<Vec<_>>();
      builder.add_inputs(&inputs);
      builder.add_payment(addr, 1000000000000);

      (builder.build().unwrap(), (protocol, output_tx0))
    },
    // Then make sure DSA selects freshly unlocked output from tx1 as a decoy
    |rpc: Rpc<_>, tx: Transaction, mut scanner: Scanner, state: (_, _)| async move {
      use rand_core::OsRng;

      let height = rpc.get_height().await.unwrap();

      let output_tx1 =
        SpendableOutput::from(&rpc, scanner.scan_transaction(&tx).not_locked().swap_remove(0))
          .await
          .unwrap();

      // Make sure output from tx1 is in the block in which it unlocks
      let out_tx1: OutputResponse =
        rpc.get_outs(&[output_tx1.global_index]).await.unwrap().swap_remove(0);
      assert_eq!(out_tx1.height, height - DEFAULT_LOCK_WINDOW);
      assert!(out_tx1.unlocked);

      // Select decoys using spendable output from tx0 as the real, and make sure DSA selects
      // the freshly unlocked output from tx1 as a decoy
      let (protocol, output_tx0): (Protocol, SpendableOutput) = state;
      let mut selected_fresh_decoy = false;
      let mut attempts = 1000;
      while !selected_fresh_decoy && attempts > 0 {
        let decoys = Decoys::fingerprintable_canonical_select(
          &mut OsRng, // TODO: use a seeded RNG to consistently select the latest output
          &rpc,
          protocol.ring_len(),
          height,
          &[output_tx0.clone()],
        )
        .await
        .unwrap();

        selected_fresh_decoy = decoys[0].indexes().contains(&output_tx1.global_index);
        attempts -= 1;
      }

      assert!(selected_fresh_decoy);
      assert_eq!(height, rpc.get_height().await.unwrap());
    },
  ),
);

test!(
  select_latest_output_as_decoy,
  (
    // First make an initial tx0
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 2000000000000);
      (builder.build().unwrap(), ())
    },
    |rpc: Rpc<_>, tx: Transaction, mut scanner: Scanner, ()| async move {
      let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 2000000000000);
      SpendableOutput::from(&rpc, output).await.unwrap()
    },
  ),
  (
    // Then make a second tx1
    |protocol: Protocol, rpc: Rpc<_>, mut builder: Builder, addr, state: _| async move {
      let output_tx0: SpendableOutput = state;
      let decoys = Decoys::select(
        &mut OsRng,
        &rpc,
        protocol.ring_len(),
        rpc.get_height().await.unwrap(),
        &[output_tx0.clone()],
      )
      .await
      .unwrap();

      let inputs = [output_tx0.clone()].into_iter().zip(decoys).collect::<Vec<_>>();
      builder.add_inputs(&inputs);
      builder.add_payment(addr, 1000000000000);

      (builder.build().unwrap(), (protocol, output_tx0))
    },
    // Then make sure DSA selects freshly unlocked output from tx1 as a decoy
    |rpc: Rpc<_>, tx: Transaction, mut scanner: Scanner, state: (_, _)| async move {
      use rand_core::OsRng;

      let height = rpc.get_height().await.unwrap();

      let output_tx1 =
        SpendableOutput::from(&rpc, scanner.scan_transaction(&tx).not_locked().swap_remove(0))
          .await
          .unwrap();

      // Make sure output from tx1 is in the block in which it unlocks
      let out_tx1: OutputResponse =
        rpc.get_outs(&[output_tx1.global_index]).await.unwrap().swap_remove(0);
      assert_eq!(out_tx1.height, height - DEFAULT_LOCK_WINDOW);
      assert!(out_tx1.unlocked);

      // Select decoys using spendable output from tx0 as the real, and make sure DSA selects
      // the freshly unlocked output from tx1 as a decoy
      let (protocol, output_tx0): (Protocol, SpendableOutput) = state;
      let mut selected_fresh_decoy = false;
      let mut attempts = 1000;
      while !selected_fresh_decoy && attempts > 0 {
        let decoys = Decoys::select(
          &mut OsRng, // TODO: use a seeded RNG to consistently select the latest output
          &rpc,
          protocol.ring_len(),
          height,
          &[output_tx0.clone()],
        )
        .await
        .unwrap();

        selected_fresh_decoy = decoys[0].indexes().contains(&output_tx1.global_index);
        attempts -= 1;
      }

      assert!(selected_fresh_decoy);
      assert_eq!(height, rpc.get_height().await.unwrap());
    },
  ),
);
@@ -17,6 +17,7 @@ use monero_serai::{
    SpendableOutput, Fee,
  },
  transaction::Transaction,
  DEFAULT_LOCK_WINDOW,
};

pub fn random_address() -> (Scalar, ViewPair, MoneroAddress) {
@@ -36,7 +37,6 @@ pub fn random_address() -> (Scalar, ViewPair, MoneroAddress) {

// TODO: Support transactions already on-chain
// TODO: Don't have a side effect of mining more blocks than needed under race conditions
// TODO: mine as much as needed instead of default 10 blocks
pub async fn mine_until_unlocked(rpc: &Rpc<HttpRpc>, addr: &str, tx_hash: [u8; 32]) {
  // mine until tx is in a block
  let mut height = rpc.get_height().await.unwrap();
@@ -46,15 +46,23 @@ pub async fn mine_until_unlocked(rpc: &Rpc<HttpRpc>, addr: &str, tx_hash: [u8; 3
    found = match block.txs.iter().find(|&&x| x == tx_hash) {
      Some(_) => true,
      None => {
        rpc.generate_blocks(addr, 1).await.unwrap();
        height += 1;
        height = rpc.generate_blocks(addr, 1).await.unwrap().1 + 1;
        false
      }
    }
  }

  // mine 9 more blocks to unlock the tx
  rpc.generate_blocks(addr, 9).await.unwrap();
  // Mine until tx's outputs are unlocked
  let o_indexes: Vec<u64> = rpc.get_o_indexes(tx_hash).await.unwrap();
  while rpc
    .get_outs(&o_indexes)
    .await
    .unwrap()
    .into_iter()
    .all(|o| (!(o.unlocked && height >= (o.height + DEFAULT_LOCK_WINDOW))))
  {
    height = rpc.generate_blocks(addr, 1).await.unwrap().1 + 1;
  }
}

// Mines 60 blocks and returns an unlocked miner TX output.
@@ -86,7 +94,7 @@ pub fn check_weight_and_fee(tx: &Transaction, fee_rate: Fee) {
}

pub async fn rpc() -> Rpc<HttpRpc> {
  let rpc = HttpRpc::new("http://127.0.0.1:18081".to_string()).await.unwrap();
  let rpc = HttpRpc::new("http://serai:seraidex@127.0.0.1:18081".to_string()).await.unwrap();

  // Only run once
  if rpc.get_height().await.unwrap() != 1 {
@@ -212,7 +220,7 @@ macro_rules! test {

      let builder = SignableTransactionBuilder::new(
        protocol,
        rpc.get_fee(protocol, FeePriority::Low).await.unwrap(),
        rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
        Change::new(
          &ViewPair::new(
            &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
@@ -260,12 +268,12 @@ macro_rules! test {
      let temp = Box::new({
        let mut builder = builder.clone();

        let decoys = Decoys::select(
        let decoys = Decoys::fingerprintable_canonical_select(
          &mut OsRng,
          &rpc,
          protocol.ring_len(),
          rpc.get_height().await.unwrap() - 1,
          &[miner_tx.clone()]
          rpc.get_height().await.unwrap(),
          &[miner_tx.clone()],
        )
        .await
        .unwrap();
@@ -1,6 +1,9 @@
use rand::RngCore;

use monero_serai::{transaction::Transaction, wallet::address::SubaddressIndex};
use monero_serai::{
  transaction::Transaction,
  wallet::{address::SubaddressIndex, extra::PaymentId},
};

mod runner;

@@ -16,6 +19,8 @@ test!(
  |_, tx: Transaction, _, mut state: Scanner| async move {
    let output = state.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    let dummy_payment_id = PaymentId::Encrypted([0u8; 8]);
    assert_eq!(output.metadata.payment_id, Some(dummy_payment_id));
  },
),
);
@@ -57,7 +62,7 @@ test!(
  |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
    let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    assert_eq!(output.metadata.payment_id, state.1);
    assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
  },
),
);
@@ -140,7 +145,7 @@ test!(
  |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
    let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    assert_eq!(output.metadata.payment_id, state.1);
    assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
  },
),
);
@@ -174,7 +179,7 @@ test!(
  |_, tx: Transaction, _, mut state: (Scanner, [u8; 8], SubaddressIndex)| async move {
    let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    assert_eq!(output.metadata.payment_id, state.1);
    assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
    assert_eq!(output.metadata.subaddress, Some(state.2));
  },
),
@@ -259,7 +264,7 @@ test!(
  |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
    let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    assert_eq!(output.metadata.payment_id, state.1);
    assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
  },
),
);
@@ -293,7 +298,7 @@ test!(
  |_, tx: Transaction, _, mut state: (Scanner, [u8; 8], SubaddressIndex)| async move {
    let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
    assert_eq!(output.commitment().amount, 5);
    assert_eq!(output.metadata.payment_id, state.1);
    assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
    assert_eq!(output.metadata.subaddress, Some(state.2));
  },
),
@@ -24,11 +24,11 @@ async fn add_inputs(
    spendable_outputs.push(SpendableOutput::from(rpc, output).await.unwrap());
  }

  let decoys = Decoys::select(
  let decoys = Decoys::fingerprintable_canonical_select(
    &mut OsRng,
    rpc,
    protocol.ring_len(),
    rpc.get_height().await.unwrap() - 1,
    rpc.get_height().await.unwrap(),
    &spendable_outputs,
  )
  .await
@@ -110,7 +110,7 @@ test!(

  let mut builder = SignableTransactionBuilder::new(
    protocol,
    rpc.get_fee(protocol, FeePriority::Low).await.unwrap(),
    rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
    Change::new(&change_view, false),
  );
  add_inputs(protocol, &rpc, vec![outputs.first().unwrap().clone()], &mut builder).await;
@@ -294,7 +294,7 @@ test!(

  let mut builder = SignableTransactionBuilder::new(
    protocol,
    rpc.get_fee(protocol, FeePriority::Low).await.unwrap(),
    rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
    Change::fingerprintable(None),
  );
  add_inputs(protocol, &rpc, vec![outputs.first().unwrap().clone()], &mut builder).await;
@@ -10,7 +10,7 @@ use monero_serai::{
  rpc::{EmptyResponse, HttpRpc, Rpc},
  wallet::{
    address::{Network, AddressSpec, SubaddressIndex, MoneroAddress},
    extra::{MAX_TX_EXTRA_NONCE_SIZE, Extra},
    extra::{MAX_TX_EXTRA_NONCE_SIZE, Extra, PaymentId},
    Scanner,
  },
};
@@ -35,7 +35,7 @@ async fn make_integrated_address(rpc: &Rpc<HttpRpc>, payment_id: [u8; 8]) -> Str
}

async fn initialize_rpcs() -> (Rpc<HttpRpc>, Rpc<HttpRpc>, String) {
  let wallet_rpc = HttpRpc::new("http://127.0.0.1:6061".to_string()).await.unwrap();
  let wallet_rpc = HttpRpc::new("http://127.0.0.1:18082".to_string()).await.unwrap();
  let daemon_rpc = runner::rpc().await;

  #[derive(Debug, Deserialize)]
@@ -90,9 +90,7 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {

  // TODO: Needs https://github.com/monero-project/monero/pull/8882
  // let fee_rate = daemon_rpc
  //   // Uses `FeePriority::Low` instead of `FeePriority::Lowest` because wallet RPC
  //   // adjusts `monero_rpc::TransferPriority::Default` up by 1
  //   .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Low)
  //   .get_fee(daemon_rpc.get_protocol().await.unwrap(), FeePriority::Unimportant)
  //   .await
  //   .unwrap();

@@ -113,13 +111,17 @@ async fn from_wallet_rpc_to_self(spec: AddressSpec) {
  // runner::check_weight_and_fee(&tx, fee_rate);

  match spec {
    AddressSpec::Subaddress(index) => assert_eq!(output.metadata.subaddress, Some(index)),
    AddressSpec::Subaddress(index) => {
      assert_eq!(output.metadata.subaddress, Some(index));
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted([0u8; 8])));
    }
    AddressSpec::Integrated(payment_id) => {
      assert_eq!(output.metadata.payment_id, payment_id);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(payment_id)));
      assert_eq!(output.metadata.subaddress, None);
    }
    AddressSpec::Standard | AddressSpec::Featured { .. } => {
      assert_eq!(output.metadata.subaddress, None)
      assert_eq!(output.metadata.subaddress, None);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted([0u8; 8])));
    }
  }
  assert_eq!(output.commitment().amount, 1000000000000);
@@ -157,6 +159,7 @@ struct Transfer {
#[derive(Debug, Deserialize)]
struct TransfersResponse {
  transfer: Transfer,
  transfers: Vec<Transfer>,
}

test!(
@@ -180,6 +183,7 @@ test!(
      .unwrap();
    assert_eq!(transfer.transfer.subaddr_index, Index { major: 0, minor: 0 });
    assert_eq!(transfer.transfer.amount, 1000000);
    assert_eq!(transfer.transfer.payment_id, hex::encode([0u8; 8]));
  },
),
);
@@ -217,6 +221,7 @@ test!(
      .unwrap();
    assert_eq!(transfer.transfer.subaddr_index, Index { major: data.1, minor: 0 });
    assert_eq!(transfer.transfer.amount, 1000000);
    assert_eq!(transfer.transfer.payment_id, hex::encode([0u8; 8]));

    // Make sure only one R was included in TX extra
    assert!(Extra::read::<&[u8]>(&mut tx.prefix.extra.as_ref())
@@ -229,6 +234,62 @@ test!(
  ),
);

test!(
  send_to_wallet_rpc_subaddresses,
  (
    |_, mut builder: Builder, _| async move {
      // initialize rpc
      let (wallet_rpc, daemon_rpc, _) = initialize_rpcs().await;

      // make the subaddress
      #[derive(Debug, Deserialize)]
      struct AddressesResponse {
        addresses: Vec<String>,
        address_index: u32,
      }
      let addrs: AddressesResponse = wallet_rpc
        .json_rpc_call("create_address", Some(json!({ "account_index": 0, "count": 2 })))
        .await
        .unwrap();
      assert!(addrs.address_index != 0);
      assert!(addrs.addresses.len() == 2);

      builder.add_payments(&[
        (MoneroAddress::from_str(Network::Mainnet, &addrs.addresses[0]).unwrap(), 1000000),
        (MoneroAddress::from_str(Network::Mainnet, &addrs.addresses[1]).unwrap(), 2000000),
      ]);
      (builder.build().unwrap(), (wallet_rpc, daemon_rpc, addrs.address_index))
    },
    |_, tx: Transaction, _, data: (Rpc<HttpRpc>, Rpc<HttpRpc>, u32)| async move {
      // confirm receipt
      let _: EmptyResponse = data.0.json_rpc_call("refresh", None).await.unwrap();
      let transfer: TransfersResponse = data
        .0
        .json_rpc_call(
          "get_transfer_by_txid",
          Some(json!({ "txid": hex::encode(tx.hash()), "account_index": 0 })),
        )
        .await
        .unwrap();

      assert_eq!(transfer.transfers.len(), 2);
      for t in transfer.transfers {
        match t.amount {
          1000000 => assert_eq!(t.subaddr_index, Index { major: 0, minor: data.2 }),
          2000000 => assert_eq!(t.subaddr_index, Index { major: 0, minor: data.2 + 1 }),
          _ => unreachable!(),
        }
      }

      // Make sure 3 additional pub keys are included in TX extra
      let keys =
        Extra::read::<&[u8]>(&mut tx.prefix.extra.as_ref()).unwrap().keys().unwrap().1.unwrap();

      assert_eq!(keys.len(), 3);
    },
  ),
);

test!(
  send_to_wallet_rpc_integrated,
  (
@@ -42,9 +42,9 @@ macro_rules! create_db {
  }) => {
    $(
      #[derive(Clone, Debug)]
      pub struct $field_name;
      pub(crate) struct $field_name;
      impl $field_name {
        pub fn key($($arg: $arg_type),*) -> Vec<u8> {
        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
            stringify!($db_name).as_bytes(),
@@ -52,17 +52,17 @@ macro_rules! create_db {
            ($($arg),*).encode()
          )
        }
        pub fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
          let key = $field_name::key($($arg),*);
          txn.put(&key, borsh::to_vec(data).unwrap());
        }
        pub fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
          getter.get($field_name::key($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
          })
        }
        #[allow(dead_code)]
        pub fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
          txn.del(&$field_name::key($($arg),*))
        }
      }
@@ -83,7 +83,7 @@ macro_rules! db_channel {
    }

    impl $field_name {
      pub fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
      pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
        // Use index 0 to store the amount of messages
        let messages_sent_key = $field_name::key($($arg),*, 0);
        let messages_sent = txn.get(&messages_sent_key).map(|counter| {
@@ -98,7 +98,7 @@ macro_rules! db_channel {

        $field_name::set(txn, $($arg),*, index_to_use, value);
      }
      pub fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
      pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
        let messages_recvd_key = $field_name::key($($arg),*, 1);
        let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
          u32::from_le_bytes(counter.try_into().unwrap())
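db_channel! layers a FIFO over the key scheme: index 0 counts messages sent, index 1 counts messages received, and messages themselves live at later indexes. An in-memory model of that counter scheme follows; the +2 offset for message slots is an assumption for illustration, and the real macro derives its keys from the DB name, field name, and scale-encoded args rather than a plain map.

use std::collections::HashMap;

struct Channel {
  map: HashMap<u32, Vec<u8>>,
}

impl Channel {
  fn new() -> Self {
    Channel { map: HashMap::new() }
  }
  // Counters are stored as little-endian u32s at reserved indexes 0 and 1
  fn counter(&self, index: u32) -> u32 {
    self.map.get(&index).map(|v| u32::from_le_bytes(v.clone().try_into().unwrap())).unwrap_or(0)
  }
  fn send(&mut self, value: Vec<u8>) {
    let sent = self.counter(0);
    self.map.insert(sent + 2, value);
    self.map.insert(0, (sent + 1).to_le_bytes().to_vec());
  }
  fn try_recv(&mut self) -> Option<Vec<u8>> {
    let recvd = self.counter(1);
    let value = self.map.remove(&(recvd + 2))?;
    self.map.insert(1, (recvd + 1).to_le_bytes().to_vec());
    Some(value)
  }
}

fn main() {
  let mut channel = Channel::new();
  channel.send(b"hello".to_vec());
  channel.send(b"world".to_vec());
  // Messages come back in send order, and an empty channel yields None
  assert_eq!(channel.try_recv(), Some(b"hello".to_vec()));
  assert_eq!(channel.try_recv(), Some(b"world".to_vec()));
  assert_eq!(channel.try_recv(), None);
}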
common/env/src/lib.rs (3 lines changed)
@@ -3,6 +3,7 @@

// Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> {
  // TODO: Move this to Kubernetes
  // TODO: Move this to a proper secret store
  // TODO: Unset this variable
  std::env::var(variable).ok()
}
@@ -17,11 +17,13 @@ rustdoc-args = ["--cfg", "docsrs"]
workspace = true

[dependencies]
# Deprecated here means to enable deprecated warnings, not to restore deprecated APIs
hyper = { version = "0.14", default-features = false, features = ["http1", "tcp", "client", "runtime", "backports", "deprecated"] }
tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }

hyper-rustls = { version = "0.24", default-features = false, features = ["http1", "native-tokio"], optional = true }
hyper-rustls = { version = "0.26", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true }
@@ -5,14 +5,13 @@ use std::sync::Arc;

use tokio::sync::Mutex;

use tower_service::Service as TowerService;
#[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{
  Uri,
  header::HeaderValue,
  body::Body,
  service::Service,
  client::{HttpConnector, conn::http1::SendRequest},
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
  rt::tokio::TokioExecutor,
  client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;
@@ -29,6 +28,7 @@ pub enum Error {
  InconsistentHost,
  ConnectionError(Box<dyn Send + Sync + std::error::Error>),
  Hyper(hyper::Error),
  HyperUtil(hyper_util::client::legacy::Error),
}

#[cfg(not(feature = "tls"))]
@@ -38,8 +38,12 @@ type Connector = HttpsConnector<HttpConnector>;

#[derive(Clone, Debug)]
enum Connection {
  ConnectionPool(hyper::Client<Connector>),
  Connection { connector: Connector, host: Uri, connection: Arc<Mutex<Option<SendRequest<Body>>>> },
  ConnectionPool(HyperClient<Connector, Full<Bytes>>),
  Connection {
    connector: Connector,
    host: Uri,
    connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
  },
}

#[derive(Clone, Debug)]
@@ -49,17 +53,23 @@ pub struct Client {

impl Client {
  fn connector() -> Connector {
    let mut res = HttpConnector::new();
    res.set_keepalive(Some(core::time::Duration::from_secs(60)));
    #[cfg(feature = "tls")]
    let res =
      HttpsConnectorBuilder::new().with_native_roots().https_or_http().enable_http1().build();
    #[cfg(not(feature = "tls"))]
    let res = HttpConnector::new();
    let res = HttpsConnectorBuilder::new()
      .with_native_roots()
      .expect("couldn't fetch system's SSL roots")
      .https_or_http()
      .enable_http1()
      .wrap_connector(res);
    res
  }

  pub fn with_connection_pool() -> Client {
    Client {
      connection: Connection::ConnectionPool(hyper::Client::builder().build(Self::connector())),
      connection: Connection::ConnectionPool(
        HyperClient::builder(TokioExecutor::new()).build(Self::connector()),
      ),
    }
  }

@@ -112,7 +122,9 @@ impl Client {
    }

    let response = match &self.connection {
      Connection::ConnectionPool(client) => client.request(request).await.map_err(Error::Hyper)?,
      Connection::ConnectionPool(client) => {
        client.request(request).await.map_err(Error::HyperUtil)?
      }
      Connection::Connection { connector, host, connection } => {
        let mut connection_lock = connection.lock().await;
@@ -1,12 +1,13 @@
use hyper::body::Body;
use hyper::body::Bytes;
#[cfg(feature = "basic-auth")]
use hyper::header::HeaderValue;
pub use http_body_util::Full;

#[cfg(feature = "basic-auth")]
use crate::Error;

#[derive(Debug)]
pub struct Request(pub(crate) hyper::Request<Body>);
pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
impl Request {
  #[cfg(feature = "basic-auth")]
  fn username_password_from_uri(&self) -> Result<(String, String), Error> {
@@ -59,8 +60,8 @@ impl Request {
    let _ = self.basic_auth_from_uri();
  }
}
impl From<hyper::Request<Body>> for Request {
  fn from(request: hyper::Request<Body>) -> Request {
impl From<hyper::Request<Full<Bytes>>> for Request {
  fn from(request: hyper::Request<Full<Bytes>>) -> Request {
    Request(request)
  }
}
@@ -1,14 +1,16 @@
use hyper::{
  StatusCode,
  header::{HeaderValue, HeaderMap},
  body::{Buf, Body},
  body::{Buf, Incoming},
};
use http_body_util::BodyExt;

use crate::{Client, Error};

// Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)]
#[derive(Debug)]
pub struct Response<'a>(pub(crate) hyper::Response<Body>, pub(crate) &'a Client);
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
impl<'a> Response<'a> {
  pub fn status(&self) -> StatusCode {
    self.0.status()
@@ -17,6 +19,6 @@ impl<'a> Response<'a> {
    self.0.headers()
  }
  pub async fn body(self) -> Result<impl std::io::Read, Error> {
    hyper::body::aggregate(self.0.into_body()).await.map(Buf::reader).map_err(Error::Hyper)
    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
  }
}
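For reference, a minimal request through the hyper 1 stack adopted here, assuming the hyper/hyper-util/http-body-util/tokio versions pinned above: the pooled client comes from hyper-util's legacy module, outgoing bodies are Full<Bytes>, and the Incoming response body is drained with BodyExt::collect. The URL is a placeholder.

use http_body_util::{BodyExt, Full};
use hyper::{body::Bytes, Request};
use hyper_util::{client::legacy::Client, rt::TokioExecutor};

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
  // The legacy client replaces hyper 0.14's built-in pooled hyper::Client
  let client = Client::builder(TokioExecutor::new()).build_http::<Full<Bytes>>();
  let request = Request::builder()
    .uri("http://127.0.0.1:18081/get_height")
    .body(Full::new(Bytes::new()))?;
  let response = client.request(request).await?;
  // collect() drains the Incoming body; to_bytes() then exposes it contiguously
  let body = response.into_body().collect().await?.to_bytes();
  println!("{}", String::from_utf8_lossy(&body));
  Ok(())
}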
@@ -64,6 +64,20 @@ mod shims {
    }
  }

  pub trait BufRead: Read {
    fn fill_buf(&mut self) -> Result<&[u8]>;
    fn consume(&mut self, amt: usize);
  }

  impl BufRead for &[u8] {
    fn fill_buf(&mut self) -> Result<&[u8]> {
      Ok(*self)
    }
    fn consume(&mut self, amt: usize) {
      *self = &self[amt ..];
    }
  }

  pub trait Write {
    fn write(&mut self, buf: &[u8]) -> Result<usize>;
    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
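This shim mirrors std's BufRead, whose &[u8] impl behaves identically: fill_buf exposes the remaining bytes and consume advances the slice in place. A quick demonstration of the same semantics using std:

use std::io::BufRead;

fn main() {
  let mut reader: &[u8] = b"serai";
  assert_eq!(reader.fill_buf().unwrap(), b"serai");
  // consume(2) advances past "se", leaving "rai" buffered
  reader.consume(2);
  assert_eq!(reader.fill_buf().unwrap(), b"rai");
  reader.consume(3);
  assert!(reader.fill_buf().unwrap().is_empty());
}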
@@ -32,6 +32,7 @@ frost-schnorrkel = { path = "../crypto/schnorrkel" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }

zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" }

@@ -39,7 +40,7 @@ processor-messages = { package = "serai-processor-messages", path = "../processo
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" }

sp-application-crypto = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "experimental", default-features = false, features = ["std"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
@@ -48,15 +49,16 @@ borsh = { version = "1", default-features = false, features = ["std", "derive",
log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "mdns", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "gossipsub", "macros"] }

[dev-dependencies]
futures-util = { version = "0.3", default-features = false, features = ["std"] }
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "experimental", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/polkadot-sdk", branch = "experimental", default-features = false, features = ["std"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }

[features]
longer-reattempts = []
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]
@@ -9,7 +9,10 @@ use zeroize::{Zeroize, Zeroizing};
use rand_core::OsRng;

use ciphersuite::{
  group::ff::{Field, PrimeField},
  group::{
    ff::{Field, PrimeField},
    GroupEncoding,
  },
  Ciphersuite, Ristretto,
};
use schnorr::SchnorrSignature;
@@ -63,6 +66,10 @@ use cosign_evaluator::CosignEvaluator;
#[cfg(test)]
pub mod tests;

#[global_allocator]
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
  zalloc::ZeroizingAlloc(std::alloc::System);

#[derive(Clone)]
pub struct ActiveTributary<D: Db, P: P2p> {
  pub spec: TributarySpec,
@@ -236,7 +243,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
      coordinator::ProcessorMessage::InvalidParticipant { id, .. } |
      coordinator::ProcessorMessage::CosignPreprocess { id, .. } |
      coordinator::ProcessorMessage::BatchPreprocess { id, .. } |
      coordinator::ProcessorMessage::SlashReportPreprocess { id, .. } |
      coordinator::ProcessorMessage::SubstrateShare { id, .. } => Some(id.session),
      // This causes an action on our P2P net yet not on any Tributary
      coordinator::ProcessorMessage::CosignedBlock { block_number, block, signature } => {
        let cosigned_block = CosignedBlock {
          network,
@@ -254,6 +263,55 @@ async fn handle_processor_message<D: Db, P: P2p>(
        P2p::broadcast(p2p, P2pMessageKind::CosignedBlock, buf).await;
        None
      }
      // This causes an action on Substrate yet not on any Tributary
      coordinator::ProcessorMessage::SignedSlashReport { session, signature } => {
        let set = ValidatorSet { network, session: *session };
        let signature: &[u8] = signature.as_ref();
        let signature = serai_client::Signature(signature.try_into().unwrap());

        let slashes = crate::tributary::SlashReport::get(&txn, set)
          .expect("signed slash report despite not having slash report locally");
        let slashes_pubs =
          slashes.iter().map(|(address, points)| (Public(*address), *points)).collect::<Vec<_>>();

        let tx = serai_client::SeraiValidatorSets::report_slashes(
          network,
          slashes
            .into_iter()
            .map(|(address, points)| (serai_client::SeraiAddress(address), points))
            .collect::<Vec<_>>()
            .try_into()
            .unwrap(),
          signature.clone(),
        );

        loop {
          if serai.publish(&tx).await.is_ok() {
            break None;
          }

          // Check if the slashes shouldn't still be reported. If not, break.
          let Ok(serai) = serai.as_of_latest_finalized_block().await else {
            tokio::time::sleep(core::time::Duration::from_secs(5)).await;
            continue;
          };
          let Ok(key) = serai.validator_sets().key_pending_slash_report(network).await else {
            tokio::time::sleep(core::time::Duration::from_secs(5)).await;
            continue;
          };
          let Some(key) = key else {
            break None;
          };
          // If this is the key for this slash report, then this will verify
          use sp_application_crypto::RuntimePublic;
          if !key.verify(
            &serai_client::validator_sets::primitives::report_slashes_message(&set, &slashes_pubs),
            &signature,
          ) {
            break None;
          }
        }
      }
    },
    // These don't return a relevant Tributary as there's no Tributary with action expected
    ProcessorMessage::Substrate(inner_msg) => match inner_msg {
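The publish loop above follows a general pattern: retry until the transaction lands, or until a freshly fetched view of the chain shows the report is no longer expected. A generic sketch of that pattern with tokio; publish and still_wanted are stand-ins for the Serai RPC calls, not real APIs.

use std::time::Duration;

async fn publish_until_irrelevant(
  mut publish: impl FnMut() -> bool,
  mut still_wanted: impl FnMut() -> Option<bool>,
) {
  loop {
    if publish() {
      break;
    }
    match still_wanted() {
      // Chain state was fetched and the report is no longer pending
      Some(false) => break,
      // Report still pending, or state couldn't be fetched: wait and retry
      _ => tokio::time::sleep(Duration::from_secs(5)).await,
    }
  }
}

#[tokio::main]
async fn main() {
  let mut attempts = 0;
  publish_until_irrelevant(
    || {
      attempts += 1;
      attempts >= 3 // succeed on the third try
    },
    || Some(true),
  )
  .await;
  assert_eq!(attempts, 3);
}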
@@ -371,14 +429,22 @@ async fn handle_processor_message<D: Db, P: P2p>(
        }]
      }
      key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
        // This doesn't need the ID since it's a Provided transaction which everyone will provide
        // With this provision comes explicit ordering (with regards to other
        // RemoveParticipantDueToDkg transactions) and group consensus
        // Accordingly, this can't be replayed
        // It could be included on-chain early/late with regards to the chain's active attempt,
        // which attempt scheduling is written to make a non-issue by auto re-attempting once a
        // fatal slash occurs, regardless of timing
        vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: faulty }]
        // This doesn't have guaranteed timing
        //
        // While the party *should* be fatally slashed and not included in future attempts,
        // they'll actually be fatally slashed (assuming liveness before the Tributary retires)
        // and not included in future attempts *which begin after the latency window completes*
        let participant = spec
          .reverse_lookup_i(
            &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
              .expect("participating in DKG attempt yet we didn't save who was removed"),
            faulty,
          )
          .unwrap();
        vec![Transaction::RemoveParticipantDueToDkg {
          participant,
          signed: Transaction::empty_signed(),
        }]
      }
      key_gen::ProcessorMessage::Shares { id, mut shares } => {
        // Create a MuSig-based machine to inform Substrate of this key generation
@@ -388,7 +454,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
          .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
        let our_i = spec
          .i(&removed, pub_key)
          .expect("processor message to DKG for a session we aren't a validator in");
          .expect("processor message to DKG for an attempt we aren't a validator in");

        // `tx_shares` needs to be done here as while it can be serialized from the HashMap
        // without further context, it can't be deserialized without context
@@ -417,30 +483,13 @@ async fn handle_processor_message<D: Db, P: P2p>(
        }]
      }
      key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
        // Check if the MuSig signature had any errors as if so, we need to provide
        // RemoveParticipantDueToDkg
        // As for the safety of calling error_generating_key_pair, the processor is presumed
        // to only send InvalidShare or GeneratedKeyPair for a given attempt
        let mut txs = if let Some(faulty) =
          crate::tributary::error_generating_key_pair(&mut txn, key, spec, id.attempt)
        {
          vec![Transaction::RemoveParticipantDueToDkg {
            attempt: id.attempt,
            participant: faulty,
          }]
        } else {
          vec![]
        };

        txs.push(Transaction::InvalidDkgShare {
        vec![Transaction::InvalidDkgShare {
          attempt: id.attempt,
          accuser,
          faulty,
          blame,
          signed: Transaction::empty_signed(),
        });

        txs
        }]
      }
      key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
        // TODO2: Check the KeyGenId fields
@@ -454,6 +503,7 @@ async fn handle_processor_message<D: Db, P: P2p>(
          id.attempt,
        );

        // TODO: Move this into generated_key_pair?
        match share {
          Ok(share) => {
            vec![Transaction::DkgConfirmed {
@@ -463,14 +513,32 @@ async fn handle_processor_message<D: Db, P: P2p>(
            }]
          }
          Err(p) => {
            vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant: p }]
            let participant = spec
              .reverse_lookup_i(
                &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
                  .expect("participating in DKG attempt yet we didn't save who was removed"),
                p,
              )
              .unwrap();
            vec![Transaction::RemoveParticipantDueToDkg {
              participant,
              signed: Transaction::empty_signed(),
            }]
          }
        }
      }
      // This is a response to the ordered VerifyBlame, which is why this satisfies the provided
      // transaction's needs to be perfectly ordered
      key_gen::ProcessorMessage::Blame { id, participant } => {
        vec![Transaction::RemoveParticipantDueToDkg { attempt: id.attempt, participant }]
        let participant = spec
          .reverse_lookup_i(
            &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
              .expect("participating in DKG attempt yet we didn't save who was removed"),
            participant,
          )
          .unwrap();
        vec![Transaction::RemoveParticipantDueToDkg {
          participant,
          signed: Transaction::empty_signed(),
        }]
      }
    },
    ProcessorMessage::Sign(msg) => match msg {
@@ -536,7 +604,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
        // slash) and censor transactions (yet don't explicitly ban)
        vec![]
      }
      coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } => {
      coordinator::ProcessorMessage::CosignPreprocess { id, preprocesses } |
      coordinator::ProcessorMessage::SlashReportPreprocess { id, preprocesses } => {
        vec![Transaction::SubstrateSign(SignData {
          plan: id.id,
          attempt: id.attempt,
@@ -651,6 +720,8 @@ async fn handle_processor_message<D: Db, P: P2p>(
      }
      #[allow(clippy::match_same_arms)] // Allowed to preserve layout
      coordinator::ProcessorMessage::CosignedBlock { .. } => unreachable!(),
      #[allow(clippy::match_same_arms)]
      coordinator::ProcessorMessage::SignedSlashReport { .. } => unreachable!(),
    },
    ProcessorMessage::Substrate(inner_msg) => match inner_msg {
      processor_messages::substrate::ProcessorMessage::Batch { .. } |
@@ -941,16 +1012,16 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
  key: Zeroizing<<Ristretto as Ciphersuite>::F>,
  p2p: P,
  processors: Pro,
  serai: Serai,
  serai: Arc<Serai>,
) {
  let serai = Arc::new(serai);

  let (new_tributary_spec_send, mut new_tributary_spec_recv) = mpsc::unbounded_channel();
  // Reload active tributaries from the database
  for spec in ActiveTributaryDb::active_tributaries(&raw_db).1 {
    new_tributary_spec_send.send(spec).unwrap();
  }

  let (perform_slash_report_send, mut perform_slash_report_recv) = mpsc::unbounded_channel();

  let (tributary_retired_send, mut tributary_retired_recv) = mpsc::unbounded_channel();

  // Handle new Substrate blocks
@@ -960,6 +1031,7 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
    processors.clone(),
    serai.clone(),
    new_tributary_spec_send,
    perform_slash_report_send,
    tributary_retired_send,
  ));
@@ -1014,10 +1086,12 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
  let raw_db = raw_db.clone();
  let key = key.clone();

  let specs = Arc::new(RwLock::new(HashMap::new()));
  let tributaries = Arc::new(RwLock::new(HashMap::new()));
  // Spawn a task to maintain a local view of the tributaries for whenever recognized_id is
  // called
  tokio::spawn({
    let specs = specs.clone();
    let tributaries = tributaries.clone();
    let mut set_to_genesis = HashMap::new();
    async move {
@@ -1026,9 +1100,11 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
        Ok(TributaryEvent::NewTributary(tributary)) => {
          set_to_genesis.insert(tributary.spec.set(), tributary.spec.genesis());
          tributaries.write().await.insert(tributary.spec.genesis(), tributary.tributary);
          specs.write().await.insert(tributary.spec.set(), tributary.spec);
        }
        Ok(TributaryEvent::TributaryRetired(set)) => {
          if let Some(genesis) = set_to_genesis.remove(&set) {
            specs.write().await.remove(&set);
            tributaries.write().await.remove(&genesis);
          }
        }
@@ -1041,6 +1117,84 @@ pub async fn run<D: Db, Pro: Processors, P: P2p>(
    }
  });

  // Also spawn a task to handle slash reports, as this needs such a view of tributaries
  tokio::spawn({
    let mut raw_db = raw_db.clone();
    let key = key.clone();
    let tributaries = tributaries.clone();
    async move {
      'task_loop: loop {
        match perform_slash_report_recv.recv().await {
          Some(set) => {
            let (genesis, validators) = loop {
              let specs = specs.read().await;
              let Some(spec) = specs.get(&set) else {
                // If we don't have this Tributary because it's retired, break and move on
                if RetiredTributaryDb::get(&raw_db, set).is_some() {
                  continue 'task_loop;
                }

                // This may happen if the task above is simply slow
                log::warn!("tributary we don't have yet is supposed to perform a slash report");
                continue;
              };
              break (spec.genesis(), spec.validators());
            };

            let mut slashes = vec![];
            for (validator, _) in validators {
              if validator == (<Ristretto as Ciphersuite>::generator() * key.deref()) {
                continue;
              }
              let validator = validator.to_bytes();

              let fatally = tributary::FatallySlashed::get(&raw_db, genesis, validator).is_some();
              // TODO: Properly type this
              let points = if fatally {
                u32::MAX
              } else {
                tributary::SlashPoints::get(&raw_db, genesis, validator).unwrap_or(0)
              };
              slashes.push(points);
            }

            let mut tx = Transaction::SlashReport(slashes, Transaction::empty_signed());
            tx.sign(&mut OsRng, genesis, &key);

            let mut first = true;
            loop {
              if !first {
                sleep(Duration::from_millis(100)).await;
              }
              first = false;

              let tributaries = tributaries.read().await;
              let Some(tributary) = tributaries.get(&genesis) else {
                // If we don't have this Tributary because it's retired, break and move on
                if RetiredTributaryDb::get(&raw_db, set).is_some() {
                  break;
                }

                // This may happen if the task above is simply slow
                log::warn!("tributary we don't have yet is supposed to perform a slash report");
                continue;
              };
              // This is safe to perform multiple times and solely needs atomicity with regards
              // to itself
              // TODO: Should this not take a txn accordingly? It's best practice to take a txn,
              // yet taking a txn fails to declare its achieved independence
              let mut txn = raw_db.txn();
              tributary::publish_signed_transaction(&mut txn, tributary, tx).await;
              txn.commit();
              break;
            }
          }
          None => panic!("perform slash report sender closed"),
        }
      }
    }
  });

  move |set: ValidatorSet, genesis, id_type, id: Vec<u8>| {
    log::debug!("recognized ID {:?} {}", id_type, hex::encode(&id));
    let mut raw_db = raw_db.clone();
@@ -1202,11 +1356,10 @@ async fn main() {
|
||||
key_bytes.zeroize();
|
||||
key
|
||||
};
|
||||
let p2p = LibP2p::new();
|
||||
|
||||
let processors = Arc::new(MessageQueue::from_env(Service::Coordinator));
|
||||
|
||||
let serai = || async {
|
||||
let serai = (async {
|
||||
loop {
|
||||
let Ok(serai) = Serai::new(format!(
|
||||
"http://{}:9944",
|
||||
@@ -1219,8 +1372,10 @@ async fn main() {
|
||||
continue;
|
||||
};
|
||||
log::info!("made initial connection to Serai node");
|
||||
return serai;
|
||||
return Arc::new(serai);
|
||||
}
|
||||
};
|
||||
run(db, key, p2p, processors, serai().await).await
|
||||
})
|
||||
.await;
|
||||
let p2p = LibP2p::new(serai.clone());
|
||||
run(db, key, p2p, processors, serai).await
|
||||
}
|
||||
|
||||
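For reference, the slash-report task above boils each peer's standing down to a single u32: anyone fatally slashed is reported at u32::MAX, everyone else at their accumulated points. A minimal standalone sketch of that fold, with `fatally_slashed` and `slash_points` as stand-ins for the FatallySlashed/SlashPoints DB reads (not the real API):

    // Sketch only: the closures stand in for DB lookups keyed by (genesis, validator).
    fn slash_report_points(
        validators: &[[u8; 32]],
        our_key: &[u8; 32],
        fatally_slashed: impl Fn(&[u8; 32]) -> bool,
        slash_points: impl Fn(&[u8; 32]) -> Option<u32>,
    ) -> Vec<u32> {
        validators
            .iter()
            // A reporter never reports on themselves, hence validators.len() - 1 entries
            .filter(|validator| *validator != our_key)
            .map(|validator| {
                if fatally_slashed(validator) {
                    u32::MAX
                } else {
                    slash_points(validator).unwrap_or(0)
                }
            })
            .collect()
    }
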
@@ -7,19 +7,22 @@ use std::{
};

use async_trait::async_trait;
use rand_core::{RngCore, OsRng};

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::primitives::NetworkId;
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};

use serai_db::Db;

use futures_util::StreamExt;
use tokio::{
sync::{Mutex, RwLock, mpsc, broadcast},
time::sleep,
};

use libp2p::{
futures::StreamExt,
core::multiaddr::{Protocol, Multiaddr},
identity::Keypair,
PeerId,
tcp::Config as TcpConfig,
@@ -127,8 +130,8 @@ pub struct Message<P: P2p> {
pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
type Id: Send + Sync + Clone + Copy + fmt::Debug;

async fn subscribe(&self, genesis: [u8; 32]);
async fn unsubscribe(&self, genesis: [u8; 32]);
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]);
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]);

async fn send_raw(&self, to: Self::Id, genesis: Option<[u8; 32]>, msg: Vec<u8>);
async fn broadcast_raw(&self, genesis: Option<[u8; 32]>, msg: Vec<u8>);
@@ -190,14 +193,12 @@ pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
#[derive(NetworkBehaviour)]
struct Behavior {
gossipsub: GsBehavior,
#[cfg(debug_assertions)]
mdns: libp2p::mdns::tokio::Behaviour,
}

#[allow(clippy::type_complexity)]
#[derive(Clone)]
pub struct LibP2p {
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, [u8; 32])>>>,
subscribe: Arc<Mutex<mpsc::UnboundedSender<(bool, ValidatorSet, [u8; 32])>>>,
broadcast: Arc<Mutex<mpsc::UnboundedSender<(Option<[u8; 32]>, Vec<u8>)>>>,
receive: Arc<Mutex<mpsc::UnboundedReceiver<(PeerId, Vec<u8>)>>>,
}
@@ -209,14 +210,13 @@ impl fmt::Debug for LibP2p {

impl LibP2p {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
pub fn new(serai: Arc<Serai>) -> Self {
// Block size limit + 1 KB of space for signatures/metadata
const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;

log::info!("creating a libp2p instance");

let throwaway_key_pair = Keypair::generate_ed25519();
let throwaway_peer_id = PeerId::from(throwaway_key_pair.public());

let behavior = Behavior {
gossipsub: {
@@ -258,14 +258,6 @@ impl LibP2p {

gossipsub
},

// Only use MDNS in debug environments, as it should have no value in a release build
#[cfg(debug_assertions)]
mdns: {
log::info!("creating mdns service");
libp2p::mdns::tokio::Behaviour::new(libp2p::mdns::Config::default(), throwaway_peer_id)
.unwrap()
},
};

// Uses noise for authentication, yamux for multiplexing
@@ -294,8 +286,8 @@ impl LibP2p {
let (receive_send, receive_recv) = mpsc::unbounded_channel();
let (subscribe_send, mut subscribe_recv) = mpsc::unbounded_channel();

fn topic_for_genesis(genesis: [u8; 32]) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(genesis)))
fn topic_for_set(set: ValidatorSet) -> IdentTopic {
IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
}

tokio::spawn({
@@ -305,17 +297,14 @@ impl LibP2p {
fn broadcast_raw(
p2p: &mut Swarm<Behavior>,
time_of_last_p2p_message: &mut Instant,
genesis: Option<[u8; 32]>,
set: Option<ValidatorSet>,
msg: Vec<u8>,
) {
// Update the time of last message
*time_of_last_p2p_message = Instant::now();

let topic = if let Some(genesis) = genesis {
topic_for_genesis(genesis)
} else {
IdentTopic::new(LIBP2P_TOPIC)
};
let topic =
if let Some(set) = set { topic_for_set(set) } else { IdentTopic::new(LIBP2P_TOPIC) };

match p2p.behaviour_mut().gossipsub.publish(topic, msg.clone()) {
Err(PublishError::SigningError(e)) => panic!("signing error when broadcasting: {e}"),
@@ -331,37 +320,97 @@ impl LibP2p {
}

async move {
let mut set_for_genesis = HashMap::new();
let mut pending_p2p_connections = vec![];
// Run this task ad-infinitum
loop {
// Handle pending P2P connections
// TODO: Break this out onto its own task with better peer management logic?
{
let mut connect = |addr: Multiaddr| {
log::info!("found peer from substrate: {addr}");

let protocols = addr.iter().filter_map(|piece| match piece {
// Drop PeerIds from the Substrate P2p network
Protocol::P2p(_) => None,
// Use our own TCP port
Protocol::Tcp(_) => Some(Protocol::Tcp(PORT)),
other => Some(other),
});

let mut new_addr = Multiaddr::empty();
for protocol in protocols {
new_addr.push(protocol);
}
let addr = new_addr;
log::debug!("transformed found peer: {addr}");

if let Err(e) = swarm.dial(addr) {
log::warn!("dialing peer failed: {e:?}");
}
};

while let Some(network) = pending_p2p_connections.pop() {
if let Ok(mut nodes) = serai.p2p_validators(network).await {
// If there's an insufficient amount of nodes known, connect to all yet add it back
// and break
if nodes.len() < 3 {
log::warn!(
"insufficient amount of P2P nodes known for {:?}: {}",
network,
nodes.len()
);
pending_p2p_connections.push(network);
for node in nodes {
connect(node);
}
break;
}

// Randomly select up to 5
for _ in 0 .. 5 {
if !nodes.is_empty() {
let to_connect = nodes.swap_remove(
usize::try_from(OsRng.next_u64() % u64::try_from(nodes.len()).unwrap())
.unwrap(),
);
connect(to_connect);
}
}
}
}
}

let time_since_last = Instant::now().duration_since(time_of_last_p2p_message);
tokio::select! {
biased;

// Subscribe to any new topics
topic = subscribe_recv.recv() => {
let (subscribe, topic) = topic.expect("subscribe_recv closed. are we shutting down?");
set = subscribe_recv.recv() => {
let (subscribe, set, genesis): (_, ValidatorSet, [u8; 32]) =
set.expect("subscribe_recv closed. are we shutting down?");
let topic = topic_for_set(set);
if subscribe {
swarm
.behaviour_mut()
.gossipsub
.subscribe(&topic_for_genesis(topic))
.unwrap();
log::info!("subscribing to p2p messages for {set:?}");
pending_p2p_connections.push(set.network);
set_for_genesis.insert(genesis, set);
swarm.behaviour_mut().gossipsub.subscribe(&topic).unwrap();
} else {
swarm
.behaviour_mut()
.gossipsub
.unsubscribe(&topic_for_genesis(topic))
.unwrap();
log::info!("unsubscribing to p2p messages for {set:?}");
set_for_genesis.remove(&genesis);
swarm.behaviour_mut().gossipsub.unsubscribe(&topic).unwrap();
}
}

// Handle any queued outbound messages
msg = broadcast_recv.recv() => {
let (genesis, msg) = msg.expect("broadcast_recv closed. are we shutting down?");
let (genesis, msg): (Option<[u8; 32]>, Vec<u8>) =
msg.expect("broadcast_recv closed. are we shutting down?");
let set = genesis.and_then(|genesis| set_for_genesis.get(&genesis).copied());
broadcast_raw(
&mut swarm,
&mut time_of_last_p2p_message,
genesis,
set,
msg,
);
}
@@ -369,28 +418,17 @@ impl LibP2p {
// Handle new incoming messages
event = swarm.next() => {
match event {
#[cfg(debug_assertions)]
Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
libp2p::mdns::Event::Discovered(list),
))) => {
for (peer, mut addr) in list {
// Check the port is as expected to prevent trying to peer with Substrate nodes
if addr.pop() == Some(libp2p::multiaddr::Protocol::Tcp(PORT)) {
log::info!("found peer via mdns");
swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer);
}
}
Some(SwarmEvent::Dialing { connection_id, .. }) => {
log::debug!("dialing to peer in connection ID {}", &connection_id);
}
#[cfg(debug_assertions)]
Some(SwarmEvent::Behaviour(BehaviorEvent::Mdns(
libp2p::mdns::Event::Expired(list),
))) => {
for (peer, _) in list {
log::info!("disconnecting peer due to mdns");
swarm.behaviour_mut().gossipsub.remove_explicit_peer(&peer);
Some(SwarmEvent::ConnectionEstablished { peer_id, connection_id, .. }) => {
log::debug!(
"connection established to peer {} in connection ID {}",
&peer_id,
&connection_id,
);
swarm.behaviour_mut().gossipsub.add_explicit_peer(&peer_id)
}
}

Some(SwarmEvent::Behaviour(BehaviorEvent::Gossipsub(
GsEvent::Message { propagation_source, message, .. },
))) => {
@@ -434,21 +472,21 @@ impl LibP2p {
impl P2p for LibP2p {
type Id = PeerId;

async fn subscribe(&self, genesis: [u8; 32]) {
async fn subscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
.await
.send((true, genesis))
.send((true, set, genesis))
.expect("subscribe_send closed. are we shutting down?");
}

async fn unsubscribe(&self, genesis: [u8; 32]) {
async fn unsubscribe(&self, set: ValidatorSet, genesis: [u8; 32]) {
self
.subscribe
.lock()
.await
.send((false, genesis))
.send((false, set, genesis))
.expect("subscribe_send closed. are we shutting down?");
}

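The subscribe plumbing above now keys gossipsub topics on the ValidatorSet (SCALE-encoded) rather than the genesis hash, presumably so a subscription also carries which network's validators to dial (note the `pending_p2p_connections.push(set.network)` above). A reduced sketch of the derivation; the LIBP2P_TOPIC value here is a placeholder, and `set` is anything SCALE-encodable:

    use libp2p::gossipsub::IdentTopic;
    use scale::Encode;

    // Placeholder value; the real constant lives in the coordinator's p2p module.
    const LIBP2P_TOPIC: &str = "serai-coordinator";

    fn topic_for_set(set: impl Encode) -> IdentTopic {
        // "{base}-{hex(SCALE(set))}" instead of the prior "{base}-{hex(genesis)}"
        IdentTopic::new(format!("{LIBP2P_TOPIC}-{}", hex::encode(set.encode())))
    }
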
@@ -552,7 +590,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
channels.write().await.insert(genesis, send);

// Subscribe to the topic for this tributary
p2p.subscribe(genesis).await;
p2p.subscribe(tributary.spec.set(), genesis).await;

// Per-Tributary P2P message handler
tokio::spawn({
@@ -675,8 +713,8 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
}
TributaryEvent::TributaryRetired(set) => {
if let Some(genesis) = set_to_genesis.remove(&set) {
p2p.unsubscribe(set, genesis).await;
channels.write().await.remove(&genesis);
p2p.unsubscribe(genesis).await;
}
}
}

@@ -13,7 +13,7 @@ mod inner_db {
}
);
}
pub use inner_db::{NextBlock, BatchInstructionsHashDb};
pub(crate) use inner_db::{NextBlock, BatchInstructionsHashDb};

pub struct HandledEvent;
impl HandledEvent {

@@ -208,10 +208,12 @@ async fn handle_batch_and_burns<Pro: Processors>(

// Handle a specific Substrate block, returning an error when it fails to get data
// (not blocking / holding)
#[allow(clippy::too_many_arguments)]
async fn handle_block<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
@@ -276,6 +278,9 @@ async fn handle_block<D: Db, Pro: Processors>(
},
)
.await;

// TODO: If we were in the set, yet were removed, drop the tributary

let mut txn = db.txn();
SeraiDkgCompleted::set(&mut txn, set, &substrate_key);
HandledEvent::handle_event(&mut txn, hash, event_id);
@@ -284,6 +289,27 @@ async fn handle_block<D: Db, Pro: Processors>(
event_id += 1;
}

for accepted_handover in serai.as_of(hash).validator_sets().accepted_handover_events().await? {
let ValidatorSetsEvent::AcceptedHandover { set } = accepted_handover else {
panic!("AcceptedHandover event wasn't AcceptedHandover: {accepted_handover:?}");
};

if set.network == NetworkId::Serai {
continue;
}

if HandledEvent::is_unhandled(db, hash, event_id) {
log::info!("found fresh accepted handover event {:?}", accepted_handover);
// TODO: This isn't atomic with the event handling
// Send a oneshot receiver so we can await the response?
perform_slash_report.send(set).unwrap();
let mut txn = db.txn();
HandledEvent::handle_event(&mut txn, hash, event_id);
txn.commit();
}
event_id += 1;
}

for retired_set in serai.as_of(hash).validator_sets().set_retired_events().await? {
let ValidatorSetsEvent::SetRetired { set } = retired_set else {
panic!("SetRetired event wasn't SetRetired: {retired_set:?}");
@@ -317,10 +343,12 @@ async fn handle_block<D: Db, Pro: Processors>(
Ok(())
}

#[allow(clippy::too_many_arguments)]
async fn handle_new_blocks<D: Db, Pro: Processors>(
db: &mut D,
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
new_tributary_spec: &mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: &mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: &mpsc::UnboundedSender<ValidatorSet>,
processors: &Pro,
serai: &Serai,
@@ -346,7 +374,17 @@ async fn handle_new_blocks<D: Db, Pro: Processors>(
.expect("couldn't get block before the latest finalized block");

log::info!("handling substrate block {b}");
handle_block(db, key, new_tributary_spec, tributary_retired, processors, serai, block).await?;
handle_block(
db,
key,
new_tributary_spec,
perform_slash_report,
tributary_retired,
processors,
serai,
block,
)
.await?;
*next_block += 1;

let mut txn = db.txn();
@@ -365,6 +403,7 @@ pub async fn scan_task<D: Db, Pro: Processors>(
processors: Pro,
serai: Arc<Serai>,
new_tributary_spec: mpsc::UnboundedSender<TributarySpec>,
perform_slash_report: mpsc::UnboundedSender<ValidatorSet>,
tributary_retired: mpsc::UnboundedSender<ValidatorSet>,
) {
log::info!("scanning substrate");
@@ -440,6 +479,7 @@ pub async fn scan_task<D: Db, Pro: Processors>(
&mut db,
&key,
&new_tributary_spec,
&perform_slash_report,
&tributary_retired,
&processors,
&serai,

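The AcceptedHandover handling above follows the scanner's usual idempotency pattern: skip events already marked handled, act, then mark handled within a committed txn. A schematic restatement, with `is_unhandled`, `mark_handled`, and `act` as stand-ins for HandledEvent and the event body (not the real API):

    // Schematic of the per-event idempotency loop used throughout this scanner.
    fn handle_events<E>(
        events: &[E],
        mut is_unhandled: impl FnMut(u32) -> bool,
        mut mark_handled: impl FnMut(u32),
        mut act: impl FnMut(&E),
    ) {
        let mut event_id = 0;
        for event in events {
            if is_unhandled(event_id) {
                // In the real code, acting (sending over the channel) isn't atomic with
                // marking handled, per the TODO above
                act(event);
                mark_handled(event_id);
            }
            event_id += 1;
        }
    }
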
@@ -4,7 +4,7 @@ use std::{
collections::{VecDeque, HashSet, HashMap},
};

use serai_client::primitives::NetworkId;
use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet};

use processor_messages::CoordinatorMessage;

@@ -62,8 +62,8 @@ impl LocalP2p {
impl P2p for LocalP2p {
type Id = usize;

async fn subscribe(&self, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _genesis: [u8; 32]) {}
async fn subscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}
async fn unsubscribe(&self, _set: ValidatorSet, _genesis: [u8; 32]) {}

async fn send_raw(&self, to: Self::Id, _genesis: Option<[u8; 32]>, msg: Vec<u8>) {
self.1.write().await.1[to].push_back((self.0, msg));

@@ -15,7 +15,7 @@ use serai_client::{

use tokio::time::sleep;

use serai_db::{Db, MemDb, DbTxn};
use serai_db::{Get, DbTxn, Db, MemDb};

use processor_messages::{
key_gen::{self, KeyGenId},
@@ -349,6 +349,7 @@ async fn dkg_test() {
impl PublishSeraiTransaction for CheckPublishSetKeys {
async fn publish_set_keys(
&self,
_db: &(impl Sync + Get),
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,

@@ -2,10 +2,12 @@ use core::fmt::Debug;

use rand_core::{RngCore, OsRng};

use ciphersuite::{group::Group, Ciphersuite, Ristretto};

use scale::{Encode, Decode};
use serai_client::{
primitives::{SeraiAddress, Signature},
validator_sets::primitives::{ValidatorSet, KeyPair},
validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
};
use processor_messages::coordinator::SubstrateSignableId;

@@ -28,6 +30,7 @@ mod sync;
impl PublishSeraiTransaction for () {
async fn publish_set_keys(
&self,
_db: &(impl Sync + serai_db::Get),
_set: ValidatorSet,
_removed: Vec<SeraiAddress>,
_key_pair: KeyPair,
@@ -60,7 +63,7 @@ fn random_sign_data<R: RngCore, Id: Clone + PartialEq + Eq + Debug + Encode + De

data: {
let mut res = vec![];
for _ in 0 ..= (rng.next_u64() % 256) {
for _ in 0 ..= (rng.next_u64() % 255) {
res.push(random_vec(&mut OsRng, 512));
}
res
@@ -76,7 +79,7 @@ fn test_read_write<RW: Eq + Debug + ReadWrite>(value: &RW) {

#[test]
fn tx_size_limit() {
use serai_client::validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, MAX_KEY_LEN};
use serai_client::validator_sets::primitives::MAX_KEY_LEN;

use tributary::TRANSACTION_SIZE_LIMIT;

@@ -141,11 +144,8 @@ fn serialize_sign_data() {
#[test]
fn serialize_transaction() {
test_read_write(&Transaction::RemoveParticipantDueToDkg {
attempt: u32::try_from(OsRng.next_u64() >> 32).unwrap(),
participant: frost::Participant::new(
u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
)
.unwrap(),
participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
signed: random_signed_with_nonce(&mut OsRng, 0),
});

{
@@ -277,4 +277,17 @@ fn serialize_transaction() {
signature: random_signed_with_nonce(&mut OsRng, 2).signature,
});
}

test_read_write(&Transaction::SlashReport(
{
let amount =
usize::try_from(OsRng.next_u64() % u64::from(MAX_KEY_SHARES_PER_SET - 1)).unwrap();
let mut points = vec![];
for _ in 0 .. amount {
points.push((OsRng.next_u64() >> 32).try_into().unwrap());
}
points
},
random_signed_with_nonce(&mut OsRng, 0),
));
}

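The new SlashReport round-trip test bounds the points vector by MAX_KEY_SHARES_PER_SET - 1, matching the rule that a reporter omits themselves. A tiny restatement of that bound; the constant's value here is assumed for illustration:

    // Assumed value for illustration; the real constant is defined in
    // serai_client::validator_sets::primitives.
    const MAX_KEY_SHARES_PER_SET: u32 = 600;

    fn max_slash_report_len() -> usize {
        // One entry per other participant, never one for the reporter themselves
        usize::try_from(MAX_KEY_SHARES_PER_SET - 1).unwrap()
    }
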
@@ -3,6 +3,7 @@ use std::collections::HashMap;
use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;

use serai_client::validator_sets::primitives::{KeyPair, ValidatorSet};
@@ -49,27 +50,50 @@ create_db!(

TributaryBlockNumber: (block: [u8; 32]) -> u32,
LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],

// TODO: Revisit the point of this
FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
FatalSlashesAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
// TODO: Combine these two
FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
RemovalNonces:
(genesis: [u8; 32], removing: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
DkgCompleted: (genesis: [u8; 32]) -> (),
LocallyDkgRemoved: (genesis: [u8; 32], validator: [u8; 32]) -> (),
SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,

VotedToRemove: (genesis: [u8; 32], voter: [u8; 32], to_remove: [u8; 32]) -> (),
VotesToRemove: (genesis: [u8; 32], to_remove: [u8; 32]) -> u16,

AttemptDb: (genesis: [u8; 32], topic: &Topic) -> u32,
ReattemptDb: (genesis: [u8; 32], block: u32) -> Vec<Topic>,
DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,

DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
KeyToDkgAttempt: (key: [u8; 32]) -> u32,
DkgLocallyCompleted: (genesis: [u8; 32]) -> (),

PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,

SignedTransactionDb: (order: &[u8], nonce: u32) -> Vec<u8>,

SlashReports: (genesis: [u8; 32], signer: [u8; 32]) -> Vec<u32>,
SlashReported: (genesis: [u8; 32]) -> u16,
SlashReportCutOff: (genesis: [u8; 32]) -> u64,
SlashReport: (set: ValidatorSet) -> Vec<([u8; 32], u32)>,
}
);

impl FatalSlashes {
pub fn get_as_keys(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
FatalSlashes::get(getter, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect::<Vec<_>>()
}
}

impl FatallySlashed {
pub fn set_fatally_slashed(txn: &mut impl DbTxn, genesis: [u8; 32], account: [u8; 32]) {
Self::set(txn, genesis, account, &());
@@ -100,7 +124,13 @@ impl AttemptDb {
pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
let attempt = Self::get(getter, genesis, &topic);
// Don't require explicit recognition of the Dkg topic as it starts when the chain does
if attempt.is_none() && ((topic == Topic::Dkg) || (topic == Topic::DkgConfirmation)) {
// Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
// should always happen (eventually)
if attempt.is_none() &&
((topic == Topic::Dkg) ||
(topic == Topic::DkgConfirmation) ||
(topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
{
return Some(0);
}
attempt
@@ -115,16 +145,26 @@ impl ReattemptDb {
topic: Topic,
) {
// 5 minutes
#[cfg(not(feature = "longer-reattempts"))]
const BASE_REATTEMPT_DELAY: u32 = (5 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;

// 10 minutes, intended for latent environments like the GitHub CI
#[cfg(feature = "longer-reattempts")]
const BASE_REATTEMPT_DELAY: u32 = (10 * 60 * 1000) / tributary::tendermint::TARGET_BLOCK_TIME;

// 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
// Assumes no event will take longer than 15 minutes, yet grows the time in case there are
// network bandwidth issues
let reattempt_delay = BASE_REATTEMPT_DELAY *
let mut reattempt_delay = BASE_REATTEMPT_DELAY *
((AttemptDb::attempt(txn, genesis, topic)
.expect("scheduling re-attempt for unknown topic") /
3) +
1)
.min(3);
// Allow more time for DKGs since they have an extra round and much more data
if matches!(topic, Topic::Dkg) {
reattempt_delay *= 4;
}
let upon_block = current_block_number + reattempt_delay;

let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);

|
||||
use core::ops::Deref;
|
||||
use std::collections::HashMap;
|
||||
|
||||
use zeroize::{Zeroize, Zeroizing};
|
||||
use zeroize::Zeroizing;
|
||||
use rand_core::OsRng;
|
||||
|
||||
use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
||||
use frost::dkg::Participant;
|
||||
|
||||
use scale::{Encode, Decode};
|
||||
use serai_client::{Public, Signature, validator_sets::primitives::KeyPair};
|
||||
use serai_client::{Signature, validator_sets::primitives::KeyPair};
|
||||
|
||||
use tributary::{Signed, TransactionKind, TransactionTrait};
|
||||
|
||||
@@ -42,35 +43,6 @@ pub fn dkg_confirmation_nonces(
|
||||
.preprocess()
|
||||
}
|
||||
|
||||
// If there's an error generating a key pair, return any errors which would've occurred when
|
||||
// executing the DkgConfirmer in order to stay in sync with those who did.
|
||||
//
|
||||
// The caller must ensure only error_generating_key_pair or generated_key_pair is called for a
|
||||
// given attempt.
|
||||
pub fn error_generating_key_pair(
|
||||
txn: &mut impl DbTxn,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
spec: &TributarySpec,
|
||||
attempt: u32,
|
||||
) -> Option<Participant> {
|
||||
let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
|
||||
|
||||
// Sign a key pair which can't be valid
|
||||
// (0xff used as 0 would be the Ristretto identity point, 0-length for the network key)
|
||||
let key_pair = KeyPair(Public([0xff; 32]), vec![0xffu8; 0].try_into().unwrap());
|
||||
match DkgConfirmer::new(key, spec, txn, attempt)
|
||||
.expect("reporting an error during DKG for an unrecognized attempt")
|
||||
.share(preprocesses, &key_pair)
|
||||
{
|
||||
Ok(mut share) => {
|
||||
// Zeroize the share to ensure it's not accessed
|
||||
share.zeroize();
|
||||
None
|
||||
}
|
||||
Err(p) => Some(p),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generated_key_pair<D: Db>(
|
||||
txn: &mut D::Transaction<'_>,
|
||||
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
@@ -92,7 +64,7 @@ fn unflatten(
|
||||
data: &mut HashMap<Participant, Vec<u8>>,
|
||||
) {
|
||||
for (validator, _) in spec.validators() {
|
||||
let range = spec.i(removed, validator).unwrap();
|
||||
let Some(range) = spec.i(removed, validator) else { continue };
|
||||
let Some(all_segments) = data.remove(&range.start) else {
|
||||
continue;
|
||||
};
|
||||
@@ -105,13 +77,14 @@ fn unflatten(
|
||||
}
|
||||
|
||||
impl<
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
|
||||
> TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>
|
||||
{
|
||||
fn accumulate(
|
||||
&mut self,
|
||||
@@ -126,10 +99,10 @@ impl<
|
||||
panic!("accumulating data for a participant multiple times");
|
||||
}
|
||||
let signer_shares = {
|
||||
let signer_i = self
|
||||
.spec
|
||||
.i(removed, signer)
|
||||
.expect("transaction signed by a non-validator for this tributary");
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
|
||||
return Accumulation::NotReady;
|
||||
};
|
||||
u16::from(signer_i.end) - u16::from(signer_i.start)
|
||||
};
|
||||
|
||||
@@ -153,10 +126,6 @@ impl<
|
||||
// place
|
||||
assert_eq!(AttemptDb::attempt(self.txn, genesis, data_spec.topic), Some(data_spec.attempt));
|
||||
ReattemptDb::schedule_reattempt(self.txn, genesis, self.block_number, data_spec.topic);
|
||||
|
||||
// Because this attempt was participated in, it was justified
|
||||
// The question becomes why did the prior attempt fail?
|
||||
// TODO: Slash people who failed to participate as expected in the prior attempt
|
||||
}
|
||||
|
||||
// If we have all the needed commitments/preprocesses/shares, tell the processor
|
||||
@@ -169,37 +138,29 @@ impl<
|
||||
&data_spec.topic,
|
||||
&data_spec.attempt
|
||||
);
|
||||
return Accumulation::Ready({
|
||||
let mut data = HashMap::new();
|
||||
for validator in self.spec.validators().iter().map(|validator| validator.0) {
|
||||
data.insert(
|
||||
self.spec.i(removed, validator).unwrap().start,
|
||||
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
|
||||
data
|
||||
} else {
|
||||
continue;
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
assert_eq!(data.len(), usize::from(needed));
|
||||
let mut data = HashMap::new();
|
||||
for validator in self.spec.validators().iter().map(|validator| validator.0) {
|
||||
let Some(i) = self.spec.i(removed, validator) else { continue };
|
||||
data.insert(
|
||||
i.start,
|
||||
if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {
|
||||
data
|
||||
} else {
|
||||
continue;
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
// Remove our own piece of data, if we were involved
|
||||
if data
|
||||
.remove(
|
||||
&self
|
||||
.spec
|
||||
.i(removed, Ristretto::generator() * self.our_key.deref())
|
||||
.expect("handling a message for a Tributary we aren't part of")
|
||||
.start,
|
||||
)
|
||||
.is_some()
|
||||
{
|
||||
DataSet::Participating(data)
|
||||
} else {
|
||||
DataSet::NotParticipating
|
||||
assert_eq!(data.len(), usize::from(needed));
|
||||
|
||||
// Remove our own piece of data, if we were involved
|
||||
if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
|
||||
if data.remove(&i.start).is_some() {
|
||||
return Accumulation::Ready(DataSet::Participating(data));
|
||||
}
|
||||
});
|
||||
}
|
||||
return Accumulation::Ready(DataSet::NotParticipating);
|
||||
}
|
||||
Accumulation::NotReady
|
||||
}
|
||||
@@ -261,7 +222,12 @@ impl<
|
||||
signer: <Ristretto as Ciphersuite>::G,
|
||||
len: usize,
|
||||
) -> Result<(), ()> {
|
||||
let signer_i = self.spec.i(removed, signer).unwrap();
|
||||
let Some(signer_i) = self.spec.i(removed, signer) else {
|
||||
// TODO: Ensure processor doesn't so participate/check how it handles removals for being
|
||||
// offline
|
||||
self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
|
||||
Err(())?
|
||||
};
|
||||
if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
|
||||
self.fatal_slash(
|
||||
signer.to_bytes(),
|
||||
@@ -288,17 +254,33 @@ impl<
|
||||
}
|
||||
|
||||
match tx {
|
||||
Transaction::RemoveParticipantDueToDkg { attempt, participant } => self
|
||||
.fatal_slash_with_participant_index(
|
||||
&removed_as_of_dkg_attempt(self.txn, genesis, attempt).unwrap_or_else(|| {
|
||||
panic!(
|
||||
"removed a participant due to a provided transaction with an attempt not {}",
|
||||
"locally handled?"
|
||||
)
|
||||
}),
|
||||
participant,
|
||||
"RemoveParticipantDueToDkg Provided TX",
|
||||
),
|
||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
|
||||
if self.spec.i(&[], participant).is_none() {
|
||||
self.fatal_slash(
|
||||
participant.to_bytes(),
|
||||
"RemoveParticipantDueToDkg vote for non-validator",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
let participant = participant.to_bytes();
|
||||
let signer = signed.signer.to_bytes();
|
||||
|
||||
assert!(
|
||||
VotedToRemove::get(self.txn, genesis, signer, participant).is_none(),
|
||||
"VotedToRemove multiple times despite a single nonce being allocated",
|
||||
);
|
||||
VotedToRemove::set(self.txn, genesis, signer, participant, &());
|
||||
|
||||
let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
|
||||
let signer_votes =
|
||||
self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
|
||||
let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
|
||||
VotesToRemove::set(self.txn, genesis, participant, &new_votes);
|
||||
if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
|
||||
self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
|
||||
}
|
||||
}
|
||||
|
||||
Transaction::DkgCommitments { attempt, commitments, signed } => {
|
||||
let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
|
||||
@@ -325,7 +307,10 @@ impl<
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
panic!("wasn't a participant in DKG commitments")
|
||||
assert!(
|
||||
removed.contains(&(Ristretto::generator() * self.our_key.deref())),
|
||||
"NotParticipating in a DkgCommitments we weren't removed for"
|
||||
);
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
@@ -336,14 +321,19 @@ impl<
|
||||
self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
|
||||
|
||||
let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let sender_i = self
|
||||
.spec
|
||||
.i(&removed, signed.signer)
|
||||
.expect("transaction added to tributary by signer who isn't a participant");
|
||||
let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"DkgShares for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
for shares in &shares {
|
||||
if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
|
||||
@@ -353,56 +343,59 @@ impl<
|
||||
}
|
||||
|
||||
// Save each share as needed for blame
|
||||
{
|
||||
let from_range = self.spec.i(&removed, signed.signer).unwrap();
|
||||
for (from_offset, shares) in shares.iter().enumerate() {
|
||||
let from =
|
||||
Participant::new(u16::from(from_range.start) + u16::try_from(from_offset).unwrap())
|
||||
.unwrap();
|
||||
for (from_offset, shares) in shares.iter().enumerate() {
|
||||
let from =
|
||||
Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
|
||||
.unwrap();
|
||||
|
||||
for (to_offset, share) in shares.iter().enumerate() {
|
||||
// 0-indexed (the enumeration) to 1-indexed (Participant)
|
||||
let mut to = u16::try_from(to_offset).unwrap() + 1;
|
||||
// Adjust for the omission of the sender's own shares
|
||||
if to >= u16::from(from_range.start) {
|
||||
to += u16::from(from_range.end) - u16::from(from_range.start);
|
||||
}
|
||||
let to = Participant::new(to).unwrap();
|
||||
|
||||
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
|
||||
for (to_offset, share) in shares.iter().enumerate() {
|
||||
// 0-indexed (the enumeration) to 1-indexed (Participant)
|
||||
let mut to = u16::try_from(to_offset).unwrap() + 1;
|
||||
// Adjust for the omission of the sender's own shares
|
||||
if to >= u16::from(sender_i.start) {
|
||||
to += u16::from(sender_i.end) - u16::from(sender_i.start);
|
||||
}
|
||||
let to = Participant::new(to).unwrap();
|
||||
|
||||
DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
|
||||
}
|
||||
}
|
||||
|
||||
// Filter down to only our share's bytes for handle
|
||||
let our_i = self
|
||||
.spec
|
||||
.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
.expect("in a tributary we're not a validator for");
|
||||
|
||||
let our_shares = if sender_i == our_i {
|
||||
vec![]
|
||||
} else {
|
||||
// 1-indexed to 0-indexed
|
||||
let mut our_i_pos = u16::from(our_i.start) - 1;
|
||||
// Handle the omission of the sender's own data
|
||||
if u16::from(our_i.start) > u16::from(sender_i.start) {
|
||||
our_i_pos -= sender_is_len;
|
||||
let our_shares = if let Some(our_i) =
|
||||
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
|
||||
{
|
||||
if sender_i == our_i {
|
||||
vec![]
|
||||
} else {
|
||||
// 1-indexed to 0-indexed
|
||||
let mut our_i_pos = u16::from(our_i.start) - 1;
|
||||
// Handle the omission of the sender's own data
|
||||
if u16::from(our_i.start) > u16::from(sender_i.start) {
|
||||
our_i_pos -= sender_is_len;
|
||||
}
|
||||
let our_i_pos = usize::from(our_i_pos);
|
||||
shares
|
||||
.iter_mut()
|
||||
.map(|shares| {
|
||||
shares
|
||||
.drain(
|
||||
our_i_pos ..
|
||||
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
|
||||
)
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
let our_i_pos = usize::from(our_i_pos);
|
||||
shares
|
||||
.iter_mut()
|
||||
.map(|shares| {
|
||||
shares
|
||||
.drain(
|
||||
our_i_pos ..
|
||||
(our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
|
||||
)
|
||||
.collect::<Vec<_>>()
|
||||
})
|
||||
.collect()
|
||||
} else {
|
||||
assert!(
|
||||
not_participating,
|
||||
"we didn't have an i while handling DkgShares we weren't removed for"
|
||||
);
|
||||
// Since we're not participating, simply save vec![] for our shares
|
||||
vec![]
|
||||
};
|
||||
// Drop shares as it's been mutated into invalidity
|
||||
// Drop shares as it's presumably been mutated into invalidity
|
||||
drop(shares);
|
||||
|
||||
let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
|
||||
@@ -458,7 +451,7 @@ impl<
|
||||
.await;
|
||||
}
|
||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
||||
panic!("wasn't a participant in DKG shares")
|
||||
assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
|
||||
}
|
||||
Accumulation::NotReady => {}
|
||||
}
|
||||
@@ -470,7 +463,13 @@ impl<
|
||||
.fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
|
||||
return;
|
||||
};
|
||||
let range = self.spec.i(&removed, signed.signer).unwrap();
|
||||
let Some(range) = self.spec.i(&removed, signed.signer) else {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"InvalidDkgShare for a DKG they aren't participating in",
|
||||
);
|
||||
return;
|
||||
};
|
||||
if !range.contains(&accuser) {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
@@ -524,8 +523,8 @@ impl<
|
||||
};
|
||||
|
||||
let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();
|
||||
// TODO: This can technically happen under very very very specific timing as the txn put
|
||||
// happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
||||
// TODO: This can technically happen under very very very specific timing as the txn
|
||||
// put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
|
||||
let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
|
||||
"in DkgConfirmed handling, which happens after everyone \
|
||||
(including us) fires DkgConfirmed, yet no confirming key pair",
|
||||
@@ -535,16 +534,22 @@ impl<
|
||||
let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
|
||||
Ok(sig) => sig,
|
||||
Err(p) => {
|
||||
self.fatal_slash_with_participant_index(&removed, p, "invalid DkgConfirmer share");
|
||||
let mut tx = Transaction::RemoveParticipantDueToDkg {
|
||||
participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
|
||||
signed: Transaction::empty_signed(),
|
||||
};
|
||||
tx.sign(&mut OsRng, genesis, self.our_key);
|
||||
self.publish_tributary_tx.publish_tributary_tx(tx).await;
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
DkgCompleted::set(self.txn, genesis, &());
|
||||
DkgLocallyCompleted::set(self.txn, genesis, &());
|
||||
|
||||
self
|
||||
.publish_serai_tx
|
||||
.publish_set_keys(
|
||||
self.db,
|
||||
self.spec.set(),
|
||||
removed.into_iter().map(|key| key.to_bytes().into()).collect(),
|
||||
key_pair,
|
||||
@@ -733,6 +738,39 @@ impl<
|
||||
};
|
||||
self.processors.send(self.spec.set().network, msg).await;
|
||||
}
|
||||
|
||||
Transaction::SlashReport(points, signed) => {
|
||||
// Uses &[] as we only need the length which is independent to who else was removed
|
||||
let signer_range = self.spec.i(&[], signed.signer).unwrap();
|
||||
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
|
||||
if points.len() != (self.spec.validators().len() - 1) {
|
||||
self.fatal_slash(
|
||||
signed.signer.to_bytes(),
|
||||
"submitted a distinct amount of slash points to participants",
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if SlashReports::get(self.txn, genesis, signed.signer.to_bytes()).is_some() {
|
||||
self.fatal_slash(signed.signer.to_bytes(), "submitted multiple slash points");
|
||||
return;
|
||||
}
|
||||
SlashReports::set(self.txn, genesis, signed.signer.to_bytes(), &points);
|
||||
|
||||
let prior_reported = SlashReported::get(self.txn, genesis).unwrap_or(0);
|
||||
let now_reported = prior_reported + signer_len;
|
||||
SlashReported::set(self.txn, genesis, &now_reported);
|
||||
|
||||
if (prior_reported < self.spec.t()) && (now_reported >= self.spec.t()) {
|
||||
SlashReportCutOff::set(
|
||||
self.txn,
|
||||
genesis,
|
||||
// 30 minutes into the future
|
||||
&(u64::from(self.block_number) +
|
||||
((30 * 60 * 1000) / u64::from(tributary::tendermint::TARGET_BLOCK_TIME))),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
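Two details in the SlashReport handling above are worth spelling out: report weight is counted in key shares (a multi-share validator's report counts once per share), and the cut-off only arms on the report that first crosses the threshold t. A compact restatement, with the cut-off landing 30 minutes of blocks ahead:

    // Restates the accumulation logic above; `t` is the set's threshold in key shares.
    fn crossed_threshold(prior_reported: u16, signer_len: u16, t: u16) -> bool {
        let now_reported = prior_reported + signer_len;
        (prior_reported < t) && (now_reported >= t)
    }

    // When crossed, the cut-off is scheduled 30 minutes of blocks into the future
    fn cut_off_block(block_number: u32, target_block_time_ms: u64) -> u64 {
        u64::from(block_number) + ((30 * 60 * 1000) / target_block_time_ms)
    }
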
@@ -32,20 +32,12 @@ pub fn removed_as_of_dkg_attempt(
if attempt == 0 {
Some(vec![])
} else {
FatalSlashesAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
})
}
}

pub fn latest_removed(getter: &impl Get, genesis: [u8; 32]) -> Vec<<Ristretto as Ciphersuite>::G> {
FatalSlashes::get(getter, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect()
}

pub fn removed_as_of_set_keys(
getter: &impl Get,
set: ValidatorSet,

@@ -1,10 +1,9 @@
use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
use std::sync::Arc;
use std::{sync::Arc, collections::HashSet};

use zeroize::Zeroizing;

use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
use frost::Participant;

use tokio::sync::broadcast;

@@ -17,7 +16,7 @@ use serai_client::{

use serai_db::DbTxn;

use processor_messages::coordinator::SubstrateSignableId;
use processor_messages::coordinator::{SubstrateSignId, SubstrateSignableId};

use tributary::{
TransactionKind, Transaction as TributaryTransaction, TransactionError, Block, TributaryReader,
@@ -66,6 +65,7 @@ impl<
pub trait PublishSeraiTransaction {
async fn publish_set_keys(
&self,
db: &(impl Sync + Get),
set: ValidatorSet,
removed: Vec<SeraiAddress>,
key_pair: KeyPair,
@@ -85,6 +85,7 @@ mod impl_pst_for_serai {
($Meta: ty, $check: ident) => {
async fn publish(
serai: &Serai,
db: &impl Get,
set: ValidatorSet,
tx: serai_client::Transaction,
meta: $Meta,
@@ -96,29 +97,25 @@ mod impl_pst_for_serai {
// creation
// TODO2: Differentiate connection errors from invariants
Err(e) => {
// The following block is irrelevant, and can/likely will fail, if we're publishing
// a TX for an old session
// If we're on a newer session, move on
if crate::RetiredTributaryDb::get(db, set).is_some() {
log::warn!("trying to publish a TX relevant to set {set:?} which isn't the latest");
return false;
}

if let Ok(serai) = serai.as_of_latest_finalized_block().await {
let serai = serai.validator_sets();

// The following block is irrelevant, and can/likely will fail, if we're publishing
// a TX for an old session
// If we're on a newer session, move on
if let Ok(Some(current_session)) = serai.session(set.network).await {
if current_session.0 > set.session.0 {
log::warn!(
"trying to publish a TX relevant to set {set:?} which isn't the latest"
);
return false;
}
}

// Check if someone else published the TX in question
if $check(serai, set, meta).await {
return false;
}

log::error!("couldn't connect to Serai node to publish TX: {e:?}");
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}

log::error!("couldn't connect to Serai node to publish TX: {e:?}");
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
}
}
}
|
||||
impl PublishSeraiTransaction for Serai {
|
||||
async fn publish_set_keys(
|
||||
&self,
|
||||
db: &(impl Sync + Get),
|
||||
set: ValidatorSet,
|
||||
removed: Vec<SeraiAddress>,
|
||||
key_pair: KeyPair,
|
||||
@@ -144,7 +142,7 @@ mod impl_pst_for_serai {
|
||||
false
|
||||
}
|
||||
common_pst!((), check);
|
||||
if publish(self, set, tx, ()).await {
|
||||
if publish(self, db, set, tx, ()).await {
|
||||
log::info!("published set keys for {set:?}");
|
||||
}
|
||||
}
|
||||
@@ -164,6 +162,7 @@ impl<FPtt: Send + Future<Output = ()>, F: Sync + Fn(Transaction) -> FPtt> PTTTra
|
||||
|
||||
pub struct TributaryBlockHandler<
|
||||
'a,
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
@@ -171,6 +170,7 @@ pub struct TributaryBlockHandler<
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> {
|
||||
pub db: &'a D,
|
||||
pub txn: &'a mut T,
|
||||
pub our_key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
|
||||
pub recognized_id: &'a RID,
|
||||
@@ -184,53 +184,29 @@ pub struct TributaryBlockHandler<
|
||||
}
|
||||
|
||||
impl<
|
||||
D: Db,
|
||||
T: DbTxn,
|
||||
Pro: Processors,
|
||||
PST: PublishSeraiTransaction,
|
||||
PTT: PTTTrait,
|
||||
RID: RIDTrait,
|
||||
P: P2p,
|
||||
> TributaryBlockHandler<'_, T, Pro, PST, PTT, RID, P>
|
||||
> TributaryBlockHandler<'_, D, T, Pro, PST, PTT, RID, P>
|
||||
{
|
||||
pub fn fatal_slash(&mut self, slashing: [u8; 32], reason: &str) {
|
||||
// TODO: If this fatal slash puts the remaining set below the threshold, spin
|
||||
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
log::warn!("fatally slashing {}. reason: {}", hex::encode(slashing), reason);
|
||||
FatallySlashed::set_fatally_slashed(self.txn, genesis, slashing);
|
||||
// TODO: disconnect the node from network/ban from further participation in all Tributaries
|
||||
|
||||
// TODO: If during DKG, trigger a re-attempt
|
||||
// Despite triggering a re-attempt, this DKG may still complete and may become in-use
|
||||
// TODO: disconnect the node from network/ban from further participation in all Tributaries
|
||||
}
|
||||
|
||||
// TODO: Once Substrate confirms a key, we need to rotate our validator set OR form a second
|
||||
// Tributary post-DKG
|
||||
// https://github.com/serai-dex/serai/issues/426
|
||||
|
||||
pub fn fatal_slash_with_participant_index(
|
||||
&mut self,
|
||||
removed: &[<Ristretto as Ciphersuite>::G],
|
||||
i: Participant,
|
||||
reason: &str,
|
||||
) {
|
||||
// Resolve from Participant to <Ristretto as Ciphersuite>::G
|
||||
let i = u16::from(i);
|
||||
let mut validator = None;
|
||||
for (potential, _) in self.spec.validators() {
|
||||
let v_i = self.spec.i(removed, potential).unwrap();
|
||||
if (u16::from(v_i.start) <= i) && (i < u16::from(v_i.end)) {
|
||||
validator = Some(potential);
|
||||
break;
|
||||
}
|
||||
}
|
||||
let validator = validator.unwrap();
|
||||
|
||||
self.fatal_slash(validator.to_bytes(), reason);
|
||||
}
|
||||
|
||||
async fn handle<D: Db>(mut self) {
|
||||
async fn handle(mut self) {
|
||||
log::info!("found block for Tributary {:?}", self.spec.set());
|
||||
|
||||
let transactions = self.block.transactions.clone();
|
||||
@@ -240,8 +216,7 @@ impl<
|
||||
// Since the evidence is on the chain, it should already have been validated
|
||||
// We can just punish the signer
|
||||
let data = match ev {
|
||||
Evidence::ConflictingMessages(first, second) |
|
||||
Evidence::ConflictingPrecommit(first, second) => (first, Some(second)),
|
||||
Evidence::ConflictingMessages(first, second) => (first, Some(second)),
|
||||
Evidence::InvalidPrecommit(first) | Evidence::InvalidValidRound(first) => (first, None),
|
||||
};
|
||||
let msgs = (
|
||||
@@ -267,10 +242,151 @@ impl<
|
||||
}
|
||||
|
||||
let genesis = self.spec.genesis();
|
||||
|
||||
let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
|
||||
|
||||
// Calculate the shares still present, spinning if not enough are
|
||||
// still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
|
||||
// we should spin, hence storing it in a variable here
|
||||
let still_present_shares = {
|
||||
// Start with the original n value
|
||||
let mut present_shares = self.spec.n(&[]);
|
||||
// Remove everyone fatally slashed
|
||||
for removed in ¤t_fatal_slashes {
|
||||
let original_i_for_removed =
|
||||
self.spec.i(&[], *removed).expect("removed party was never present");
|
||||
let removed_shares =
|
||||
u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
|
||||
present_shares -= removed_shares;
|
||||
}
|
||||
|
||||
// Spin if the present shares don't satisfy the required threshold
|
||||
if present_shares < self.spec.t() {
|
||||
loop {
|
||||
log::error!(
|
||||
"fatally slashed so many participants for {:?} we no longer meet the threshold",
|
||||
self.spec.set()
|
||||
);
|
||||
tokio::time::sleep(core::time::Duration::from_secs(60)).await;
|
||||
}
|
||||
}
|
||||
|
||||
present_shares
|
||||
};
|
||||
|
||||
for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
|
||||
let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
|
||||
log::info!("re-attempting {topic:?} with attempt {attempt}");
|
||||
|
||||
// Slash people who failed to participate as expected in the prior attempt
|
||||
{
|
||||
let prior_attempt = attempt - 1;
|
||||
let (removed, expected_participants) = match topic {
|
||||
Topic::Dkg => {
|
||||
// Every validator who wasn't removed is expected to have participated
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
|
||||
.expect("prior attempt didn't have its removed saved to disk");
|
||||
let removed_set = removed.iter().copied().collect::<HashSet<_>>();
|
||||
(
|
||||
removed,
|
||||
self
|
||||
.spec
|
||||
.validators()
|
||||
.into_iter()
|
||||
.filter_map(|(validator, _)| {
|
||||
Some(validator).filter(|validator| !removed_set.contains(validator))
|
||||
})
|
||||
.collect(),
|
||||
)
|
||||
}
|
||||
Topic::DkgConfirmation => {
|
||||
panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
|
||||
}
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => {
|
||||
let removed =
|
||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
||||
.expect("SubstrateSign/Sign yet have yet to set keys");
|
||||
// TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
|
||||
let expected_participants = vec![];
|
||||
(removed, expected_participants)
|
||||
}
|
||||
};
|
||||
|
||||
let (expected_topic, expected_label) = match topic {
|
||||
Topic::Dkg => {
|
||||
let n = self.spec.n(&removed);
|
||||
// If we got all the DKG shares, we should be on DKG confirmation
|
||||
let share_spec =
|
||||
DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
|
||||
if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::DkgConfirmation, Label::Share)
|
||||
} else {
|
||||
let preprocess_spec = DataSpecification {
|
||||
topic: Topic::Dkg,
|
||||
label: Label::Preprocess,
|
||||
attempt: prior_attempt,
|
||||
};
|
||||
// If we got all the DKG preprocesses, DKG shares
|
||||
if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
|
||||
// Label::Share since there is no Label::Preprocess for DkgConfirmation since the
|
||||
// preprocess is part of Topic::Dkg Label::Share
|
||||
(Topic::Dkg, Label::Share)
|
||||
} else {
|
||||
(Topic::Dkg, Label::Preprocess)
|
||||
}
|
||||
}
|
||||
}
|
||||
Topic::DkgConfirmation => unreachable!(),
|
||||
// If we got enough participants to move forward, then we expect shares from them all
|
||||
Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
|
||||
};
|
||||
|
||||
let mut did_not_participate = vec![];
|
||||
for expected_participant in expected_participants {
|
||||
if DataDb::get(
|
||||
self.txn,
|
||||
genesis,
|
||||
&DataSpecification {
|
||||
topic: expected_topic,
|
||||
label: expected_label,
|
||||
attempt: prior_attempt,
|
||||
},
|
||||
&expected_participant.to_bytes(),
|
||||
)
|
||||
.is_none()
|
||||
{
|
||||
did_not_participate.push(expected_participant);
|
||||
}
|
||||
}
|
||||
|
||||
// If a supermajority didn't participate as expected, the protocol was likely aborted due
|
||||
// to detection of a completion or some larger networking error
|
||||
// Accordingly, clear did_not_participate
|
||||
// TODO
|
||||
|
||||
// If during the DKG, explicitly mark these people as having been offline
|
||||
// TODO: If they were offline sufficiently long ago, don't strike them off
|
||||
if topic == Topic::Dkg {
|
||||
let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
|
||||
for did_not_participate in did_not_participate {
|
||||
existing.push(did_not_participate.to_bytes());
|
||||
}
|
||||
OfflineDuringDkg::set(self.txn, genesis, &existing);
|
||||
}
|
||||
|
||||
// Slash everyone who didn't participate as expected
|
||||
// This may be overzealous as if a minority detects a completion, they'll abort yet the
|
||||
// supermajority will cause the above allowance to not trigger, causing an honest minority
|
||||
// to be slashed
|
||||
// At the end of the protocol, the accumulated slashes are reduced by the amount obtained
|
||||
// by the worst-performing member of the supermajority, and this is expected to
|
||||
// sufficiently compensate for slashes which occur under normal operation
|
||||
// TODO
|
||||
}
|
||||
|
||||
/*
All of these have the same common flow:

@@ -290,33 +406,62 @@ impl<
*/
match topic {
Topic::Dkg => {
FatalSlashesAsOfDkgAttempt::set(
let mut removed = current_fatal_slashes.clone();

let t = self.spec.t();
{
let mut present_shares = still_present_shares;

// Load the parties marked as offline across the various attempts
let mut offline = OfflineDuringDkg::get(self.txn, genesis)
.unwrap_or(vec![])
.iter()
.map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
.collect::<Vec<_>>();
// Pop from the list to prioritize the removal of those recently offline
while let Some(offline) = offline.pop() {
// Make sure they weren't removed already (such as due to being fatally slashed)
// This also may trigger if they were offline across multiple attempts
if removed.contains(&offline) {
continue;
}

// If we can remove them and still meet the threshold, do so
let original_i_for_offline =
self.spec.i(&[], offline).expect("offline was never present?");
let offline_shares =
u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
if (present_shares - offline_shares) >= t {
present_shares -= offline_shares;
removed.push(offline);
}

// If we've removed as many people as we can, break
if present_shares == t {
break;
}
}
}
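To make the threshold arithmetic in that loop concrete, here is a minimal, self-contained sketch (illustrative names and values only, not the coordinator's actual types): an offline party is only dropped if the shares that remain still meet the threshold `t`.

```
// Illustrative sketch of the removal rule above. `present_shares` and
// `offline_shares` mirror the loop's variables; values are hypothetical.
// (Saturating semantics for the subtraction are elided for brevity.)
fn try_remove(present_shares: &mut u16, offline_shares: u16, t: u16) -> bool {
  // Only remove the offline party if the threshold is still met afterwards
  if (*present_shares - offline_shares) >= t {
    *present_shares -= offline_shares;
    return true;
  }
  false
}

fn main() {
  let t = 4;
  let mut present_shares = 6;
  // A 2-share party may be removed (6 - 2 >= 4)...
  assert!(try_remove(&mut present_shares, 2, t));
  // ... yet a further 1-share party may not (4 - 1 < 4)
  assert!(!try_remove(&mut present_shares, 1, t));
}
```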
RemovedAsOfDkgAttempt::set(
self.txn,
genesis,
attempt,
&FatalSlashes::get(self.txn, genesis).unwrap_or(vec![]),
&removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
);

if DkgCompleted::get(self.txn, genesis).is_none() {
if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
else {
continue;
};

// Since it wasn't completed, instruct the processor to start the next attempt
let id =
processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };

let removed = crate::tributary::latest_removed(self.txn, genesis);
let our_i =
self.spec.i(&removed, Ristretto::generator() * self.our_key.deref()).unwrap();

// TODO: Don't fatal slash, yet don't include, parties who have been offline so long as
// we still meet the needed threshold. This will have to have any parties removed for
// being offline, who aren't participating in the confirmed key, drop the Tributary and
// notify their processor.

// TODO: Instead of DKG confirmations taking a n-of-n MuSig, have it take a t-of-n with
// a specification of those explicitly removed and those removed due to being offline.

let params =
frost::ThresholdParams::new(self.spec.t(), self.spec.n(&removed), our_i.start)
.unwrap();
frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
let shares = u16::from(our_i.end) - u16::from(our_i.start);

self
@@ -328,9 +473,7 @@ impl<
.await;
}
}
Topic::DkgConfirmation => {
panic!("re-attempting DkgConfirmation when we should be re-attempting the Dkg")
}
Topic::DkgConfirmation => unreachable!(),
Topic::SubstrateSign(inner_id) => {
let id = processor_messages::coordinator::SubstrateSignId {
session: self.spec.set().session,
@@ -376,6 +519,24 @@ impl<
.await;
}
}
SubstrateSignableId::SlashReport => {
// If this Tributary hasn't been retired...
// (published SlashReport/took too long to do so)
if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
let report = SlashReport::get(self.txn, self.spec.set())
.expect("re-attempting signing a SlashReport we don't have?");
self
.processors
.send(
self.spec.set().network,
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
id,
report,
},
)
.await;
}
}
}
}
Topic::Sign(id) => {
@@ -398,6 +559,94 @@ impl<
}
}
}
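For context on the `our_i` range arithmetic used above: a validator with weight w occupies w contiguous participant indices, and its share count is the width of that half-open range. A minimal sketch with hypothetical indices (the `frost::ThresholdParams` call itself is elided):

```
// Hypothetical illustration of a share range for a weight-2 validator
// assigned participant indices 3 and 4 (end-exclusive).
use core::ops::Range;

fn shares(range: &Range<u16>) -> u16 {
  // Mirrors `u16::from(our_i.end) - u16::from(our_i.start)` above
  range.end - range.start
}

fn main() {
  let our_i: Range<u16> = 3 .. 5;
  assert_eq!(shares(&our_i), 2);
}
```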
if Some(u64::from(self.block_number)) == SlashReportCutOff::get(self.txn, genesis) {
// Grab every slash report
let mut all_reports = vec![];
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
let Some(mut report) = SlashReports::get(self.txn, genesis, validator.to_bytes()) else {
continue;
};
// Assign them 0 points for themselves
report.insert(i, 0);
// Uses &[] as we only need the length which is independent to who else was removed
let signer_i = self.spec.i(&[], validator).unwrap();
let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
// Push `n` copies, one for each of their shares
for _ in 0 .. signer_len {
all_reports.push(report.clone());
}
}

// For each participant, grab their median
let mut medians = vec![];
for p in 0 .. self.spec.validators().len() {
let mut median_calc = vec![];
for report in &all_reports {
median_calc.push(report[p]);
}
median_calc.sort_unstable();
medians.push(median_calc[median_calc.len() / 2]);
}

// Grab the points of the last party within the best-performing threshold
// This is done by first expanding the point values by the amount of shares
let mut sorted_medians = vec![];
for (i, (_, shares)) in self.spec.validators().into_iter().enumerate() {
for _ in 0 .. shares {
sorted_medians.push(medians[i]);
}
}
// Then performing the sort
sorted_medians.sort_unstable();
let worst_points_by_party_within_threshold = sorted_medians[usize::from(self.spec.t()) - 1];

// Reduce everyone's points by this value
for median in &mut medians {
*median = median.saturating_sub(worst_points_by_party_within_threshold);
}

// The threshold now has the proper incentive to report this as they no longer suffer
// negative effects
//
// Additionally, if all validators had degraded performance, they don't all get penalized for
// what's likely outside their control (as it occurred universally)

// Mark everyone fatally slashed with u32::MAX
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
if FatallySlashed::get(self.txn, genesis, validator.to_bytes()).is_some() {
medians[i] = u32::MAX;
}
}

let mut report = vec![];
for (i, (validator, _)) in self.spec.validators().into_iter().enumerate() {
if medians[i] != 0 {
report.push((validator.to_bytes(), medians[i]));
}
}

// This does lock in the report, meaning further slash point accumulations won't be reported
// They still have value to be locally tracked due to local decisions made based off
// accumulated slash reports
SlashReport::set(self.txn, self.spec.set(), &report);

// Start a signing protocol for this
self
.processors
.send(
self.spec.set().network,
processor_messages::coordinator::CoordinatorMessage::SignSlashReport {
id: SubstrateSignId {
session: self.spec.set().session,
id: SubstrateSignableId::SlashReport,
attempt: 0,
},
report,
},
)
.await;
}
}
}
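A self-contained walk-through of the median-and-reduce step above, under the simplifying assumption of three single-share validators and threshold 2 (all point values are hypothetical):

```
// Hypothetical walk-through of the slash-report aggregation above.
fn main() {
  // Per-validator medians of the reported slash points (already computed)
  let mut medians = vec![0u32, 5, 50];
  let t = 2usize;

  // With one share per validator, the share-expansion step is the identity
  let mut sorted_medians = medians.clone();
  sorted_medians.sort_unstable();
  // Points of the worst-performing member still within the threshold
  let worst_within_threshold = sorted_medians[t - 1]; // 5

  // Reduce everyone's points by that amount
  for median in &mut medians {
    *median = median.saturating_sub(worst_within_threshold);
  }
  // The threshold's own slashes zero out; only the outlier keeps points
  assert_eq!(medians, vec![0, 0, 45]);
}
```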
@@ -439,9 +688,11 @@ pub(crate) async fn handle_new_blocks<
}
}

let mut txn = db.txn();
let mut db_clone = db.clone();
let mut txn = db_clone.txn();
TributaryBlockNumber::set(&mut txn, next, &block_number);
(TributaryBlockHandler {
db,
txn: &mut txn,
spec,
our_key: key,
@@ -453,7 +704,7 @@ pub(crate) async fn handle_new_blocks<
block_number,
_p2p: PhantomData::<P>,
})
.handle::<D>()
.handle()
.await;
last_block = next;
LastHandledBlock::set(&mut txn, genesis, &next);
@@ -215,14 +215,16 @@ fn threshold_i_map_to_keys_and_musig_i_map(
mut map: HashMap<Participant, Vec<u8>>,
) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
// Insert our own index so calculations aren't offset
let our_threshold_i =
spec.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref()).unwrap().start;
let our_threshold_i = spec
.i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
.expect("MuSig t-of-n signing for a protocol we were removed from")
.start;
assert!(map.insert(our_threshold_i, vec![]).is_none());

let spec_validators = spec.validators();
let key_from_threshold_i = |threshold_i| {
for (key, _) in &spec_validators {
if threshold_i == spec.i(removed, *key).unwrap().start {
if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
return *key;
}
}

@@ -137,6 +137,19 @@ impl TributarySpec {
Some(result_i)
}

pub fn reverse_lookup_i(
&self,
removed_validators: &[<Ristretto as Ciphersuite>::G],
i: Participant,
) -> Option<<Ristretto as Ciphersuite>::G> {
for (validator, _) in &self.validators {
if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
return Some(*validator);
}
}
None
}

pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
}
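A standalone analogue of `reverse_lookup_i` may clarify the weighted index mapping; this sketch uses `u8` stand-ins for validator keys and hypothetical weights, purely for illustration:

```
// Standalone analogue of `reverse_lookup_i` over (key, weight) pairs.
fn reverse_lookup(validators: &[(u8, u16)], i: u16) -> Option<u8> {
  let mut start = 1; // Participant indices are 1-indexed
  for (key, weight) in validators {
    let end = start + weight;
    // A validator's shares occupy the half-open range start .. end
    if (start .. end).contains(&i) {
      return Some(*key);
    }
    start = end;
  }
  None
}

fn main() {
  // Validators A (weight 2) and B (weight 1) occupy indices 1..3 and 3..4
  let validators = [(b'A', 2), (b'B', 1)];
  assert_eq!(reverse_lookup(&validators, 2), Some(b'A'));
  assert_eq!(reverse_lookup(&validators, 3), Some(b'B'));
  assert_eq!(reverse_lookup(&validators, 4), None);
}
```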
@@ -131,8 +131,8 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
#[derive(Clone, PartialEq, Eq)]
pub enum Transaction {
RemoveParticipantDueToDkg {
attempt: u32,
participant: Participant,
participant: <Ristretto as Ciphersuite>::G,
signed: Signed,
},

DkgCommitments {
@@ -190,16 +190,18 @@ pub enum Transaction {
first_signer: <Ristretto as Ciphersuite>::G,
signature: SchnorrSignature<Ristretto>,
},

SlashReport(Vec<u32>, Signed),
}

impl Debug for Transaction {
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
match self {
Transaction::RemoveParticipantDueToDkg { attempt, participant } => fmt
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
.debug_struct("Transaction::RemoveParticipantDueToDkg")
.field("participant", participant)
.field("attempt", attempt)
.finish(),
.field("participant", &hex::encode(participant.to_bytes()))
.field("signer", &hex::encode(signed.signer.to_bytes()))
.finish_non_exhaustive(),
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
.debug_struct("Transaction::DkgCommitments")
.field("attempt", attempt)
@@ -244,6 +246,11 @@ impl Debug for Transaction {
.field("plan", &hex::encode(plan))
.field("tx_hash", &hex::encode(tx_hash))
.finish_non_exhaustive(),
Transaction::SlashReport(points, signed) => fmt
.debug_struct("Transaction::SlashReport")
.field("points", points)
.field("signed", signed)
.finish(),
}
}
}
@@ -255,17 +262,8 @@ impl ReadWrite for Transaction {

match kind[0] {
0 => Ok(Transaction::RemoveParticipantDueToDkg {
attempt: {
let mut attempt = [0; 4];
reader.read_exact(&mut attempt)?;
u32::from_le_bytes(attempt)
},
participant: {
let mut participant = [0; 2];
reader.read_exact(&mut participant)?;
Participant::new(u16::from_le_bytes(participant))
.ok_or_else(|| io::Error::other("invalid participant in RemoveParticipantDueToDkg"))?
},
participant: Ristretto::read_G(reader)?,
signed: Signed::read_without_nonce(reader, 0)?,
}),

1 => {
@@ -422,16 +420,35 @@ impl ReadWrite for Transaction {
Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
}
11 => {
let mut len = [0];
reader.read_exact(&mut len)?;
let len = len[0];
// If the set has as many validators as MAX_KEY_SHARES_PER_SET, then the amount of distinct
// validators (the amount of validators reported on) will be at most
// `MAX_KEY_SHARES_PER_SET - 1`
if u32::from(len) > (serai_client::validator_sets::primitives::MAX_KEY_SHARES_PER_SET - 1) {
Err(io::Error::other("more points reported than allowed validators"))?;
}
let mut points = vec![0u32; len.into()];
for points in &mut points {
let mut these_points = [0; 4];
reader.read_exact(&mut these_points)?;
*points = u32::from_le_bytes(these_points);
}
Ok(Transaction::SlashReport(points, Signed::read_without_nonce(reader, 0)?))
}

_ => Err(io::Error::other("invalid transaction type")),
}
}
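The `SlashReport` points encoding read above is one length byte followed by little-endian `u32`s. A minimal round-trip sketch of just that framing (the `Signed` trailer and the length-bound check are elided):

```
use std::io::{self, Read, Write};

// Write: one length byte, then each point as a little-endian u32
fn write_points<W: Write>(points: &[u32], writer: &mut W) -> io::Result<()> {
  writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
  for points in points {
    writer.write_all(&points.to_le_bytes())?;
  }
  Ok(())
}

// Read: the inverse, mirroring the `11 =>` arm above
fn read_points<R: Read>(reader: &mut R) -> io::Result<Vec<u32>> {
  let mut len = [0];
  reader.read_exact(&mut len)?;
  let mut points = vec![0u32; len[0].into()];
  for points in &mut points {
    let mut these_points = [0; 4];
    reader.read_exact(&mut these_points)?;
    *points = u32::from_le_bytes(these_points);
  }
  Ok(points)
}

fn main() -> io::Result<()> {
  let mut buf = vec![];
  write_points(&[3, 0, 7], &mut buf)?;
  assert_eq!(read_points(&mut buf.as_slice())?, vec![3, 0, 7]);
  Ok(())
}
```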
fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
match self {
Transaction::RemoveParticipantDueToDkg { attempt, participant } => {
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
writer.write_all(&[0])?;
writer.write_all(&attempt.to_le_bytes())?;
writer.write_all(&u16::from(*participant).to_le_bytes())
writer.write_all(&participant.to_bytes())?;
signed.write_without_nonce(writer)
}

Transaction::DkgCommitments { attempt, commitments, signed } => {
@@ -457,7 +474,7 @@ impl ReadWrite for Transaction {
writer.write_all(&[2])?;
writer.write_all(&attempt.to_le_bytes())?;

// `shares` is a Vec which is supposed to map to a HashMap<Pariticpant, Vec<u8>>. Since we
// `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
// bound participants to 150, this conversion is safe if a valid in-memory transaction.
writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
// This assumes at least one share is being sent to another party
@@ -538,6 +555,14 @@ impl ReadWrite for Transaction {
writer.write_all(&first_signer.to_bytes())?;
signature.write(writer)
}
Transaction::SlashReport(points, signed) => {
writer.write_all(&[11])?;
writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
for points in points {
writer.write_all(&points.to_le_bytes())?;
}
signed.write_without_nonce(writer)
}
}
}
}
@@ -545,7 +570,9 @@ impl ReadWrite for Transaction {
impl TransactionTrait for Transaction {
fn kind(&self) -> TransactionKind<'_> {
match self {
Transaction::RemoveParticipantDueToDkg { .. } => TransactionKind::Provided("remove"),
Transaction::RemoveParticipantDueToDkg { participant, signed } => {
TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
}

Transaction::DkgCommitments { attempt, commitments: _, signed } |
Transaction::DkgShares { attempt, signed, .. } |
@@ -566,6 +593,10 @@ impl TransactionTrait for Transaction {
TransactionKind::Signed((b"sign", data.plan, data.attempt).encode(), &data.signed)
}
Transaction::SignCompleted { .. } => TransactionKind::Unsigned,

Transaction::SlashReport(_, signed) => {
TransactionKind::Signed(b"slash_report".to_vec(), signed)
}
}
}

@@ -612,10 +643,9 @@ impl Transaction {
key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
) {
fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
#[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
let nonce = match tx {
Transaction::RemoveParticipantDueToDkg { .. } => {
panic!("signing RemoveParticipantDueToDkg")
}
Transaction::RemoveParticipantDueToDkg { .. } => 0,

Transaction::DkgCommitments { .. } => 0,
Transaction::DkgShares { .. } => 1,
@@ -630,13 +660,15 @@ impl Transaction {
Transaction::Sign(data) => data.label.nonce(),

Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),

Transaction::SlashReport(_, _) => 0,
};

(
nonce,
#[allow(clippy::match_same_arms)]
match tx {
Transaction::RemoveParticipantDueToDkg { .. } => panic!("signing RemoveParticipant"),

Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
Transaction::DkgCommitments { ref mut signed, .. } |
Transaction::DkgShares { ref mut signed, .. } |
Transaction::InvalidDkgShare { ref mut signed, .. } |
@@ -651,6 +683,8 @@ impl Transaction {
Transaction::Sign(ref mut data) => &mut data.signed,

Transaction::SignCompleted { .. } => panic!("signing SignCompleted"),

Transaction::SlashReport(_, ref mut signed) => signed,
},
)
}
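One detail worth noting in the `kind()` change above: `RemoveParticipantDueToDkg` is now a signed transaction whose ordering key is the SCALE encoding of a domain tag plus the participant's key bytes, keeping its nonce sequence disjoint from other signed transactions. A hedged sketch of forming such a key, assuming the parity-scale-codec crate (the byte values are hypothetical):

```
use parity_scale_codec::Encode;

fn main() {
  // Hypothetical 32-byte participant key
  let participant_bytes = [0u8; 32];
  // Distinct domains (b"remove", b"sign", ...) keep nonce sequences disjoint
  let order_key = (b"remove", participant_bytes).encode();
  assert!(!order_key.is_empty());
}
```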
@@ -175,9 +175,8 @@ impl<T: TransactionTrait> Block<T> {
mut locally_provided: HashMap<&'static str, VecDeque<T>>,
get_and_increment_nonce: &mut G,
schema: &N::SignatureScheme,
commit: impl Fn(u32) -> Option<Commit<N::SignatureScheme>>,
unsigned_in_chain: impl Fn([u8; 32]) -> bool,
provided_in_chain: impl Fn([u8; 32]) -> bool, // TODO: merge this with unsigned_on_chain?
commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,
provided_or_unsigned_in_chain: impl Fn([u8; 32]) -> bool,
allow_non_local_provided: bool,
) -> Result<(), BlockError> {
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
@@ -213,7 +212,7 @@ impl<T: TransactionTrait> Block<T> {

let current_tx_order = match tx.kind() {
TransactionKind::Provided(order) => {
if provided_in_chain(tx_hash) {
if provided_or_unsigned_in_chain(tx_hash) {
Err(BlockError::ProvidedAlreadyIncluded)?;
}

@@ -233,7 +232,7 @@ impl<T: TransactionTrait> Block<T> {
}
TransactionKind::Unsigned => {
// check we don't already have the tx in the chain
if unsigned_in_chain(tx_hash) || included_in_block.contains(&tx_hash) {
if provided_or_unsigned_in_chain(tx_hash) || included_in_block.contains(&tx_hash) {
Err(BlockError::UnsignedAlreadyIncluded)?;
}
included_in_block.insert(tx_hash);
@@ -18,7 +18,7 @@ pub(crate) struct Blockchain<D: Db, T: TransactionTrait> {
db: Option<D>,
genesis: [u8; 32],

block_number: u32,
block_number: u64,
tip: [u8; 32],
participants: HashSet<<Ristretto as Ciphersuite>::G>,

@@ -38,7 +38,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
fn block_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
D::key(b"tributary_blockchain", b"block", [genesis, hash].concat())
}
fn block_hash_key(genesis: &[u8], block_number: u32) -> Vec<u8> {
fn block_hash_key(genesis: &[u8], block_number: u64) -> Vec<u8> {
D::key(b"tributary_blockchain", b"block_hash", [genesis, &block_number.to_le_bytes()].concat())
}
fn commit_key(genesis: &[u8], hash: &[u8; 32]) -> Vec<u8> {
@@ -88,7 +88,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
let db = res.db.as_ref().unwrap();
db.get(res.block_number_key()).map(|number| (number, db.get(Self::tip_key(genesis)).unwrap()))
} {
res.block_number = u32::from_le_bytes(block_number.try_into().unwrap());
res.block_number = u64::from_le_bytes(block_number.try_into().unwrap());
res.tip.copy_from_slice(&tip);
}

@@ -99,7 +99,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
self.tip
}

pub(crate) fn block_number(&self) -> u32 {
pub(crate) fn block_number(&self) -> u64 {
self.block_number
}

@@ -112,7 +112,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
db.get(Self::commit_key(&genesis, block))
}

pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u32) -> Option<[u8; 32]> {
pub(crate) fn block_hash_from_db(db: &D, genesis: [u8; 32], block: u64) -> Option<[u8; 32]> {
db.get(Self::block_hash_key(&genesis, block)).map(|h| h.try_into().unwrap())
}

@@ -120,11 +120,11 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
Self::commit_from_db(self.db.as_ref().unwrap(), self.genesis, block)
}

pub(crate) fn block_hash(&self, block: u32) -> Option<[u8; 32]> {
pub(crate) fn block_hash(&self, block: u64) -> Option<[u8; 32]> {
Self::block_hash_from_db(self.db.as_ref().unwrap(), self.genesis, block)
}

pub(crate) fn commit_by_block_number(&self, block: u32) -> Option<Vec<u8>> {
pub(crate) fn commit_by_block_number(&self, block: u64) -> Option<Vec<u8>> {
self.commit(&self.block_hash(block)?)
}

@@ -160,16 +160,16 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
let db = self.db.as_ref().unwrap();
let genesis = self.genesis;

let commit = |block: u32| -> Option<Commit<N::SignatureScheme>> {
let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
let hash = Self::block_hash_from_db(db, genesis, block)?;
// we must have a commit per valid hash
let commit = Self::commit_from_db(db, genesis, &hash).unwrap();
// commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
};

let unsigned_in_chain =
|hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();

self.mempool.add::<N, _>(
|signer, order| {
if self.participants.contains(&signer) {
@@ -233,11 +233,11 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
allow_non_local_provided: bool,
) -> Result<(), BlockError> {
let db = self.db.as_ref().unwrap();
let unsigned_in_chain =
|hash: [u8; 32]| db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some();
let provided_in_chain =
|hash: [u8; 32]| db.get(Self::provided_included_key(&self.genesis, &hash)).is_some();
let commit = |block: u32| -> Option<Commit<N::SignatureScheme>> {
let provided_or_unsigned_in_chain = |hash: [u8; 32]| {
db.get(Self::unsigned_included_key(&self.genesis, &hash)).is_some() ||
db.get(Self::provided_included_key(&self.genesis, &hash)).is_some()
};
let commit = |block: u64| -> Option<Commit<N::SignatureScheme>> {
let commit = self.commit_by_block_number(block)?;
// commit has to be valid if it is coming from our db
Some(Commit::<N::SignatureScheme>::decode(&mut commit.as_ref()).unwrap())
@@ -263,8 +263,7 @@ impl<D: Db, T: TransactionTrait> Blockchain<D, T> {
},
schema,
&commit,
unsigned_in_chain,
provided_in_chain,
provided_or_unsigned_in_chain,
allow_non_local_provided,
);
// Drop this TXN's changes as we're solely verifying the block

@@ -182,7 +182,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
let validators = Arc::new(Validators::new(genesis, validators)?);

let mut blockchain = Blockchain::new(db.clone(), genesis, &validators_vec);
let block_number = BlockNumber(blockchain.block_number().into());
let block_number = BlockNumber(blockchain.block_number());

let start_time = if let Some(commit) = blockchain.commit(&blockchain.tip()) {
Commit::<Validators>::decode(&mut commit.as_ref()).unwrap().end_time
@@ -240,7 +240,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
self.genesis
}

pub async fn block_number(&self) -> u32 {
pub async fn block_number(&self) -> u64 {
self.network.blockchain.read().await.block_number()
}
pub async fn tip(&self) -> [u8; 32] {
@@ -314,7 +314,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
return false;
}

let number = BlockNumber((block_number + 1).into());
let number = BlockNumber(block_number + 1);
self.synced_block.write().await.send(SyncedBlock { number, block, commit }).await.unwrap();
result.next().await.unwrap()
}

@@ -114,7 +114,7 @@ impl<D: Db, T: TransactionTrait> Mempool<D, T> {
tx: Transaction<T>,
schema: &N::SignatureScheme,
unsigned_in_chain: impl Fn([u8; 32]) -> bool,
commit: impl Fn(u32) -> Option<Commit<N::SignatureScheme>>,
commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,
) -> Result<bool, TransactionError> {
match &tx {
Transaction::Tendermint(tendermint_tx) => {

@@ -273,11 +273,33 @@ pub struct TendermintNetwork<D: Db, T: TransactionTrait, P: P2p> {
pub(crate) p2p: P,
}
pub const BLOCK_PROCESSING_TIME: u32 = 999;
pub const LATENCY_TIME: u32 = 1667;
// TODO: Add test asserting this
pub const BLOCK_PROCESSING_TIME: u32 = 1000;
pub const LATENCY_TIME: u32 = 3000;
pub const TARGET_BLOCK_TIME: u32 = BLOCK_PROCESSING_TIME + (3 * LATENCY_TIME);

#[test]
fn assert_target_block_time() {
use serai_db::MemDb;

#[derive(Clone, Debug)]
pub struct DummyP2p;

#[async_trait::async_trait]
impl P2p for DummyP2p {
async fn broadcast(&self, _: [u8; 32], _: Vec<u8>) {
unimplemented!()
}
}

// Type parameters don't matter here since we only need to call the block_time()
// and it only relies on the constants of the trait implementation. block_time() is in seconds,
// TARGET_BLOCK_TIME is in milliseconds.
assert_eq!(
<TendermintNetwork<MemDb, TendermintTx, DummyP2p> as Network>::block_time(),
TARGET_BLOCK_TIME / 1000
)
}
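The arithmetic the test above pins down: the old constants gave 999 + (3 × 1667) = 6000 ms, a six-second block time, while the new constants give 1000 + (3 × 3000) = 10000 ms, the ten-second block time this branch is named for. As a standalone check:

```
fn main() {
  // Old constants: 999 + (3 * 1667) = 6000 ms, a six-second block time
  assert_eq!(999 + (3 * 1667), 6_000);
  // New constants: 1000 + (3 * 3000) = 10000 ms, a ten-second block time
  assert_eq!(1000 + (3 * 3000), 10_000);
}
```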
#[async_trait]
impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P> {
type ValidatorId = [u8; 32];
@@ -285,7 +307,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
type Weights = Arc<Validators>;
type Block = TendermintBlock;

// These are in milliseconds and create a six-second block time.
// These are in milliseconds and create a ten-second block time.
// The block time is the latency on message delivery (where a message is some piece of data
// embedded in a transaction) times three plus the block processing time, hence why it should be
// kept low.
@@ -342,7 +364,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Network for TendermintNetwork<D, T, P>
};

// add tx to blockchain and broadcast to peers
// TODO: Make a function out of this following block
let mut to_broadcast = vec![TRANSACTION_MESSAGE];
tx.write(&mut to_broadcast).unwrap();
if self.blockchain.write().await.add_transaction::<Self>(

@@ -12,14 +12,11 @@ use crate::{
};

use tendermint::{
SignedMessageFor, Data,
round::RoundData,
time::CanonicalInstant,
commit_msg,
ext::{Network, Commit, RoundNumber, SignatureScheme},
verify_tendermint_evience,
ext::{Network, Commit},
};

pub use tendermint::Evidence;
pub use tendermint::{Evidence, decode_signed_message};

#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -63,127 +60,16 @@ impl Transaction for TendermintTx {
}
}
pub fn decode_signed_message<N: Network>(
mut data: &[u8],
) -> Result<SignedMessageFor<N>, TransactionError> {
SignedMessageFor::<N>::decode(&mut data).map_err(|_| TransactionError::InvalidContent)
}

fn decode_and_verify_signed_message<N: Network>(
data: &[u8],
schema: &N::SignatureScheme,
) -> Result<SignedMessageFor<N>, TransactionError> {
let msg = decode_signed_message::<N>(data)?;

// verify that evidence messages are signed correctly
if !msg.verify_signature(schema) {
Err(TransactionError::InvalidSignature)?
}
Ok(msg)
}

// TODO: Move this into tendermint-machine
// TODO: Strongly type Evidence, instead of having two messages and no idea what's supposedly
// wrong with them. Doing so will massively simplify the auditability of this (as this
// re-implements an entire foreign library's checks for malicious behavior).
pub(crate) fn verify_tendermint_tx<N: Network>(
tx: &TendermintTx,
schema: &N::SignatureScheme,
commit: impl Fn(u32) -> Option<Commit<N::SignatureScheme>>,
commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,
) -> Result<(), TransactionError> {
tx.verify()?;

match tx {
// TODO: Only allow one evidence per validator, since evidence is fatal
TendermintTx::SlashEvidence(ev) => {
match ev {
Evidence::ConflictingMessages(first, second) => {
let first = decode_and_verify_signed_message::<N>(first, schema)?.msg;
let second = decode_and_verify_signed_message::<N>(second, schema)?.msg;

// Make sure they're distinct messages, from the same sender, within the same block
if (first == second) || (first.sender != second.sender) || (first.block != second.block) {
Err(TransactionError::InvalidContent)?;
}

// Distinct messages within the same step
if !((first.round == second.round) && (first.data.step() == second.data.step())) {
Err(TransactionError::InvalidContent)?;
}
}
Evidence::ConflictingPrecommit(first, second) => {
let first = decode_and_verify_signed_message::<N>(first, schema)?.msg;
let second = decode_and_verify_signed_message::<N>(second, schema)?.msg;

if (first.sender != second.sender) || (first.block != second.block) {
Err(TransactionError::InvalidContent)?;
}

// check whether messages are precommits to different blocks
// The inner signatures don't need to be verified since the outer signatures were
// While the inner signatures may be invalid, that would've yielded an invalid precommit
// signature slash instead of distinct precommit slash
if let Data::Precommit(Some((h1, _))) = first.data {
if let Data::Precommit(Some((h2, _))) = second.data {
if h1 == h2 {
Err(TransactionError::InvalidContent)?;
}
return Ok(());
}
}

// No fault identified
Err(TransactionError::InvalidContent)?
}
Evidence::InvalidPrecommit(msg) => {
let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;

let Data::Precommit(Some((id, sig))) = &msg.data else {
Err(TransactionError::InvalidContent)?
};
// TODO: We need to be passed in the genesis time to handle this edge case
if msg.block.0 == 0 {
todo!("invalid precommit signature on first block")
}

// get the last commit
// TODO: Why do we use u32 when Tendermint uses u64?
let prior_commit = match u32::try_from(msg.block.0 - 1) {
Ok(n) => match commit(n) {
Some(c) => c,
// If we have yet to sync the block in question, we will return InvalidContent based
// on our own temporal ambiguity
// This will also cause an InvalidContent for anything using a non-existent block,
// yet that's valid behavior
// TODO: Double check the ramifications of this
_ => Err(TransactionError::InvalidContent)?,
},
_ => Err(TransactionError::InvalidContent)?,
};

// calculate the end time till the msg round
let mut last_end_time = CanonicalInstant::new(prior_commit.end_time);
for r in 0 ..= msg.round.0 {
last_end_time = RoundData::<N>::new(RoundNumber(r), last_end_time).end_time();
}

// verify that the commit was actually invalid
if schema.verify(msg.sender, &commit_msg(last_end_time.canonical(), id.as_ref()), sig) {
Err(TransactionError::InvalidContent)?
}
}
Evidence::InvalidValidRound(msg) => {
let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;

let Data::Proposal(Some(vr), _) = &msg.data else {
Err(TransactionError::InvalidContent)?
};
if vr.0 < msg.round.0 {
Err(TransactionError::InvalidContent)?
}
}
}
}
TendermintTx::SlashEvidence(ev) => verify_tendermint_evience::<N>(ev, schema, commit)
.map_err(|_| TransactionError::InvalidContent)?,
}

Ok(())
@@ -78,11 +78,10 @@ fn empty_block() {
const GENESIS: [u8; 32] = [0xff; 32];
const LAST: [u8; 32] = [0x01; 32];
let validators = Arc::new(Validators::new(GENESIS, vec![]).unwrap());
let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
let unsigned_in_chain = |_: [u8; 32]| false;
let provided_in_chain = |_: [u8; 32]| false;
let provided_or_unsigned_in_chain = |_: [u8; 32]| false;
Block::<NonceTransaction>::new(LAST, vec![], vec![])
.verify::<N, _>(
GENESIS,
@@ -91,8 +90,7 @@ fn empty_block() {
&mut |_, _| None,
&validators,
commit,
unsigned_in_chain,
provided_in_chain,
provided_or_unsigned_in_chain,
false,
)
.unwrap();
@@ -113,11 +111,10 @@ fn duplicate_nonces() {
insert(NonceTransaction::new(0, 0));
insert(NonceTransaction::new(i, 1));

let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
let unsigned_in_chain = |_: [u8; 32]| false;
let provided_in_chain = |_: [u8; 32]| false;
let provided_or_unsigned_in_chain = |_: [u8; 32]| false;

let mut last_nonce = 0;
let res = Block::new(LAST, vec![], mempool).verify::<N, _>(
@@ -131,8 +128,7 @@ fn duplicate_nonces() {
},
&validators,
commit,
unsigned_in_chain,
provided_in_chain,
provided_or_unsigned_in_chain,
false,
);
if i == 1 {

@@ -28,7 +28,7 @@ fn new_mempool<T: TransactionTrait>() -> ([u8; 32], MemDb, Mempool<MemDb, T>) {
#[tokio::test]
async fn mempool_addition() {
let (genesis, db, mut mempool) = new_mempool::<SignedTransaction>();
let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
let unsigned_in_chain = |_: [u8; 32]| false;
@@ -160,7 +160,7 @@ async fn mempool_addition() {
fn too_many_mempool() {
let (genesis, _, mut mempool) = new_mempool::<SignedTransaction>();
let validators = Arc::new(Validators::new(genesis, vec![]).unwrap());
let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
let unsigned_in_chain = |_: [u8; 32]| false;

@@ -42,7 +42,7 @@ async fn serialize_tendermint() {
async fn invalid_valid_round() {
// signer
let (_, signer, signer_id, validators) = tendermint_meta().await;
let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};

@@ -78,7 +78,7 @@ async fn invalid_valid_round() {
#[tokio::test]
async fn invalid_precommit_signature() {
let (_, signer, signer_id, validators) = tendermint_meta().await;
let commit = |i: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |i: u64| -> Option<Commit<Arc<Validators>>> {
assert_eq!(i, 0);
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
@@ -127,7 +127,7 @@ async fn invalid_precommit_signature() {
#[tokio::test]
async fn evidence_with_prevote() {
let (_, signer, signer_id, validators) = tendermint_meta().await;
let commit = |_: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |_: u64| -> Option<Commit<Arc<Validators>>> {
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
@@ -156,14 +156,6 @@ async fn evidence_with_prevote() {
.await
.encode(),
)));
txs.push(TendermintTx::SlashEvidence(Evidence::ConflictingPrecommit(
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
signed_from_data::<N>(signer.clone().into(), signer_id, 0, 0, Data::Prevote(block_id))
.await
.encode(),
)));
txs
}
};
@@ -180,7 +172,7 @@ async fn evidence_with_prevote() {
#[tokio::test]
async fn conflicting_msgs_evidence_tx() {
let (genesis, signer, signer_id, validators) = tendermint_meta().await;
let commit = |i: u32| -> Option<Commit<Arc<Validators>>> {
let commit = |i: u64| -> Option<Commit<Arc<Validators>>> {
assert_eq!(i, 0);
Some(Commit::<Arc<Validators>> { end_time: 0, validators: vec![], signature: vec![] })
};
@@ -263,34 +255,6 @@ async fn conflicting_msgs_evidence_tx() {
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap_err();
}

// Precommit
{
let sig = signer.sign(&[]).await; // the inner signature doesn't matter

let signed_1 = signed_for_b_r(0, 0, Data::Precommit(Some(([0x11; 32], sig)))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingPrecommit(
signed_1.encode(),
signed_1.encode(),
));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());

// For precommit, the round number is ignored
let signed_2 = signed_for_b_r(0, 1, Data::Precommit(Some(([0x22; 32], sig)))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingPrecommit(
signed_1.encode(),
signed_2.encode(),
));
verify_tendermint_tx::<N>(&tx, &validators, commit).unwrap();

// Yet the block number isn't
let signed_2 = signed_for_b_r(1, 0, Data::Precommit(Some(([0x22; 32], sig)))).await;
let tx = TendermintTx::SlashEvidence(Evidence::ConflictingPrecommit(
signed_1.encode(),
signed_2.encode(),
));
assert!(verify_tendermint_tx::<N>(&tx, &validators, commit).is_err());
}

// msgs from different senders should fail
{
let signed_1 = signed_for_b_r(0, 0, Data::Proposal(None, TendermintBlock(vec![0x11]))).await;
@@ -122,6 +122,9 @@ pub enum TransactionKind<'a> {
/// If a supermajority of validators produce a commit for a block with a provided transaction
/// which isn't locally held, the block will be added to the local chain. When the transaction is
/// locally provided, it will be compared for correctness to the on-chain version
///
/// In order to ensure TXs aren't accidentally provided multiple times, all provided transactions
/// must have a unique hash which is also unique to all Unsigned transactions.
Provided(&'static str),

/// An unsigned transaction, only able to be included by the block producer.
@@ -129,6 +132,8 @@ pub enum TransactionKind<'a> {
/// Once an Unsigned transaction is included on-chain, it may not be included again. In order to
/// have multiple Unsigned transactions with the same values included on-chain, some distinct
/// nonce must be included in order to cause a distinct hash.
///
/// The hash must also be unique with all Provided transactions.
Unsigned,

/// A signed transaction.
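To make the uniqueness requirement in the doc comments above concrete: two Unsigned transactions with identical payloads would collide unless a distinct nonce feeds the hash. A sketch with an illustrative hasher (not the crate's actual transaction hash):

```
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative only: stands in for the actual transaction hash
fn tx_hash(payload: &[u8], nonce: u64) -> u64 {
  let mut hasher = DefaultHasher::new();
  payload.hash(&mut hasher);
  nonce.hash(&mut hasher);
  hasher.finish()
}

fn main() {
  // Same payload, distinct nonces -> distinct hashes, so both may be included
  assert_ne!(tx_hash(b"report", 0), tx_hash(b"report", 1));
}
```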
@@ -19,6 +19,7 @@ pub mod time;
use time::{sys_time, CanonicalInstant};

pub mod round;
use round::RoundData;

mod block;
use block::BlockData;
@@ -103,10 +104,11 @@ impl<V: ValidatorId, B: Block, S: Signature> SignedMessage<V, B, S> {
}
#[derive(Clone, PartialEq, Eq, Debug)]
enum TendermintError<N: Network> {
pub enum TendermintError<N: Network> {
Malicious(N::ValidatorId, Option<Evidence>),
Temporal,
AlreadyHandled,
InvalidEvidence,
}

// Type aliases to abstract over generic hell
@@ -134,11 +136,93 @@ pub enum SlashReason {
#[derive(Clone, PartialEq, Eq, Debug, Encode, Decode)]
pub enum Evidence {
ConflictingMessages(Vec<u8>, Vec<u8>),
ConflictingPrecommit(Vec<u8>, Vec<u8>),
InvalidPrecommit(Vec<u8>),
InvalidValidRound(Vec<u8>),
}

pub fn decode_signed_message<N: Network>(mut data: &[u8]) -> Option<SignedMessageFor<N>> {
SignedMessageFor::<N>::decode(&mut data).ok()
}

fn decode_and_verify_signed_message<N: Network>(
data: &[u8],
schema: &N::SignatureScheme,
) -> Result<SignedMessageFor<N>, TendermintError<N>> {
let msg = decode_signed_message::<N>(data).ok_or(TendermintError::InvalidEvidence)?;

// verify that evidence messages are signed correctly
if !msg.verify_signature(schema) {
Err(TendermintError::InvalidEvidence)?;
}

Ok(msg)
}
pub fn verify_tendermint_evience<N: Network>(
evidence: &Evidence,
schema: &N::SignatureScheme,
commit: impl Fn(u64) -> Option<Commit<N::SignatureScheme>>,
) -> Result<(), TendermintError<N>> {
match evidence {
Evidence::ConflictingMessages(first, second) => {
let first = decode_and_verify_signed_message::<N>(first, schema)?.msg;
let second = decode_and_verify_signed_message::<N>(second, schema)?.msg;

// Make sure they're distinct messages, from the same sender, within the same block
if (first == second) || (first.sender != second.sender) || (first.block != second.block) {
Err(TendermintError::InvalidEvidence)?;
}

// Distinct messages within the same step
if !((first.round == second.round) && (first.data.step() == second.data.step())) {
Err(TendermintError::InvalidEvidence)?;
}
}
Evidence::InvalidPrecommit(msg) => {
let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;

let Data::Precommit(Some((id, sig))) = &msg.data else {
Err(TendermintError::InvalidEvidence)?
};
// TODO: We need to be passed in the genesis time to handle this edge case
if msg.block.0 == 0 {
todo!("invalid precommit signature on first block")
}

// get the last commit
let prior_commit = match commit(msg.block.0 - 1) {
Some(c) => c,
// If we have yet to sync the block in question, we will return InvalidContent based
// on our own temporal ambiguity
// This will also cause an InvalidContent for anything using a non-existent block,
// yet that's valid behavior
// TODO: Double check the ramifications of this
_ => Err(TendermintError::InvalidEvidence)?,
};

// calculate the end time till the msg round
let mut last_end_time = CanonicalInstant::new(prior_commit.end_time);
for r in 0 ..= msg.round.0 {
last_end_time = RoundData::<N>::new(RoundNumber(r), last_end_time).end_time();
}

// verify that the commit was actually invalid
if schema.verify(msg.sender, &commit_msg(last_end_time.canonical(), id.as_ref()), sig) {
Err(TendermintError::InvalidEvidence)?
}
}
Evidence::InvalidValidRound(msg) => {
let msg = decode_and_verify_signed_message::<N>(msg, schema)?.msg;

let Data::Proposal(Some(vr), _) = &msg.data else { Err(TendermintError::InvalidEvidence)? };
if vr.0 < msg.round.0 {
Err(TendermintError::InvalidEvidence)?
}
}
}
Ok(())
}
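For intuition on the end-time accumulation in `Evidence::InvalidPrecommit` above: each round's duration is added onto the prior commit's end time, so the commit message for round r signs over the sum of all round durations through r. A sketch under the assumption of linear per-round growth (the real scaling lives in `RoundData::end_time`; all values are hypothetical):

```
// Hypothetical: base duration `b` for round 0, growing by `l` per round.
fn end_time_of_round(prior_commit_end: u64, r: u64, b: u64, l: u64) -> u64 {
  let mut end = prior_commit_end;
  for round in 0 ..= r {
    end += b + (round * l); // assumed linear growth; see RoundData::end_time
  }
  end
}

fn main() {
  // With a 10 s base and 3 s of added duration per later round:
  assert_eq!(end_time_of_round(100, 0, 10, 3), 110);
  assert_eq!(end_time_of_round(100, 2, 10, 3), 139); // 100 + 10 + 13 + 16
}
```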
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum SlashEvent {
Id(SlashReason, u64, u32),
@@ -543,7 +627,11 @@ impl<N: Network + 'static> TendermintMachine<N> {

self.slash(sender, slash).await
}
Err(TendermintError::Temporal | TendermintError::AlreadyHandled) => (),
Err(
TendermintError::Temporal |
TendermintError::AlreadyHandled |
TendermintError::InvalidEvidence,
) => (),
}
}
}

@@ -3,18 +3,17 @@ use std::{sync::Arc, collections::HashMap};
use log::debug;
use parity_scale_codec::Encode;

use crate::{ext::*, RoundNumber, Step, Data, DataFor, TendermintError, SignedMessageFor, Evidence};
use crate::{ext::*, RoundNumber, Step, DataFor, TendermintError, SignedMessageFor, Evidence};

type RoundLog<N> = HashMap<<N as Network>::ValidatorId, HashMap<Step, SignedMessageFor<N>>>;
pub(crate) struct MessageLog<N: Network> {
weights: Arc<N::Weights>,
precommitted: HashMap<N::ValidatorId, SignedMessageFor<N>>,
pub(crate) log: HashMap<RoundNumber, RoundLog<N>>,
}

impl<N: Network> MessageLog<N> {
pub(crate) fn new(weights: Arc<N::Weights>) -> MessageLog<N> {
MessageLog { weights, precommitted: HashMap::new(), log: HashMap::new() }
MessageLog { weights, log: HashMap::new() }
}

// Returns true if it's a new message
@@ -40,24 +39,6 @@ impl<N: Network> MessageLog<N> {
return Ok(false);
}

// If they already precommitted to a distinct hash, error
if let Data::Precommit(Some((hash, _))) = msg.data {
if let Some(prev) = self.precommitted.get(&msg.sender) {
if let Data::Precommit(Some((prev_hash, _))) = prev.msg.data {
if hash != prev_hash {
debug!(target: "tendermint", "Validator precommitted to multiple blocks");
Err(TendermintError::Malicious(
msg.sender,
Some(Evidence::ConflictingPrecommit(prev.encode(), signed.encode())),
))?;
}
} else {
panic!("message in precommitted wasn't Precommit");
}
}
self.precommitted.insert(msg.sender, signed.clone());
}

msgs.insert(step, signed);
Ok(true)
}
@@ -21,7 +21,7 @@ std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-featu

rand_core = { version = "0.6", default-features = false }

zeroize = { version = "^1.5", default-features = false }
zeroize = { version = "^1.5", default-features = false, features = ["derive"] }
subtle = { version = "^2.4", default-features = false }

digest = { version = "0.10", default-features = false }

@@ -11,7 +11,6 @@ use crate::{
tests::{THRESHOLD, PARTICIPANTS, clone_without},
};

// Needed so rustfmt doesn't fail to format on line length issues
type FrostEncryptedMessage<C> = EncryptedMessage<C, SecretShare<<C as Ciphersuite>::F>>;
type FrostSecretShares<C> = HashMap<Participant, FrostEncryptedMessage<C>>;

@@ -8,6 +8,7 @@ notice = "warn"
unmaintained = "warn"

ignore = [
"RUSTSEC-2021-0139", # https://github.com/serai-dex/serai/228
"RUSTSEC-2022-0061", # https://github.com/serai-dex/serai/227
]

@@ -64,7 +65,7 @@ exceptions = [
{ allow = ["AGPL-3.0"], name = "serai-runtime" },
{ allow = ["AGPL-3.0"], name = "serai-node" },

{ allow = ["AGPL-3.0"], name = "serai-client" },
{ allow = ["AGPL-3.0"], name = "serai-orchestrator" },

{ allow = ["AGPL-3.0"], name = "mini-serai" },

@@ -97,6 +98,7 @@ allow-registry = ["https://github.com/rust-lang/crates.io-index"]
allow-git = [
"https://github.com/rust-lang-nursery/lazy-static.rs",
"https://github.com/serai-dex/substrate-bip39",
"https://github.com/serai-dex/polkadot-sdk",
"https://github.com/serai-dex/substrate",
"https://github.com/monero-rs/base58-monero",
"https://github.com/kayabaNerve/dockertest-rs",
]
@@ -69,25 +69,23 @@ Running tests requires:
- [A rootless Docker setup](https://docs.docker.com/engine/security/rootless/)
- A properly configured Bitcoin regtest node (available via Docker)
- A properly configured Monero regtest node (available via Docker)
- A properly configured monero-wallet-rpc instance
- A debug Serai node (`cd substrate/node && cargo build`)
- A properly configured monero-wallet-rpc instance (available via Docker)

To start the required daemons, one may run:

```
cargo run -p serai-orchestrator -- key_gen dev
cargo run -p serai-orchestrator -- setup dev
```

and then:

```
cargo run -p serai-orchestrator -- start dev bitcoin-daemon monero-daemon monero-wallet-rpc
```

Finally, to run the tests:

```
cargo test --all-features
```

### Run Serai in Development Mode

```
./target/release/serai-node --dev
```

### Run Serai with Orchestration

Under `/orchestration`, you can find our orchestration components for running
the entire infrastructure of Serai in a local environment using Docker Compose
or Kubernetes.

[Run Serai with Docker Compose](../orchestration/README.md)

[Run Serai with Kubernetes](../orchestration/kubernetes/README.md)
|
||||
# Uses a single threaded runtime since this shouldn't ever be CPU-bound
|
||||
tokio = { version = "1", default-features = false, features = ["rt", "time", "io-util", "net", "macros"] }
|
||||
|
||||
zalloc = { path = "../common/zalloc" }
|
||||
serai-db = { path = "../common/db", optional = true }
|
||||
|
||||
serai-env = { path = "../common/env" }
|
||||
|
||||
@@ -68,9 +68,13 @@ impl MessageQueue {
|
||||
async fn send(socket: &mut TcpStream, msg: MessageQueueRequest) -> bool {
|
||||
let msg = borsh::to_vec(&msg).unwrap();
|
||||
let Ok(()) = socket.write_all(&u32::try_from(msg.len()).unwrap().to_le_bytes()).await else {
|
||||
log::warn!("couldn't send the message len");
|
||||
return false;
|
||||
};
|
||||
let Ok(()) = socket.write_all(&msg).await else {
|
||||
log::warn!("couldn't write the message");
|
||||
return false;
|
||||
};
|
||||
let Ok(()) = socket.write_all(&msg).await else { return false };
|
||||
true
|
||||
}
|
||||
|
||||
@@ -118,20 +122,32 @@ impl MessageQueue {
|
||||
'outer: loop {
|
||||
if !first {
|
||||
tokio::time::sleep(core::time::Duration::from_secs(5)).await;
|
||||
continue;
|
||||
}
|
||||
first = false;
|
||||
|
||||
let Ok(mut socket) = TcpStream::connect(&self.url).await else { continue };
|
||||
log::trace!("opening socket to message-queue for next");
|
||||
let mut socket = match TcpStream::connect(&self.url).await {
|
||||
Ok(socket) => socket,
|
||||
Err(e) => {
|
||||
log::warn!("couldn't connect to message-queue server: {e:?}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
log::trace!("opened socket for next");
|
||||
|
||||
loop {
|
||||
if !Self::send(&mut socket, msg.clone()).await {
|
||||
continue 'outer;
|
||||
}
|
||||
let Ok(status) = socket.read_u8().await else {
|
||||
continue 'outer;
|
||||
let status = match socket.read_u8().await {
|
||||
Ok(status) => status,
|
||||
Err(e) => {
|
||||
log::warn!("couldn't read status u8: {e:?}");
|
||||
continue 'outer;
|
||||
}
|
||||
};
|
||||
// If there wasn't a message, check again in 1s
|
||||
// TODO: Use a notification system here
|
||||
if status == 0 {
|
||||
tokio::time::sleep(core::time::Duration::from_secs(1)).await;
|
||||
continue;
|
||||
@@ -143,12 +159,17 @@ impl MessageQueue {
|
||||
// Timeout after 5 seconds in case there's an issue with the length handling
|
||||
let Ok(msg) = tokio::time::timeout(core::time::Duration::from_secs(5), async {
|
||||
// Read the message length
|
||||
let Ok(len) = socket.read_u32_le().await else {
|
||||
return vec![];
|
||||
let len = match socket.read_u32_le().await {
|
||||
Ok(len) => len,
|
||||
Err(e) => {
|
||||
log::warn!("couldn't read len: {e:?}");
|
||||
return vec![];
|
||||
}
|
||||
};
|
||||
let mut buf = vec![0; usize::try_from(len).unwrap()];
|
||||
// Read the message
|
||||
let Ok(_) = socket.read_exact(&mut buf).await else {
|
||||
log::warn!("couldn't read the message");
|
||||
return vec![];
|
||||
};
|
||||
buf
|
||||
|
||||
@@ -1,6 +1,3 @@
|
mod messages;
|
||||
mod queue;
|
||||
|
||||
pub(crate) use std::{
|
||||
sync::{Arc, RwLock},
|
||||
collections::HashMap,
|
||||
@@ -38,6 +35,13 @@ mod clippy {
|
||||
}
|
||||
pub(crate) use self::clippy::*;
|
||||
|
||||
mod messages;
|
||||
mod queue;
|
||||
|
||||
#[global_allocator]
|
||||
static ALLOCATOR: zalloc::ZeroizingAlloc<std::alloc::System> =
|
||||
zalloc::ZeroizingAlloc(std::alloc::System);
|
||||
|
||||
// queue RPC method
|
||||
/*
|
||||
Queues a message to be delivered from a processor to a coordinator, or vice versa.
|
||||
|
||||
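On the `TODO: Use a notification system here` in `MessageQueue::next` above: one possible shape, purely an assumption rather than the repo's design, is to replace the fixed 1 s sleep with a wakeup primitive such as `tokio::sync::Notify` (this sketch assumes tokio's `sync`, `rt`, and `macros` features):

```
use std::sync::Arc;
use tokio::sync::Notify;

#[tokio::main]
async fn main() {
  let notify = Arc::new(Notify::new());
  let waker = notify.clone();

  // Producer: signals instead of relying on the consumer's polling interval
  tokio::spawn(async move {
    // ... enqueue a message ...
    waker.notify_one();
  });

  // Consumer: wakes as soon as a message arrives, no fixed 1 s sleep
  notify.notified().await;
}
```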
orchestration/Cargo.toml (new file, 30 lines)
@@ -0,0 +1,30 @@
[package]
name = "serai-orchestrator"
version = "0.0.1"
description = "Generates Dockerfiles for Serai"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/orchestration/"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
hex = { version = "0.4", default-features = false, features = ["std"] }

zeroize = { version = "1", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std", "getrandom"] }
rand_chacha = { version = "0.3", default-features = false, features = ["std"] }

transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std", "ristretto"] }

zalloc = { path = "../common/zalloc" }

home = "0.5"
@@ -1,6 +0,0 @@
FROM alpine:latest as image

COPY --from=mimalloc libmimalloc.so /usr/lib
ENV LD_PRELOAD=libmimalloc.so

RUN apk update && apk upgrade