Mirror of https://github.com/serai-dex/serai.git (synced 2025-12-13 06:29:25 +00:00)

Compare commits: testnet-2...8c50a31633 (155 commits)
Commits, newest first (author and date columns were empty in the capture; only SHA1s survive):

8c50a31633
d943e037e5
3042697243
8de696f169
b8912e4b7b
89fc88b283
2ae2883106
e74c8f38d5
9e8e134ef7
f08faeadff
1b7613329c
54eefbde0c
58a435d4e9
5ed355902b
fc51c9b71c
9e716c07fc
b5bf70bdb1
d3f0378f66
2f564c230e
12f74e1813
fb7e966b94
65efbf46c7
c5cc0dc883
a6775d7dc5
681010f422
f93bd42b99
c960d6baaf
31ac0ac299
4bd0d71406
ef68885600
00dc3087bd
eca82f3f7b
05c26d7818
96175e115d
fa31f26397
7710da4db3
b7103038cb
b6e688076f
b8472963c9
772d033bb2
49a183194d
db31809708
dcc26ecf33
d5205ce231
0f6878567f
880565cb81
6f34c2ff77
1493f49416
2ccb0cd90d
b33a6487aa
491500057b
d9f85fab26
7d2d739042
40cc180853
2aac6f6998
149c2a4437
e772b8a5f7
c0200df75a
9955ef54a5
8e7e61adbd
0cb24dde02
97bfb183e8
85fc31fd82
7b8bcae396
70fe52437c
ba657e23d1
32c24917c4
4ba961b2cb
c59be46e2f
2c165e19ae
ee10692b23
7a68b065e0
3ddf1eec0c
84f0e6c26e
5bb3256d1f
774424b70b
ed662568e2
b744ac9a76
d7f7f69738
a2c3aba82b
703c6a2358
52bb918cc9
ba244e8090
3e99d68cfe
4d9c2df38c
8ab6f9c36e
253cf3253d
03445b3020
9af111b4aa
41ce5b1738
2a05cf3225
f4147c39b2
cd69f3b9d6
1d2beb3ee4
ac709b2945
a473800c26
09aac20293
f93214012d
400319cd29
a0a7d63dad
fb7d12ee6e
11ec9e3535
ae8a27b876
af79586488
d27d93480a
02c4417a46
79a79db399
0c9dd5048e
5501de1f3a
21123590bb
bc1dec7991
cef63a631a
d57fef8999
d1474e9188
b39c751403
cc7202e0bf
19e68f7f75
d94c9a4a5e
43dc036660
95591218bb
7dd587a864
023275bcb6
8cef9eff6f
b5e22dca8f
a41329c027
a25e6330bd
558a2bfa46
c73acb3d62
933b17aa91
5fa7e3d450
749d783b1e
5a3ea80943
fddbebc7c0
e01848aa9e
320b5627b5
be7780e69d
0ddbaefb38
0f0db14f05
43083dfd49
523d2ac911
fd4f247917
ac9e356af4
bba7d2a356
4c349ae605
a4428761f7
940e9553fd
593aefd229
5830c2463d
bcc88c3e86
fea16df567
4960c3222e
6b4df4f2c0
dac46c8d7d
db2e8376df
33dd412e67
.github/actions/bitcoin/action.yml (vendored, 4 changes)

@@ -5,7 +5,7 @@ inputs:
   version:
     description: "Version to download and run"
     required: false
-    default: 24.0.1
+    default: "27.0"
 
 runs:
   using: "composite"
@@ -37,4 +37,4 @@ runs:
 
     - name: Bitcoin Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/bitcoin/run.sh -daemon
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon

.github/actions/monero/action.yml (vendored, 2 changes)

@@ -43,4 +43,4 @@ runs:
 
     - name: Monero Regtest Daemon
       shell: bash
-      run: PATH=$PATH:/usr/bin ./orchestration/dev/coins/monero/run.sh --detach
+      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach

.github/actions/test-dependencies/action.yml (vendored, 6 changes)

@@ -10,7 +10,7 @@ inputs:
   bitcoin-version:
     description: "Bitcoin version to download and run as a regtest node"
     required: false
-    default: 24.0.1
+    default: "27.1"
 
 runs:
   using: "composite"
@@ -19,9 +19,9 @@ runs:
       uses: ./.github/actions/build-dependencies
 
     - name: Install Foundry
-      uses: foundry-rs/foundry-toolchain@cb603ca0abb544f301eaed59ac0baf579aa6aecf
+      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
       with:
-        version: nightly-09fe3e041369a816365a020f715ad6f94dbce9f2
+        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
         cache: false
 
    - name: Run a Monero Regtest Node

.github/nightly-version (vendored, 2 changes)

@@ -1 +1 @@
-nightly-2024-02-07
+nightly-2024-07-01

.github/workflows/coins-tests.yml (vendored, 35 changes, deleted)

@@ -1,35 +0,0 @@
-name: coins/ Tests
-
-on:
-  push:
-    branches:
-      - develop
-    paths:
-      - "common/**"
-      - "crypto/**"
-      - "coins/**"
-
-  pull_request:
-    paths:
-      - "common/**"
-      - "crypto/**"
-      - "coins/**"
-
-  workflow_dispatch:
-
-jobs:
-  test-coins:
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
-
-      - name: Test Dependencies
-        uses: ./.github/actions/test-dependencies
-
-      - name: Run Tests
-        run: |
-          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
-            -p bitcoin-serai \
-            -p ethereum-serai \
-            -p monero-generators \
-            -p monero-serai

.github/workflows/common-tests.yml (vendored, 3 changes)

@@ -28,4 +28,5 @@ jobs:
           -p std-shims \
           -p zalloc \
           -p serai-db \
-          -p serai-env
+          -p serai-env \
+          -p simple-request

.github/workflows/coordinator-tests.yml (vendored, 6 changes)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "coordinator/**"
       - "orchestration/**"
@@ -18,7 +18,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "coordinator/**"
       - "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
      - name: Run coordinator Docker tests
-        run: cd tests/coordinator && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests

.github/workflows/crypto-tests.yml (vendored, 4 changes)

@@ -35,6 +35,10 @@ jobs:
           -p multiexp \
           -p schnorr-signatures \
           -p dleq \
+          -p generalized-bulletproofs \
+          -p generalized-bulletproofs-circuit-abstraction \
+          -p ec-divisors \
+          -p generalized-bulletproofs-ec-gadgets \
           -p dkg \
           -p modular-frost \
           -p frost-schnorrkel

.github/workflows/full-stack-tests.yml (vendored, 2 changes)

@@ -19,4 +19,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
      - name: Run Full Stack Docker tests
-        run: cd tests/full-stack && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests

.github/workflows/message-queue-tests.yml (vendored, 2 changes)

@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
      - name: Run message-queue Docker tests
-        run: cd tests/message-queue && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests

.github/workflows/monero-tests.yaml (vendored, 35 changes)

@@ -5,12 +5,12 @@ on:
     branches:
       - develop
     paths:
-      - "coins/monero/**"
+      - "networks/monero/**"
       - "processor/**"
 
   pull_request:
     paths:
-      - "coins/monero/**"
+      - "networks/monero/**"
       - "processor/**"
 
   workflow_dispatch:
@@ -26,7 +26,22 @@ jobs:
         uses: ./.github/actions/test-dependencies
 
      - name: Run Unit Tests Without Features
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib
 
      # Doesn't run unit tests with features as the tests workflow will
 
@@ -46,11 +61,17 @@ jobs:
           monero-version: ${{ matrix.version }}
 
      - name: Run Integration Tests Without Features
-        # Runs with the binaries feature so the binaries build
-        # https://github.com/rust-lang/cargo/issues/8396
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --features binaries --test '*'
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'
 
      - name: Run Integration Tests
        # Don't run if the the tests workflow also will
        if: ${{ matrix.version != 'v0.18.2.0' }}
-        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'

.github/workflows/networks-tests.yml (vendored, new file, 51 changes)

@@ -0,0 +1,51 @@
+name: networks/ Tests
+
+on:
+  push:
+    branches:
+      - develop
+    paths:
+      - "common/**"
+      - "crypto/**"
+      - "networks/**"
+
+  pull_request:
+    paths:
+      - "common/**"
+      - "crypto/**"
+      - "networks/**"
+
+  workflow_dispatch:
+
+jobs:
+  test-networks:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
+
+      - name: Test Dependencies
+        uses: ./.github/actions/test-dependencies
+
+      - name: Run Tests
+        run: |
+          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
+            -p bitcoin-serai \
+            -p alloy-simple-request-transport \
+            -p ethereum-serai \
+            -p serai-ethereum-relayer \
+            -p monero-io \
+            -p monero-generators \
+            -p monero-primitives \
+            -p monero-mlsag \
+            -p monero-clsag \
+            -p monero-borromean \
+            -p monero-bulletproofs \
+            -p monero-serai \
+            -p monero-rpc \
+            -p monero-simple-request-rpc \
+            -p monero-address \
+            -p monero-wallet \
+            -p monero-seed \
+            -p polyseed \
+            -p monero-wallet-util \
+            -p monero-serai-verify-chain

.github/workflows/no-std.yml (vendored, 6 changes)

@@ -7,14 +7,14 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "tests/no-std/**"
 
   pull_request:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "tests/no-std/**"
 
   workflow_dispatch:
@@ -32,4 +32,4 @@ jobs:
         run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf
 
      - name: Verify no-std builds
-        run: cd tests/no-std && CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf
+        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests

.github/workflows/processor-tests.yml (vendored, 6 changes)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "orchestration/**"
@@ -18,7 +18,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "orchestration/**"
@@ -37,4 +37,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
      - name: Run processor Docker tests
-        run: cd tests/processor && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests

.github/workflows/reproducible-runtime.yml (vendored, 2 changes)

@@ -33,4 +33,4 @@ jobs:
         uses: ./.github/actions/build-dependencies
 
      - name: Run Reproducible Runtime tests
-        run: cd tests/reproducible-runtime && GITHUB_CI=true RUST_BACKTRACE=1 cargo test
+        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests

.github/workflows/tests.yml (vendored, 7 changes)

@@ -7,7 +7,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "coordinator/**"
@@ -17,7 +17,7 @@ on:
     paths:
       - "common/**"
       - "crypto/**"
-      - "coins/**"
+      - "networks/**"
       - "message-queue/**"
       - "processor/**"
       - "coordinator/**"
@@ -43,6 +43,7 @@ jobs:
           -p tendermint-machine \
           -p tributary-chain \
           -p serai-coordinator \
+          -p serai-orchestrator \
           -p serai-docker-tests
 
   test-substrate:
@@ -64,7 +65,9 @@ jobs:
           -p serai-validator-sets-pallet \
           -p serai-in-instructions-primitives \
           -p serai-in-instructions-pallet \
+          -p serai-signals-primitives \
           -p serai-signals-pallet \
+          -p serai-abi \
           -p serai-runtime \
           -p serai-node

Cargo.lock (generated, 3016 changes)

File diff suppressed because it is too large.

Cargo.toml (68 changes)

@@ -2,6 +2,8 @@
 resolver = "2"
 members = [
   # Version patches
+  "patches/parking_lot_core",
+  "patches/parking_lot",
   "patches/zstd",
   "patches/rocksdb",
   "patches/proc-macro-crate",
@@ -28,17 +30,42 @@ members = [
   "crypto/ciphersuite",
 
   "crypto/multiexp",
 
   "crypto/schnorr",
   "crypto/dleq",
 
+  "crypto/evrf/secq256k1",
+  "crypto/evrf/embedwards25519",
+  "crypto/evrf/generalized-bulletproofs",
+  "crypto/evrf/circuit-abstraction",
+  "crypto/evrf/divisors",
+  "crypto/evrf/ec-gadgets",
+
   "crypto/dkg",
   "crypto/frost",
   "crypto/schnorrkel",
 
-  "coins/bitcoin",
-  "coins/ethereum",
-  "coins/monero/generators",
-  "coins/monero",
+  "networks/bitcoin",
+
+  "networks/ethereum/alloy-simple-request-transport",
+  "networks/ethereum",
+  "networks/ethereum/relayer",
+
+  "networks/monero/io",
+  "networks/monero/generators",
+  "networks/monero/primitives",
+  "networks/monero/ringct/mlsag",
+  "networks/monero/ringct/clsag",
+  "networks/monero/ringct/borromean",
+  "networks/monero/ringct/bulletproofs",
+  "networks/monero",
+  "networks/monero/rpc",
+  "networks/monero/rpc/simple-request",
+  "networks/monero/wallet/address",
+  "networks/monero/wallet",
+  "networks/monero/wallet/seed",
+  "networks/monero/wallet/polyseed",
+  "networks/monero/wallet/util",
+  "networks/monero/verify-chain",
 
   "message-queue",
 
@@ -54,12 +81,14 @@ members = [
   "substrate/coins/primitives",
   "substrate/coins/pallet",
 
-  "substrate/in-instructions/primitives",
-  "substrate/in-instructions/pallet",
+  "substrate/dex/pallet",
 
   "substrate/validator-sets/primitives",
   "substrate/validator-sets/pallet",
 
+  "substrate/in-instructions/primitives",
+  "substrate/in-instructions/pallet",
+
   "substrate/signals/primitives",
   "substrate/signals/pallet",
 
@@ -88,18 +117,32 @@ members = [
 # to the extensive operations required for Bulletproofs
 [profile.dev.package]
 subtle = { opt-level = 3 }
-curve25519-dalek = { opt-level = 3 }
 
 ff = { opt-level = 3 }
 group = { opt-level = 3 }
 
 crypto-bigint = { opt-level = 3 }
+secp256k1 = { opt-level = 3 }
+curve25519-dalek = { opt-level = 3 }
 dalek-ff-group = { opt-level = 3 }
 minimal-ed448 = { opt-level = 3 }
 
 multiexp = { opt-level = 3 }
 
-monero-serai = { opt-level = 3 }
+secq256k1 = { opt-level = 3 }
+embedwards25519 = { opt-level = 3 }
+generalized-bulletproofs = { opt-level = 3 }
+generalized-bulletproofs-circuit-abstraction = { opt-level = 3 }
+ec-divisors = { opt-level = 3 }
+generalized-bulletproofs-ec-gadgets = { opt-level = 3 }
+
+dkg = { opt-level = 3 }
+
+monero-generators = { opt-level = 3 }
+monero-borromean = { opt-level = 3 }
+monero-bulletproofs = { opt-level = 3 }
+monero-mlsag = { opt-level = 3 }
+monero-clsag = { opt-level = 3 }
 
 [profile.release]
 panic = "unwind"
@@ -109,8 +152,10 @@ panic = "unwind"
 lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }
 
 # Needed due to dockertest's usage of `Rc`s when we need `Arc`s
-dockertest = { git = "https://github.com/kayabaNerve/dockertest-rs", branch = "arc" }
+dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }
 
+parking_lot_core = { path = "patches/parking_lot_core" }
+parking_lot = { path = "patches/parking_lot" }
 # wasmtime pulls in an old version for this
 zstd = { path = "patches/zstd" }
 # Needed for WAL compression
@@ -131,6 +176,9 @@ matches = { path = "patches/matches" }
 option-ext = { path = "patches/option-ext" }
 directories-next = { path = "patches/directories-next" }
 
+# The official pasta_curves repo doesn't support Zeroize
+pasta_curves = { git = "https://github.com/kayabaNerve/pasta_curves", rev = "a46b5be95cacbff54d06aad8d3bbcba42e05d616" }
+
 [workspace.lints.clippy]
 unwrap_or_default = "allow"
 borrow_as_ptr = "deny"

README.md

@@ -24,7 +24,7 @@ wallet.
 infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
 needed for Bitcoin-Monero atomic swaps.
 
-- `coins`: Various coin libraries intended for usage in Serai yet also by the
+- `networks`: Various libraries intended for usage in Serai yet also by the
   wider community. This means they will always support the functionality Serai
   needs, yet won't disadvantage other use cases when possible.
 
Cypher Stack audit README (old path, deleted)

@@ -1,6 +0,0 @@
-# Cypher Stack /coins/bitcoin Audit, August 2023
-
-This audit was over the /coins/bitcoin folder. It is encompassing up to commit
-5121ca75199dff7bd34230880a1fdd793012068c.
-
-Please see https://github.com/cypherstack/serai-btc-audit for provenance.
Cypher Stack audit README (new path, new file)

@@ -0,0 +1,7 @@
+# Cypher Stack /networks/bitcoin Audit, August 2023
+
+This audit was over the `/networks/bitcoin` folder (at the time located at
+`/coins/bitcoin`). It is encompassing up to commit
+5121ca75199dff7bd34230880a1fdd793012068c.
+
+Please see https://github.com/cypherstack/serai-btc-audit for provenance.

coins/ethereum/.gitignore (vendored, 7 changes, deleted)

@@ -1,7 +0,0 @@
-# Solidity build outputs
-cache
-artifacts
-
-# Auto-generated ABI files
-src/abi/schnorr.rs
-src/abi/router.rs

coins/ethereum/Cargo.toml (deleted)

@@ -1,45 +0,0 @@
-[package]
-name = "ethereum-serai"
-version = "0.1.0"
-description = "An Ethereum library supporting Schnorr signing and on-chain verification"
-license = "AGPL-3.0-only"
-repository = "https://github.com/serai-dex/serai/tree/develop/coins/ethereum"
-authors = ["Luke Parker <lukeparker5132@gmail.com>", "Elizabeth Binks <elizabethjbinks@gmail.com>"]
-edition = "2021"
-publish = false
-rust-version = "1.74"
-
-[package.metadata.docs.rs]
-all-features = true
-rustdoc-args = ["--cfg", "docsrs"]
-
-[lints]
-workspace = true
-
-[dependencies]
-thiserror = { version = "1", default-features = false }
-eyre = { version = "0.6", default-features = false }
-
-sha3 = { version = "0.10", default-features = false, features = ["std"] }
-
-group = { version = "0.13", default-features = false }
-k256 = { version = "^0.13.1", default-features = false, features = ["std", "ecdsa"] }
-frost = { package = "modular-frost", path = "../../crypto/frost", features = ["secp256k1", "tests"] }
-
-ethers-core = { version = "2", default-features = false }
-ethers-providers = { version = "2", default-features = false }
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
-[build-dependencies]
-ethers-contract = { version = "2", default-features = false, features = ["abigen", "providers"] }
-
-[dev-dependencies]
-rand_core = { version = "0.6", default-features = false, features = ["std"] }
-
-hex = { version = "0.4", default-features = false, features = ["std"] }
-serde = { version = "1", default-features = false, features = ["std"] }
-serde_json = { version = "1", default-features = false, features = ["std"] }
-
-sha2 = { version = "0.10", default-features = false, features = ["std"] }
-
-tokio = { version = "1", features = ["macros"] }

coins/ethereum/README.md (deleted)

@@ -1,9 +0,0 @@
-# Ethereum
-
-This package contains Ethereum-related functionality, specifically deploying and
-interacting with Serai contracts.
-
-### Dependencies
-
-- solc
-- [Foundry](https://github.com/foundry-rs/foundry)

coins/ethereum/contracts/Router.sol (deleted)

@@ -1,90 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-import "./Schnorr.sol";
-
-contract Router is Schnorr {
-  // Contract initializer
-  // TODO: Replace with a MuSig of the genesis validators
-  address public initializer;
-
-  // Nonce is incremented for each batch of transactions executed
-  uint256 public nonce;
-
-  // fixed parity for the public keys used in this contract
-  uint8 constant public KEY_PARITY = 27;
-
-  // current public key's x-coordinate
-  // note: this key must always use the fixed parity defined above
-  bytes32 public seraiKey;
-
-  struct OutInstruction {
-    address to;
-    uint256 value;
-    bytes data;
-  }
-
-  struct Signature {
-    bytes32 c;
-    bytes32 s;
-  }
-
-  // success is a uint256 representing a bitfield of transaction successes
-  event Executed(uint256 nonce, bytes32 batch, uint256 success);
-
-  // error types
-  error NotInitializer();
-  error AlreadyInitialized();
-  error InvalidKey();
-  error TooManyTransactions();
-
-  constructor() {
-    initializer = msg.sender;
-  }
-
-  // initSeraiKey can be called by the contract initializer to set the first
-  // public key, only if the public key has yet to be set.
-  function initSeraiKey(bytes32 _seraiKey) external {
-    if (msg.sender != initializer) revert NotInitializer();
-    if (seraiKey != 0) revert AlreadyInitialized();
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    seraiKey = _seraiKey;
-  }
-
-  // updateSeraiKey validates the given Schnorr signature against the current public key,
-  // and if successful, updates the contract's public key to the given one.
-  function updateSeraiKey(
-    bytes32 _seraiKey,
-    Signature memory sig
-  ) public {
-    if (_seraiKey == bytes32(0)) revert InvalidKey();
-    bytes32 message = keccak256(abi.encodePacked("updateSeraiKey", _seraiKey));
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-    seraiKey = _seraiKey;
-  }
-
-  // execute accepts a list of transactions to execute as well as a Schnorr signature.
-  // if signature verification passes, the given transactions are executed.
-  // if signature verification fails, this function will revert.
-  function execute(
-    OutInstruction[] calldata transactions,
-    Signature memory sig
-  ) public {
-    if (transactions.length > 256) revert TooManyTransactions();
-
-    bytes32 message = keccak256(abi.encode("execute", nonce, transactions));
-    // This prevents re-entrancy from causing double spends yet does allow
-    // out-of-order execution via re-entrancy
-    nonce++;
-    if (!verify(KEY_PARITY, seraiKey, message, sig.c, sig.s)) revert InvalidSignature();
-
-    uint256 successes;
-    for(uint256 i = 0; i < transactions.length; i++) {
-      (bool success, ) = transactions[i].to.call{value: transactions[i].value, gas: 200_000}(transactions[i].data);
-      assembly {
-        successes := or(successes, shl(i, success))
-      }
-    }
-    emit Executed(nonce, message, successes);
-  }
-}

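Two implementation notes on the deleted Router (an explanatory aside, not part of the diff). First, `nonce` is incremented before signature verification, so a re-entrant call into `execute` would sign over a different message and cannot double-execute a batch, though, as the contract's own comment notes, re-entrancy can still reorder execution. Second, the `Executed` event packs per-transaction success into a bitfield, bit i for transaction i; the 256-transaction cap exists because that bitfield is a single uint256. A minimal Rust sketch of the encoding (the function name is hypothetical):

// Hypothetical mirror of Router.execute's success bitfield: bit i is set
// iff transactions[i]'s call succeeded. The contract uses a uint256; a
// u128 suffices for illustration.
fn success_bitfield(results: &[bool]) -> u128 {
    results
        .iter()
        .enumerate()
        .fold(0u128, |acc, (i, &ok)| acc | (u128::from(ok) << i))
}

fn main() {
    // If the first and third of three transactions succeed, the bitfield is 0b101 = 5.
    assert_eq!(success_bitfield(&[true, false, true]), 0b101);
}
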
coins/ethereum/contracts/Schnorr.sol (deleted)

@@ -1,39 +0,0 @@
-// SPDX-License-Identifier: AGPLv3
-pragma solidity ^0.8.0;
-
-// see https://github.com/noot/schnorr-verify for implementation details
-contract Schnorr {
-  // secp256k1 group order
-  uint256 constant public Q =
-    0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFEBAAEDCE6AF48A03BBFD25E8CD0364141;
-
-  error InvalidSOrA();
-  error InvalidSignature();
-
-  // parity := public key y-coord parity (27 or 28)
-  // px := public key x-coord
-  // message := 32-byte hash of the message
-  // c := schnorr signature challenge
-  // s := schnorr signature
-  function verify(
-    uint8 parity,
-    bytes32 px,
-    bytes32 message,
-    bytes32 c,
-    bytes32 s
-  ) public view returns (bool) {
-    // ecrecover = (m, v, r, s);
-    bytes32 sa = bytes32(Q - mulmod(uint256(s), uint256(px), Q));
-    bytes32 ca = bytes32(Q - mulmod(uint256(c), uint256(px), Q));
-
-    if (sa == 0) revert InvalidSOrA();
-    // the ecrecover precompile implementation checks that the `r` and `s`
-    // inputs are non-zero (in this case, `px` and `ca`), thus we don't need to
-    // check if they're zero.
-    address R = ecrecover(sa, parity, px, ca);
-    if (R == address(0)) revert InvalidSignature();
-    return c == keccak256(
-      abi.encodePacked(R, uint8(parity), px, block.chainid, message)
-    );
-  }
-}

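For readers unfamiliar with the trick this deleted contract relies on (an explanatory sketch, not part of the diff): the `ecrecover` precompile, given $(m, v, r, s)$, returns the address of the point $r^{-1}(sR' - mG)$, where $R'$ is the curve point with x-coordinate $r$ and y-parity $v$. `verify` passes $m = -s\,p_x$, $r = p_x$, and $s = -c\,p_x$ (all mod $Q$), so $R'$ is the public key $A$ and the recovered point is

\[
  p_x^{-1}\bigl((-c\,p_x)A - (-s\,p_x)G\bigr) = sG - cA,
\]

which equals the signature's nonce commitment $R$ exactly when the Schnorr verification equation $sG = R + cA$ holds. The final comparison recomputes the challenge $c' = \mathrm{keccak256}(\mathrm{address}(R) \,\|\, v \,\|\, p_x \,\|\, \mathrm{chainid} \,\|\, m)$ and accepts iff $c' = c$; since the precompile only returns an address, the challenge binds to $\mathrm{address}(R)$ rather than to $R$ itself.
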
coins/ethereum/src/abi/mod.rs (deleted)

@@ -1,6 +0,0 @@
-#[rustfmt::skip]
-#[allow(clippy::all)]
-pub(crate) mod schnorr;
-#[rustfmt::skip]
-#[allow(clippy::all)]
-pub(crate) mod router;

coins/ethereum/src/crypto.rs (deleted)

@@ -1,91 +0,0 @@
-use sha3::{Digest, Keccak256};
-
-use group::ff::PrimeField;
-use k256::{
-  elliptic_curve::{
-    bigint::ArrayEncoding, ops::Reduce, point::AffineCoordinates, sec1::ToEncodedPoint,
-  },
-  ProjectivePoint, Scalar, U256,
-};
-
-use frost::{
-  algorithm::{Hram, SchnorrSignature},
-  curve::Secp256k1,
-};
-
-pub(crate) fn keccak256(data: &[u8]) -> [u8; 32] {
-  Keccak256::digest(data).into()
-}
-
-pub(crate) fn address(point: &ProjectivePoint) -> [u8; 20] {
-  let encoded_point = point.to_encoded_point(false);
-  // Last 20 bytes of the hash of the concatenated x and y coordinates
-  // We obtain the concatenated x and y coordinates via the uncompressed encoding of the point
-  keccak256(&encoded_point.as_ref()[1 .. 65])[12 ..].try_into().unwrap()
-}
-
-#[allow(non_snake_case)]
-pub struct PublicKey {
-  pub A: ProjectivePoint,
-  pub px: Scalar,
-  pub parity: u8,
-}
-
-impl PublicKey {
-  #[allow(non_snake_case)]
-  pub fn new(A: ProjectivePoint) -> Option<PublicKey> {
-    let affine = A.to_affine();
-    let parity = u8::from(bool::from(affine.y_is_odd())) + 27;
-    if parity != 27 {
-      None?;
-    }
-
-    let x_coord = affine.x();
-    let x_coord_scalar = <Scalar as Reduce<U256>>::reduce_bytes(&x_coord);
-    // Return None if a reduction would occur
-    if x_coord_scalar.to_repr() != x_coord {
-      None?;
-    }
-
-    Some(PublicKey { A, px: x_coord_scalar, parity })
-  }
-}
-
-#[derive(Clone, Default)]
-pub struct EthereumHram {}
-impl Hram<Secp256k1> for EthereumHram {
-  #[allow(non_snake_case)]
-  fn hram(R: &ProjectivePoint, A: &ProjectivePoint, m: &[u8]) -> Scalar {
-    let a_encoded_point = A.to_encoded_point(true);
-    let mut a_encoded = a_encoded_point.as_ref().to_owned();
-    a_encoded[0] += 25; // Ethereum uses 27/28 for point parity
-    assert!((a_encoded[0] == 27) || (a_encoded[0] == 28));
-    let mut data = address(R).to_vec();
-    data.append(&mut a_encoded);
-    data.extend(m);
-    Scalar::reduce(U256::from_be_slice(&keccak256(&data)))
-  }
-}
-
-pub struct Signature {
-  pub(crate) c: Scalar,
-  pub(crate) s: Scalar,
-}
-impl Signature {
-  pub fn new(
-    public_key: &PublicKey,
-    chain_id: U256,
-    m: &[u8],
-    signature: SchnorrSignature<Secp256k1>,
-  ) -> Option<Signature> {
-    let c = EthereumHram::hram(
-      &signature.R,
-      &public_key.A,
-      &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-    );
-    if !signature.verify(public_key.A, c) {
-      None?;
-    }
-    Some(Signature { c, s: signature.s })
-  }
-}

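A note on why `PublicKey::new` above is restrictive (an explanatory aside, not part of the diff): the on-chain verifier feeds `px` to `ecrecover` as an ECDSA `r` value and hard-codes parity 27, so a usable key must have an even y-coordinate and an x-coordinate that is already a canonical scalar. For a uniformly sampled key,

\[
  \Pr[\text{usable}] \approx \frac{1}{2} \cdot \frac{n}{p} \approx \frac{1}{2},
\]

since secp256k1's group order $n$ is only negligibly smaller than its field prime $p$; the offset loop in `tests::key_gen` further below therefore expects roughly two candidate keys before finding a usable one.
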
coins/ethereum/src/lib.rs (deleted)

@@ -1,16 +0,0 @@
-use thiserror::Error;
-
-pub mod crypto;
-
-pub(crate) mod abi;
-pub mod schnorr;
-pub mod router;
-
-#[cfg(test)]
-mod tests;
-
-#[derive(Error, Debug)]
-pub enum Error {
-  #[error("failed to verify Schnorr signature")]
-  InvalidSignature,
-}

coins/ethereum/src/router.rs (deleted)

@@ -1,30 +0,0 @@
-pub use crate::abi::router::*;
-
-/*
-use crate::crypto::{ProcessedSignature, PublicKey};
-use ethers::{contract::ContractFactory, prelude::*, solc::artifacts::contract::ContractBytecode};
-use eyre::Result;
-use std::{convert::From, fs::File, sync::Arc};
-
-pub async fn router_update_public_key<M: Middleware + 'static>(
-  contract: &Router<M>,
-  public_key: &PublicKey,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.update_public_key(public_key.px.to_bytes().into(), signature.into());
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-
-pub async fn router_execute<M: Middleware + 'static>(
-  contract: &Router<M>,
-  txs: Vec<Rtransaction>,
-  signature: &ProcessedSignature,
-) -> std::result::Result<Option<TransactionReceipt>, eyre::ErrReport> {
-  let tx = contract.execute(txs, signature.into()).send();
-  let pending_tx = tx.send().await?;
-  let receipt = pending_tx.await?;
-  Ok(receipt)
-}
-*/

coins/ethereum/src/schnorr.rs (deleted)

@@ -1,34 +0,0 @@
-use eyre::{eyre, Result};
-
-use group::ff::PrimeField;
-
-use ethers_providers::{Provider, Http};
-
-use crate::{
-  Error,
-  crypto::{keccak256, PublicKey, Signature},
-};
-pub use crate::abi::schnorr::*;
-
-pub async fn call_verify(
-  contract: &Schnorr<Provider<Http>>,
-  public_key: &PublicKey,
-  message: &[u8],
-  signature: &Signature,
-) -> Result<()> {
-  if contract
-    .verify(
-      public_key.parity,
-      public_key.px.to_repr().into(),
-      keccak256(message),
-      signature.c.to_repr().into(),
-      signature.s.to_repr().into(),
-    )
-    .call()
-    .await?
-  {
-    Ok(())
-  } else {
-    Err(eyre!(Error::InvalidSignature))
-  }
-}

coins/ethereum/src/tests/crypto.rs (deleted)

@@ -1,132 +0,0 @@
-use rand_core::OsRng;
-
-use sha2::Sha256;
-use sha3::{Digest, Keccak256};
-
-use group::Group;
-use k256::{
-  ecdsa::{hazmat::SignPrimitive, signature::DigestVerifier, SigningKey, VerifyingKey},
-  elliptic_curve::{bigint::ArrayEncoding, ops::Reduce, point::DecompressPoint},
-  U256, Scalar, AffinePoint, ProjectivePoint,
-};
-
-use frost::{
-  curve::Secp256k1,
-  algorithm::{Hram, IetfSchnorr},
-  tests::{algorithm_machines, sign},
-};
-
-use crate::{crypto::*, tests::key_gen};
-
-pub fn hash_to_scalar(data: &[u8]) -> Scalar {
-  Scalar::reduce(U256::from_be_slice(&keccak256(data)))
-}
-
-pub(crate) fn ecrecover(message: Scalar, v: u8, r: Scalar, s: Scalar) -> Option<[u8; 20]> {
-  if r.is_zero().into() || s.is_zero().into() || !((v == 27) || (v == 28)) {
-    return None;
-  }
-
-  #[allow(non_snake_case)]
-  let R = AffinePoint::decompress(&r.to_bytes(), (v - 27).into());
-  #[allow(non_snake_case)]
-  if let Some(R) = Option::<AffinePoint>::from(R) {
-    #[allow(non_snake_case)]
-    let R = ProjectivePoint::from(R);
-
-    let r = r.invert().unwrap();
-    let u1 = ProjectivePoint::GENERATOR * (-message * r);
-    let u2 = R * (s * r);
-    let key: ProjectivePoint = u1 + u2;
-    if !bool::from(key.is_identity()) {
-      return Some(address(&key));
-    }
-  }
-
-  None
-}
-
-#[test]
-fn test_ecrecover() {
-  let private = SigningKey::random(&mut OsRng);
-  let public = VerifyingKey::from(&private);
-
-  // Sign the signature
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let (sig, recovery_id) = private
-    .as_nonzero_scalar()
-    .try_sign_prehashed_rfc6979::<Sha256>(&Keccak256::digest(MESSAGE), b"")
-    .unwrap();
-
-  // Sanity check the signature verifies
-  #[allow(clippy::unit_cmp)] // Intended to assert this wasn't changed to Result<bool>
-  {
-    assert_eq!(public.verify_digest(Keccak256::new_with_prefix(MESSAGE), &sig).unwrap(), ());
-  }
-
-  // Perform the ecrecover
-  assert_eq!(
-    ecrecover(
-      hash_to_scalar(MESSAGE),
-      u8::from(recovery_id.unwrap().is_y_odd()) + 27,
-      *sig.r(),
-      *sig.s()
-    )
-    .unwrap(),
-    address(&ProjectivePoint::from(public.as_affine()))
-  );
-}
-
-// Run the sign test with the EthereumHram
-#[test]
-fn test_signing() {
-  let (keys, _) = key_gen();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let _sig =
-    sign(&mut OsRng, &algo, keys.clone(), algorithm_machines(&mut OsRng, &algo, &keys), MESSAGE);
-}
-
-#[allow(non_snake_case)]
-pub fn preprocess_signature_for_ecrecover(
-  R: ProjectivePoint,
-  public_key: &PublicKey,
-  chain_id: U256,
-  m: &[u8],
-  s: Scalar,
-) -> (u8, Scalar, Scalar) {
-  let c = EthereumHram::hram(
-    &R,
-    &public_key.A,
-    &[chain_id.to_be_byte_array().as_slice(), &keccak256(m)].concat(),
-  );
-  let sa = -(s * public_key.px);
-  let ca = -(c * public_key.px);
-  (public_key.parity, sa, ca)
-}
-
-#[test]
-fn test_ecrecover_hack() {
-  let (keys, public_key) = key_gen();
-
-  const MESSAGE: &[u8] = b"Hello, World!";
-  let hashed_message = keccak256(MESSAGE);
-  let chain_id = U256::ONE;
-  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, &keys),
-    full_message,
-  );
-
-  let (parity, sa, ca) =
-    preprocess_signature_for_ecrecover(sig.R, &public_key, chain_id, MESSAGE, sig.s);
-  let q = ecrecover(sa, parity, public_key.px, ca).unwrap();
-  assert_eq!(q, address(&sig.R));
-}

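For reference, the identity behind the `ecrecover` implementation above (standard ECDSA key recovery, not part of the diff):

\[
  u_1 + u_2 = (-m\,r^{-1})G + (s\,r^{-1})R = r^{-1}(sR - mG),
\]

and for a valid signature with $R = kG$ and $s = k^{-1}(m + r\,d)$, this collapses to $dG$, the signer's public key. `test_ecrecover_hack` instead feeds in the preprocessed Schnorr values $(-s\,p_x,\ \text{parity},\ p_x,\ -c\,p_x)$, so the "recovered" address is that of the Schnorr nonce $R$, matching the on-chain contract's behavior.
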
coins/ethereum/src/tests/mod.rs (deleted)

@@ -1,92 +0,0 @@
-use std::{sync::Arc, time::Duration, fs::File, collections::HashMap};
-
-use rand_core::OsRng;
-
-use group::ff::PrimeField;
-use k256::{Scalar, ProjectivePoint};
-use frost::{curve::Secp256k1, Participant, ThresholdKeys, tests::key_gen as frost_key_gen};
-
-use ethers_core::{
-  types::{H160, Signature as EthersSignature},
-  abi::Abi,
-};
-use ethers_contract::ContractFactory;
-use ethers_providers::{Middleware, Provider, Http};
-
-use crate::crypto::PublicKey;
-
-mod crypto;
-mod schnorr;
-mod router;
-
-pub fn key_gen() -> (HashMap<Participant, ThresholdKeys<Secp256k1>>, PublicKey) {
-  let mut keys = frost_key_gen::<_, Secp256k1>(&mut OsRng);
-  let mut group_key = keys[&Participant::new(1).unwrap()].group_key();
-
-  let mut offset = Scalar::ZERO;
-  while PublicKey::new(group_key).is_none() {
-    offset += Scalar::ONE;
-    group_key += ProjectivePoint::GENERATOR;
-  }
-  for keys in keys.values_mut() {
-    *keys = keys.offset(offset);
-  }
-  let public_key = PublicKey::new(group_key).unwrap();
-
-  (keys, public_key)
-}
-
-// TODO: Replace with a contract deployment from an unknown account, so the environment solely has
-// to fund the deployer, not create/pass a wallet
-// TODO: Deterministic deployments across chains
-pub async fn deploy_contract(
-  chain_id: u32,
-  client: Arc<Provider<Http>>,
-  wallet: &k256::ecdsa::SigningKey,
-  name: &str,
-) -> eyre::Result<H160> {
-  let abi: Abi =
-    serde_json::from_reader(File::open(format!("./artifacts/{name}.abi")).unwrap()).unwrap();
-
-  let hex_bin_buf = std::fs::read_to_string(format!("./artifacts/{name}.bin")).unwrap();
-  let hex_bin =
-    if let Some(stripped) = hex_bin_buf.strip_prefix("0x") { stripped } else { &hex_bin_buf };
-  let bin = hex::decode(hex_bin).unwrap();
-  let factory = ContractFactory::new(abi, bin.into(), client.clone());
-
-  let mut deployment_tx = factory.deploy(())?.tx;
-  deployment_tx.set_chain_id(chain_id);
-  deployment_tx.set_gas(1_000_000);
-  let (max_fee_per_gas, max_priority_fee_per_gas) = client.estimate_eip1559_fees(None).await?;
-  deployment_tx.as_eip1559_mut().unwrap().max_fee_per_gas = Some(max_fee_per_gas);
-  deployment_tx.as_eip1559_mut().unwrap().max_priority_fee_per_gas = Some(max_priority_fee_per_gas);
-
-  let sig_hash = deployment_tx.sighash();
-  let (sig, rid) = wallet.sign_prehash_recoverable(sig_hash.as_ref()).unwrap();
-
-  // EIP-155 v
-  let mut v = u64::from(rid.to_byte());
-  assert!((v == 0) || (v == 1));
-  v += u64::from((chain_id * 2) + 35);
-
-  let r = sig.r().to_repr();
-  let r_ref: &[u8] = r.as_ref();
-  let s = sig.s().to_repr();
-  let s_ref: &[u8] = s.as_ref();
-  let deployment_tx =
-    deployment_tx.rlp_signed(&EthersSignature { r: r_ref.into(), s: s_ref.into(), v });
-
-  let pending_tx = client.send_raw_transaction(deployment_tx).await?;
-
-  let mut receipt;
-  while {
-    receipt = client.get_transaction_receipt(pending_tx.tx_hash()).await?;
-    receipt.is_none()
-  } {
-    tokio::time::sleep(Duration::from_secs(6)).await;
-  }
-  let receipt = receipt.unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  Ok(receipt.contract_address.unwrap())
-}

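The `// EIP-155 v` block above applies the replay-protection formula (a reference note, not part of the diff):

\[
  v = \mathrm{recovery\_id} + 2 \cdot \mathrm{chain\_id} + 35,
\]

so, for example, chain id 1 yields $v \in \{37, 38\}$. Pre-EIP-155 legacy signatures use 27/28 instead, which is the same 27/28 convention the crypto code reuses for public-key parity.
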
coins/ethereum/src/tests/router.rs (deleted)

@@ -1,109 +0,0 @@
-use std::{convert::TryFrom, sync::Arc, collections::HashMap};
-
-use rand_core::OsRng;
-
-use group::ff::PrimeField;
-use frost::{
-  curve::Secp256k1,
-  Participant, ThresholdKeys,
-  algorithm::IetfSchnorr,
-  tests::{algorithm_machines, sign},
-};
-
-use ethers_core::{
-  types::{H160, U256, Bytes},
-  abi::AbiEncode,
-  utils::{Anvil, AnvilInstance},
-};
-use ethers_providers::{Middleware, Provider, Http};
-
-use crate::{
-  crypto::{keccak256, PublicKey, EthereumHram, Signature},
-  router::{self, *},
-  tests::{key_gen, deploy_contract},
-};
-
-async fn setup_test() -> (
-  u32,
-  AnvilInstance,
-  Router<Provider<Http>>,
-  HashMap<Participant, ThresholdKeys<Secp256k1>>,
-  PublicKey,
-) {
-  let anvil = Anvil::new().spawn();
-
-  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
-  let chain_id = provider.get_chainid().await.unwrap().as_u32();
-  let wallet = anvil.keys()[0].clone().into();
-  let client = Arc::new(provider);
-
-  let contract_address =
-    deploy_contract(chain_id, client.clone(), &wallet, "Router").await.unwrap();
-  let contract = Router::new(contract_address, client.clone());
-
-  let (keys, public_key) = key_gen();
-
-  // Set the key to the threshold keys
-  let tx = contract.init_serai_key(public_key.px.to_repr().into()).gas(100_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = pending_tx.await.unwrap().unwrap();
-  assert!(receipt.status == Some(1.into()));
-
-  (chain_id, anvil, contract, keys, public_key)
-}
-
-#[tokio::test]
-async fn test_deploy_contract() {
-  setup_test().await;
-}
-
-pub fn hash_and_sign(
-  keys: &HashMap<Participant, ThresholdKeys<Secp256k1>>,
-  public_key: &PublicKey,
-  chain_id: U256,
-  message: &[u8],
-) -> Signature {
-  let hashed_message = keccak256(message);
-
-  let mut chain_id_bytes = [0; 32];
-  chain_id.to_big_endian(&mut chain_id_bytes);
-  let full_message = &[chain_id_bytes.as_slice(), &hashed_message].concat();
-
-  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
-  let sig = sign(
-    &mut OsRng,
-    &algo,
-    keys.clone(),
-    algorithm_machines(&mut OsRng, &algo, keys),
-    full_message,
-  );
-
-  Signature::new(public_key, k256::U256::from_words(chain_id.0), message, sig).unwrap()
-}
-
-#[tokio::test]
-async fn test_router_execute() {
-  let (chain_id, _anvil, contract, keys, public_key) = setup_test().await;
-
-  let to = H160([0u8; 20]);
-  let value = U256([0u64; 4]);
-  let data = Bytes::from([0]);
-  let tx = OutInstruction { to, value, data: data.clone() };
-
-  let nonce_call = contract.nonce();
-  let nonce = nonce_call.call().await.unwrap();
-
-  let encoded =
-    ("execute".to_string(), nonce, vec![router::OutInstruction { to, value, data }]).encode();
-  let sig = hash_and_sign(&keys, &public_key, chain_id.into(), &encoded);
-
-  let tx = contract
-    .execute(vec![tx], router::Signature { c: sig.c.to_repr().into(), s: sig.s.to_repr().into() })
-    .gas(300_000);
-  let pending_tx = tx.send().await.unwrap();
-  let receipt = dbg!(pending_tx.await.unwrap().unwrap());
-  assert!(receipt.status == Some(1.into()));
-
-  println!("gas used: {:?}", receipt.cumulative_gas_used);
-  println!("logs: {:?}", receipt.logs);
-}

@@ -1,67 +0,0 @@
use std::{convert::TryFrom, sync::Arc};

use rand_core::OsRng;

use ::k256::{elliptic_curve::bigint::ArrayEncoding, U256, Scalar};

use ethers_core::utils::{keccak256, Anvil, AnvilInstance};
use ethers_providers::{Middleware, Provider, Http};

use frost::{
  curve::Secp256k1,
  algorithm::IetfSchnorr,
  tests::{algorithm_machines, sign},
};

use crate::{
  crypto::*,
  schnorr::*,
  tests::{key_gen, deploy_contract},
};

async fn setup_test() -> (u32, AnvilInstance, Schnorr<Provider<Http>>) {
  let anvil = Anvil::new().spawn();

  let provider = Provider::<Http>::try_from(anvil.endpoint()).unwrap();
  let chain_id = provider.get_chainid().await.unwrap().as_u32();
  let wallet = anvil.keys()[0].clone().into();
  let client = Arc::new(provider);

  let contract_address =
    deploy_contract(chain_id, client.clone(), &wallet, "Schnorr").await.unwrap();
  let contract = Schnorr::new(contract_address, client.clone());
  (chain_id, anvil, contract)
}

#[tokio::test]
async fn test_deploy_contract() {
  setup_test().await;
}

#[tokio::test]
async fn test_ecrecover_hack() {
  let (chain_id, _anvil, contract) = setup_test().await;
  let chain_id = U256::from(chain_id);

  let (keys, public_key) = key_gen();

  const MESSAGE: &[u8] = b"Hello, World!";
  let hashed_message = keccak256(MESSAGE);
  let full_message = &[chain_id.to_be_byte_array().as_slice(), &hashed_message].concat();

  let algo = IetfSchnorr::<Secp256k1, EthereumHram>::ietf();
  let sig = sign(
    &mut OsRng,
    &algo,
    keys.clone(),
    algorithm_machines(&mut OsRng, &algo, &keys),
    full_message,
  );
  let sig = Signature::new(&public_key, chain_id, MESSAGE, sig).unwrap();

  call_verify(&contract, &public_key, MESSAGE, &sig).await.unwrap();
  // Test an invalid signature fails
  let mut sig = sig;
  sig.s += Scalar::ONE;
  assert!(call_verify(&contract, &public_key, MESSAGE, &sig).await.is_err());
}
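// Note the message signed in both test files is chain-bound:
// m = BE_256(chain_id) || keccak256(message), per the full_message construction above,
// so a signature produced for one chain ID can't be replayed under another.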
@@ -1,113 +0,0 @@
[package]
name = "monero-serai"
version = "0.1.4-alpha"
description = "A modern Monero transaction library"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/coins/monero"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"
rust-version = "1.74"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
std-shims = { path = "../../common/std-shims", version = "^0.1.1", default-features = false }

async-trait = { version = "0.1", default-features = false }
thiserror = { version = "1", default-features = false, optional = true }

zeroize = { version = "^1.5", default-features = false, features = ["zeroize_derive"] }
subtle = { version = "^2.4", default-features = false }

rand_core = { version = "0.6", default-features = false }
# Used to send transactions
rand = { version = "0.8", default-features = false }
rand_chacha = { version = "0.3", default-features = false }
# Used to select decoys
rand_distr = { version = "0.4", default-features = false }

sha3 = { version = "0.10", default-features = false }
pbkdf2 = { version = "0.12", features = ["simple"], default-features = false }

curve25519-dalek = { version = "4", default-features = false, features = ["alloc", "zeroize", "precomputed-tables"] }

# Used for the hash to curve, along with the more complicated proofs
group = { version = "0.13", default-features = false }
dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.4", default-features = false }
multiexp = { path = "../../crypto/multiexp", version = "0.4", default-features = false, features = ["batch"] }

# Needed for multisig
transcript = { package = "flexible-transcript", path = "../../crypto/transcript", version = "0.3", default-features = false, features = ["recommended"], optional = true }
dleq = { path = "../../crypto/dleq", version = "0.4", default-features = false, features = ["serialize"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", version = "0.8", default-features = false, features = ["ed25519"], optional = true }

monero-generators = { path = "generators", version = "0.4", default-features = false }

async-lock = { version = "3", default-features = false, optional = true }

hex-literal = "0.4"
hex = { version = "0.4", default-features = false, features = ["alloc"] }
serde = { version = "1", default-features = false, features = ["derive", "alloc"] }
serde_json = { version = "1", default-features = false, features = ["alloc"] }

base58-monero = { version = "2", default-features = false, features = ["check"] }

# Used for the provided HTTP RPC
digest_auth = { version = "0.3", default-features = false, optional = true }
simple-request = { path = "../../common/request", version = "0.1", default-features = false, features = ["tls"], optional = true }
tokio = { version = "1", default-features = false, optional = true }

[build-dependencies]
dalek-ff-group = { path = "../../crypto/dalek-ff-group", version = "0.4", default-features = false }
monero-generators = { path = "generators", version = "0.4", default-features = false }

[dev-dependencies]
tokio = { version = "1", features = ["sync", "macros"] }

frost = { package = "modular-frost", path = "../../crypto/frost", features = ["tests"] }

[features]
std = [
  "std-shims/std",

  "thiserror",

  "zeroize/std",
  "subtle/std",

  "rand_core/std",
  "rand/std",
  "rand_chacha/std",
  "rand_distr/std",

  "sha3/std",
  "pbkdf2/std",

  "multiexp/std",

  "transcript/std",
  "dleq/std",

  "monero-generators/std",

  "async-lock?/std",

  "hex/std",
  "serde/std",
  "serde_json/std",

  "base58-monero/std",
]

cache-distribution = ["async-lock"]
http-rpc = ["digest_auth", "simple-request", "tokio"]
multisig = ["transcript", "frost", "dleq", "std"]
binaries = ["tokio/rt-multi-thread", "tokio/macros", "http-rpc"]
experimental = []

default = ["std", "http-rpc"]
@@ -1,49 +0,0 @@
# monero-serai

A modern Monero transaction library intended for usage in wallets. It prides
itself on accuracy, correctness, and removing common pitfalls developers may
face.

monero-serai also offers the following features:

- Featured Addresses
- A FROST-based multisig orders of magnitude more performant than Monero's

### Purpose and support

monero-serai was written for Serai, a decentralized exchange aiming to support
Monero. Despite this, monero-serai is intended to be a widely usable library,
accurate to Monero. monero-serai guarantees the functionality needed for Serai,
yet doesn't withhold functionality from other users.

Various legacy transaction formats are not currently implemented, yet we are
willing to add support for them. There are no active development efforts around
them, however.

### Caveats

This library DOES attempt to do the following:

- Create on-chain transactions identical to how wallet2 would (unless told not
  to)
- Not be detectable as monero-serai when scanning outputs
- Not reveal spent outputs to the connected RPC node

This library DOES NOT attempt to do the following:

- Have identical RPC behavior when creating transactions
- Be a wallet

This means that monero-serai shouldn't be fingerprintable on-chain. It also
shouldn't be fingerprintable under a targeted attack attempting to detect
whether the receiving wallet is monero-serai or wallet2. It should also be
generally safe for usage with remote nodes.

It won't hide the fact it's monero-serai from remote nodes, however,
potentially allowing a remote node to profile you. The implications of this are
left to the user to consider.

It also won't act as a wallet, just as a transaction library. wallet2 has
several *non-transaction-level* policies, such as always attempting to use two
inputs to create transactions. These are considered out of scope for
monero-serai.
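As a minimal sketch of that transaction-library scope, using only RPC calls
exercised by this repository's own binaries (the node URL is a placeholder, and
the default `http-rpc` feature plus tokio's macros are assumed):

```rust
use monero_serai::rpc::HttpRpc;

#[tokio::main]
async fn main() {
  // Connect to a Monero node (placeholder URL).
  let rpc = HttpRpc::new("http://node.example.com:18081".to_string()).await.unwrap();
  // Query the chain height, then fetch the tip's hash.
  let height = rpc.get_height().await.unwrap();
  let hash = rpc.get_block_hash(height - 1).await.unwrap();
  println!("height {height}, tip {}", hex::encode(hash));
}
```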
@@ -1,67 +0,0 @@
use std::{
  io::Write,
  env,
  path::Path,
  fs::{File, remove_file},
};

use dalek_ff_group::EdwardsPoint;

use monero_generators::bulletproofs_generators;

fn serialize(generators_string: &mut String, points: &[EdwardsPoint]) {
  for generator in points {
    generators_string.extend(
      format!(
        "
        dalek_ff_group::EdwardsPoint(
          curve25519_dalek::edwards::CompressedEdwardsY({:?}).decompress().unwrap()
        ),
        ",
        generator.compress().to_bytes()
      )
      .chars(),
    );
  }
}

fn generators(prefix: &'static str, path: &str) {
  let generators = bulletproofs_generators(prefix.as_bytes());
  #[allow(non_snake_case)]
  let mut G_str = String::new();
  serialize(&mut G_str, &generators.G);
  #[allow(non_snake_case)]
  let mut H_str = String::new();
  serialize(&mut H_str, &generators.H);

  let path = Path::new(&env::var("OUT_DIR").unwrap()).join(path);
  let _ = remove_file(&path);
  File::create(&path)
    .unwrap()
    .write_all(
      format!(
        "
        pub(crate) static GENERATORS_CELL: OnceLock<Generators> = OnceLock::new();
        pub fn GENERATORS() -> &'static Generators {{
          GENERATORS_CELL.get_or_init(|| Generators {{
            G: vec![
              {G_str}
            ],
            H: vec![
              {H_str}
            ],
          }})
        }}
        ",
      )
      .as_bytes(),
    )
    .unwrap();
}

fn main() {
  println!("cargo:rerun-if-changed=build.rs");

  generators("bulletproof", "generators.rs");
  generators("bulletproof_plus", "generators_plus.rs");
}
@@ -1,7 +0,0 @@
# Monero Generators

Generators used by Monero in both its Pedersen commitments and Bulletproofs(+).
An implementation of Monero's `ge_fromfe_frombytes_vartime`, simply called
`hash_to_point` here, is included, as it's needed to generate these generators.

This library is usable under no-std when the `std` feature is disabled.
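A minimal sketch of the exported API (names per this crate's `lib.rs`; the
domain-separation tag mirrors the one the build script uses):

```rust
use monero_generators::{H, bulletproofs_generators, hash_to_point};

fn main() {
  // Monero's alternate generator H, used for amounts in Pedersen commitments.
  let h = H();
  // The Bulletproofs(+) generator set for a given domain-separation tag.
  let gens = bulletproofs_generators(b"bulletproof");
  assert_eq!(gens.G.len(), gens.H.len());
  // hash_to_point maps 32 bytes to a curve point, as ge_fromfe_frombytes_vartime does.
  let _point = hash_to_point(h.compress().to_bytes());
}
```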
@@ -1,79 +0,0 @@
//! Generators used by Monero in both its Pedersen commitments and Bulletproofs(+).
//!
//! An implementation of Monero's `ge_fromfe_frombytes_vartime`, simply called
//! `hash_to_point` here, is included, as it's needed to generate these generators.

#![cfg_attr(not(feature = "std"), no_std)]

use std_shims::{sync::OnceLock, vec::Vec};

use sha3::{Digest, Keccak256};

use curve25519_dalek::edwards::{EdwardsPoint as DalekPoint};

use group::{Group, GroupEncoding};
use dalek_ff_group::EdwardsPoint;

mod varint;
use varint::write_varint;

mod hash_to_point;
pub use hash_to_point::{hash_to_point, decompress_point};

#[cfg(test)]
mod tests;

fn hash(data: &[u8]) -> [u8; 32] {
  Keccak256::digest(data).into()
}

static H_CELL: OnceLock<DalekPoint> = OnceLock::new();
/// Monero's alternate generator `H`, used for amounts in Pedersen commitments.
#[allow(non_snake_case)]
pub fn H() -> DalekPoint {
  *H_CELL.get_or_init(|| {
    decompress_point(hash(&EdwardsPoint::generator().to_bytes())).unwrap().mul_by_cofactor()
  })
}

static H_POW_2_CELL: OnceLock<[DalekPoint; 64]> = OnceLock::new();
/// Monero's alternate generator `H`, multiplied by 2**i for i in 0 .. 64.
#[allow(non_snake_case)]
pub fn H_pow_2() -> &'static [DalekPoint; 64] {
  H_POW_2_CELL.get_or_init(|| {
    let mut res = [H(); 64];
    for i in 1 .. 64 {
      res[i] = res[i - 1] + res[i - 1];
    }
    res
  })
}

const MAX_M: usize = 16;
const N: usize = 64;
const MAX_MN: usize = MAX_M * N;

/// Container struct for Bulletproofs(+) generators.
#[allow(non_snake_case)]
pub struct Generators {
  pub G: Vec<EdwardsPoint>,
  pub H: Vec<EdwardsPoint>,
}

/// Generate generators as needed for Bulletproofs(+), as Monero does.
pub fn bulletproofs_generators(dst: &'static [u8]) -> Generators {
  let mut res = Generators { G: Vec::with_capacity(MAX_MN), H: Vec::with_capacity(MAX_MN) };
  for i in 0 .. MAX_MN {
    let i = 2 * i;

    let mut even = H().compress().to_bytes().to_vec();
    even.extend(dst);
    let mut odd = even.clone();

    write_varint(&i.try_into().unwrap(), &mut even).unwrap();
    write_varint(&(i + 1).try_into().unwrap(), &mut odd).unwrap();
    res.H.push(EdwardsPoint(hash_to_point(hash(&even))));
    res.G.push(EdwardsPoint(hash_to_point(hash(&odd))));
  }
  res
}
@@ -1 +0,0 @@
mod hash_to_point;
@@ -1,16 +0,0 @@
use std_shims::io::{self, Write};

const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;
pub(crate) fn write_varint<W: Write>(varint: &u64, w: &mut W) -> io::Result<()> {
  let mut varint = *varint;
  while {
    let mut b = u8::try_from(varint & u64::from(!VARINT_CONTINUATION_MASK)).unwrap();
    varint >>= 7;
    if varint != 0 {
      b |= VARINT_CONTINUATION_MASK;
    }
    w.write_all(&[b])?;
    varint != 0
  } {}
  Ok(())
}
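// A worked example of the 7-bit continuation encoding above (illustrative test,
// not part of the original file): 300 = 0b1_0010_1100 encodes as its low seven
// bits with the continuation bit set (0xAC), then the remaining bits (0x02).
#[test]
fn varint_example() {
  let mut buf = vec![];
  write_varint(&300, &mut buf).unwrap();
  assert_eq!(buf, [0xAC, 0x02]);
}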
@@ -1,321 +0,0 @@
#[cfg(feature = "binaries")]
mod binaries {
  pub(crate) use std::sync::Arc;

  pub(crate) use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

  pub(crate) use multiexp::BatchVerifier;

  pub(crate) use serde::Deserialize;
  pub(crate) use serde_json::json;

  pub(crate) use monero_serai::{
    Commitment,
    ringct::RctPrunable,
    transaction::{Input, Transaction},
    block::Block,
    rpc::{RpcError, Rpc, HttpRpc},
  };

  pub(crate) use monero_generators::decompress_point;

  pub(crate) use tokio::task::JoinHandle;

  pub(crate) async fn check_block(rpc: Arc<Rpc<HttpRpc>>, block_i: usize) {
    let hash = loop {
      match rpc.get_block_hash(block_i).await {
        Ok(hash) => break hash,
        Err(RpcError::ConnectionError(e)) => {
          println!("get_block_hash ConnectionError: {e}");
          continue;
        }
        Err(e) => panic!("couldn't get block {block_i}'s hash: {e:?}"),
      }
    };

    // TODO: Grab the JSON to also check it was deserialized correctly
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String,
    }
    let res: BlockResponse = loop {
      match rpc.json_rpc_call("get_block", Some(json!({ "hash": hex::encode(hash) }))).await {
        Ok(res) => break res,
        Err(RpcError::ConnectionError(e)) => {
          println!("get_block ConnectionError: {e}");
          continue;
        }
        Err(e) => panic!("couldn't get block {block_i} via block.hash(): {e:?}"),
      }
    };

    let blob = hex::decode(res.blob).expect("node returned non-hex block");
    let block = Block::read(&mut blob.as_slice())
      .unwrap_or_else(|e| panic!("couldn't deserialize block {block_i}: {e}"));
    assert_eq!(block.hash(), hash, "hash differs");
    assert_eq!(block.serialize(), blob, "serialization differs");

    let txs_len = 1 + block.txs.len();

    if !block.txs.is_empty() {
      #[derive(Deserialize, Debug)]
      struct TransactionResponse {
        tx_hash: String,
        as_hex: String,
      }
      #[derive(Deserialize, Debug)]
      struct TransactionsResponse {
        #[serde(default)]
        missed_tx: Vec<String>,
        txs: Vec<TransactionResponse>,
      }

      let mut hashes_hex = block.txs.iter().map(hex::encode).collect::<Vec<_>>();
      let mut all_txs = vec![];
      while !hashes_hex.is_empty() {
        let txs: TransactionsResponse = loop {
          match rpc
            .rpc_call(
              "get_transactions",
              Some(json!({
                "txs_hashes": hashes_hex.drain(.. hashes_hex.len().min(100)).collect::<Vec<_>>(),
              })),
            )
            .await
          {
            Ok(txs) => break txs,
            Err(RpcError::ConnectionError(e)) => {
              println!("get_transactions ConnectionError: {e}");
              continue;
            }
            Err(e) => panic!("couldn't call get_transactions: {e:?}"),
          }
        };
        assert!(txs.missed_tx.is_empty());
        all_txs.extend(txs.txs);
      }

      let mut batch = BatchVerifier::new(block.txs.len());
      for (tx_hash, tx_res) in block.txs.into_iter().zip(all_txs) {
        assert_eq!(
          tx_res.tx_hash,
          hex::encode(tx_hash),
          "node returned a transaction with different hash"
        );

        let tx = Transaction::read(
          &mut hex::decode(&tx_res.as_hex).expect("node returned non-hex transaction").as_slice(),
        )
        .expect("couldn't deserialize transaction");

        assert_eq!(
          hex::encode(tx.serialize()),
          tx_res.as_hex,
          "Transaction serialization was different"
        );
        assert_eq!(tx.hash(), tx_hash, "Transaction hash was different");

        if matches!(tx.rct_signatures.prunable, RctPrunable::Null) {
          assert_eq!(tx.prefix.version, 1);
          assert!(!tx.signatures.is_empty());
          continue;
        }

        let sig_hash = tx.signature_hash();
        // Verify all proofs we support proving for
        // This is due to having debug_asserts calling verify within their proving, and CLSAG
        // multisig explicitly calling verify as part of its signing process
        // Accordingly, making sure our signature_hash algorithm is correct is great, and further
        // making sure the verification functions are valid is appreciated
        match tx.rct_signatures.prunable {
          RctPrunable::Null |
          RctPrunable::AggregateMlsagBorromean { .. } |
          RctPrunable::MlsagBorromean { .. } => {}
          RctPrunable::MlsagBulletproofs { bulletproofs, .. } => {
            assert!(bulletproofs.batch_verify(
              &mut rand_core::OsRng,
              &mut batch,
              (),
              &tx.rct_signatures.base.commitments
            ));
          }
          RctPrunable::Clsag { bulletproofs, clsags, pseudo_outs } => {
            assert!(bulletproofs.batch_verify(
              &mut rand_core::OsRng,
              &mut batch,
              (),
              &tx.rct_signatures.base.commitments
            ));

            for (i, clsag) in clsags.into_iter().enumerate() {
              let (amount, key_offsets, image) = match &tx.prefix.inputs[i] {
                Input::Gen(_) => panic!("Input::Gen"),
                Input::ToKey { amount, key_offsets, key_image } => (amount, key_offsets, key_image),
              };

              let mut running_sum = 0;
              let mut actual_indexes = vec![];
              for offset in key_offsets {
                running_sum += offset;
                actual_indexes.push(running_sum);
              }

              async fn get_outs(
                rpc: &Rpc<HttpRpc>,
                amount: u64,
                indexes: &[u64],
              ) -> Vec<[EdwardsPoint; 2]> {
                #[derive(Deserialize, Debug)]
                struct Out {
                  key: String,
                  mask: String,
                }

                #[derive(Deserialize, Debug)]
                struct Outs {
                  outs: Vec<Out>,
                }

                let outs: Outs = loop {
                  match rpc
                    .rpc_call(
                      "get_outs",
                      Some(json!({
                        "get_txid": true,
                        "outputs": indexes.iter().map(|o| json!({
                          "amount": amount,
                          "index": o
                        })).collect::<Vec<_>>()
                      })),
                    )
                    .await
                  {
                    Ok(outs) => break outs,
                    Err(RpcError::ConnectionError(e)) => {
                      println!("get_outs ConnectionError: {e}");
                      continue;
                    }
                    Err(e) => panic!("couldn't connect to RPC to get outs: {e:?}"),
                  }
                };

                let rpc_point = |point: &str| {
                  decompress_point(
                    hex::decode(point)
                      .expect("invalid hex for ring member")
                      .try_into()
                      .expect("invalid point len for ring member"),
                  )
                  .expect("invalid point for ring member")
                };

                outs
                  .outs
                  .iter()
                  .map(|out| {
                    let mask = rpc_point(&out.mask);
                    if amount != 0 {
                      assert_eq!(mask, Commitment::new(Scalar::from(1u8), amount).calculate());
                    }
                    [rpc_point(&out.key), mask]
                  })
                  .collect()
              }

              clsag
                .verify(
                  &get_outs(&rpc, amount.unwrap_or(0), &actual_indexes).await,
                  image,
                  &pseudo_outs[i],
                  &sig_hash,
                )
                .unwrap();
            }
          }
        }
      }
      assert!(batch.verify_vartime());
    }

    println!("Deserialized, hashed, and reserialized {block_i} with {txs_len} TXs");
  }
}

#[cfg(feature = "binaries")]
#[tokio::main]
async fn main() {
  use binaries::*;

  let args = std::env::args().collect::<Vec<String>>();

  // Read start block as the first arg
  let mut block_i = args[1].parse::<usize>().expect("invalid start block");

  // How many blocks to work on at once
  let async_parallelism: usize =
    args.get(2).unwrap_or(&"8".to_string()).parse::<usize>().expect("invalid parallelism argument");

  // Read further args as RPC URLs
  let default_nodes = vec![
    "http://xmr-node.cakewallet.com:18081".to_string(),
    "https://node.sethforprivacy.com".to_string(),
  ];
  let mut specified_nodes = vec![];
  {
    let mut i = 0;
    loop {
      let Some(node) = args.get(3 + i) else { break };
      specified_nodes.push(node.clone());
      i += 1;
    }
  }
  let nodes = if specified_nodes.is_empty() { default_nodes } else { specified_nodes };

  let rpc = |url: String| async move {
    HttpRpc::new(url.clone())
      .await
      .unwrap_or_else(|_| panic!("couldn't create HttpRpc connected to {url}"))
  };
  let main_rpc = rpc(nodes[0].clone()).await;
  let mut rpcs = vec![];
  for i in 0 .. async_parallelism {
    rpcs.push(Arc::new(rpc(nodes[i % nodes.len()].clone()).await));
  }

  let mut rpc_i = 0;
  let mut handles: Vec<JoinHandle<()>> = vec![];
  let mut height = 0;
  loop {
    let new_height = main_rpc.get_height().await.expect("couldn't call get_height");
    if new_height == height {
      break;
    }
    height = new_height;

    while block_i < height {
      if handles.len() >= async_parallelism {
        // Guarantee one handle is complete
        handles.swap_remove(0).await.unwrap();

        // Remove all of the finished handles
        let mut i = 0;
        while i < handles.len() {
          if handles[i].is_finished() {
            handles.swap_remove(i).await.unwrap();
            continue;
          }
          i += 1;
        }
      }

      handles.push(tokio::spawn(check_block(rpcs[rpc_i].clone(), block_i)));
      rpc_i = (rpc_i + 1) % rpcs.len();
      block_i += 1;
    }
  }
}

#[cfg(not(feature = "binaries"))]
fn main() {
  panic!("To run binaries, please build with `--features binaries`.");
}
@@ -1,130 +0,0 @@
use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use crate::{
  hash,
  merkle::merkle_root,
  serialize::*,
  transaction::{Input, Transaction},
};

const CORRECT_BLOCK_HASH_202612: [u8; 32] =
  hex_literal::hex!("426d16cff04c71f8b16340b722dc4010a2dd3831c22041431f772547ba6e331a");
const EXISTING_BLOCK_HASH_202612: [u8; 32] =
  hex_literal::hex!("bbd604d2ba11ba27935e006ed39c9bfdd99b76bf4a50654bc1e1e61217962698");

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct BlockHeader {
  pub major_version: u8,
  pub minor_version: u8,
  pub timestamp: u64,
  pub previous: [u8; 32],
  pub nonce: u32,
}

impl BlockHeader {
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(&self.major_version, w)?;
    write_varint(&self.minor_version, w)?;
    write_varint(&self.timestamp, w)?;
    w.write_all(&self.previous)?;
    w.write_all(&self.nonce.to_le_bytes())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<BlockHeader> {
    Ok(BlockHeader {
      major_version: read_varint(r)?,
      minor_version: read_varint(r)?,
      timestamp: read_varint(r)?,
      previous: read_bytes(r)?,
      nonce: read_bytes(r).map(u32::from_le_bytes)?,
    })
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Block {
  pub header: BlockHeader,
  pub miner_tx: Transaction,
  pub txs: Vec<[u8; 32]>,
}

impl Block {
  pub fn number(&self) -> Option<u64> {
    match self.miner_tx.prefix.inputs.first() {
      Some(Input::Gen(number)) => Some(*number),
      _ => None,
    }
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.header.write(w)?;
    self.miner_tx.write(w)?;
    write_varint(&self.txs.len(), w)?;
    for tx in &self.txs {
      w.write_all(tx)?;
    }
    Ok(())
  }

  fn tx_merkle_root(&self) -> [u8; 32] {
    merkle_root(self.miner_tx.hash(), &self.txs)
  }

  /// Serialize the block as required for the proof of work hash.
  ///
  /// This is distinct from the serialization required for the block hash. To get the block hash,
  /// use the [`Block::hash`] function.
  pub fn serialize_hashable(&self) -> Vec<u8> {
    let mut blob = self.header.serialize();
    blob.extend_from_slice(&self.tx_merkle_root());
    write_varint(&(1 + u64::try_from(self.txs.len()).unwrap()), &mut blob).unwrap();

    blob
  }

  pub fn hash(&self) -> [u8; 32] {
    let mut hashable = self.serialize_hashable();
    // Monero pre-appends a VarInt of the block hashing blob's length before getting the block
    // hash, yet doesn't do this when getting the proof of work hash :)
    let mut hashing_blob = Vec::with_capacity(8 + hashable.len());
    write_varint(&u64::try_from(hashable.len()).unwrap(), &mut hashing_blob).unwrap();
    hashing_blob.append(&mut hashable);

    let hash = hash(&hashing_blob);
    if hash == CORRECT_BLOCK_HASH_202612 {
      return EXISTING_BLOCK_HASH_202612;
    };

    hash
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<Block> {
    let header = BlockHeader::read(r)?;

    let miner_tx = Transaction::read(r)?;
    if !matches!(miner_tx.prefix.inputs.as_slice(), &[Input::Gen(_)]) {
      Err(io::Error::other("Miner transaction has incorrect input type."))?;
    }

    Ok(Block {
      header,
      miner_tx,
      txs: (0_usize .. read_varint(r)?).map(|_| read_bytes(r)).collect::<Result<_, _>>()?,
    })
  }
}
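// A sketch of the read/serialize/hash round-trip the check-block binary relies on
// (illustrative helper, not part of the original file; `blob` is a raw block
// fetched from a node):
fn roundtrip_example(blob: &[u8]) -> [u8; 32] {
  let block = Block::read(&mut &*blob).unwrap();
  // Serialization must reproduce the exact bytes the node returned...
  assert_eq!(block.serialize(), blob);
  // ...and hashing covers the header, the transaction merkle root, and the count.
  block.hash()
}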
@@ -1,229 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]

#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;

use std_shims::{sync::OnceLock, io};

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, ZeroizeOnDrop};

use sha3::{Digest, Keccak256};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};

pub use monero_generators::{H, decompress_point};

mod merkle;

mod serialize;
use serialize::{read_byte, read_u16};

/// UnreducedScalar struct with functionality for recovering incorrectly reduced scalars.
mod unreduced_scalar;

/// Ring Signature structs and functionality.
pub mod ring_signatures;

/// RingCT structs and functionality.
pub mod ringct;
use ringct::RctType;

/// Transaction structs.
pub mod transaction;
/// Block structs.
pub mod block;

/// Monero daemon RPC interface.
pub mod rpc;
/// Wallet functionality, enabling scanning and sending transactions.
pub mod wallet;

#[cfg(test)]
mod tests;

pub const DEFAULT_LOCK_WINDOW: usize = 10;
pub const COINBASE_LOCK_WINDOW: usize = 60;
pub const BLOCK_TIME: usize = 120;

static INV_EIGHT_CELL: OnceLock<Scalar> = OnceLock::new();
#[allow(non_snake_case)]
pub(crate) fn INV_EIGHT() -> Scalar {
  *INV_EIGHT_CELL.get_or_init(|| Scalar::from(8u8).invert())
}

/// Monero protocol version.
///
/// v15 is omitted as v15 was simply v14 and v16 being active at the same time, with regards to the
/// transactions supported. Accordingly, v16 should be used during v15.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
#[allow(non_camel_case_types)]
pub enum Protocol {
  v14,
  v16,
  Custom {
    ring_len: usize,
    bp_plus: bool,
    optimal_rct_type: RctType,
    view_tags: bool,
    v16_fee: bool,
  },
}

impl Protocol {
  /// Amount of ring members under this protocol version.
  pub fn ring_len(&self) -> usize {
    match self {
      Protocol::v14 => 11,
      Protocol::v16 => 16,
      Protocol::Custom { ring_len, .. } => *ring_len,
    }
  }

  /// Whether or not the specified version uses Bulletproofs or Bulletproofs+.
  ///
  /// This method will likely be reworked when versions not using Bulletproofs at all are added.
  pub fn bp_plus(&self) -> bool {
    match self {
      Protocol::v14 => false,
      Protocol::v16 => true,
      Protocol::Custom { bp_plus, .. } => *bp_plus,
    }
  }

  // TODO: Make this an Option when we support pre-RCT protocols
  pub fn optimal_rct_type(&self) -> RctType {
    match self {
      Protocol::v14 => RctType::Clsag,
      Protocol::v16 => RctType::BulletproofsPlus,
      Protocol::Custom { optimal_rct_type, .. } => *optimal_rct_type,
    }
  }

  /// Whether or not the specified version uses view tags.
  pub fn view_tags(&self) -> bool {
    match self {
      Protocol::v14 => false,
      Protocol::v16 => true,
      Protocol::Custom { view_tags, .. } => *view_tags,
    }
  }

  /// Whether or not the specified version uses the fee algorithm from Monero
  /// hard fork version 16 (released in v18 binaries).
  pub fn v16_fee(&self) -> bool {
    match self {
      Protocol::v14 => false,
      Protocol::v16 => true,
      Protocol::Custom { v16_fee, .. } => *v16_fee,
    }
  }

  pub(crate) fn write<W: io::Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      Protocol::v14 => w.write_all(&[0, 14]),
      Protocol::v16 => w.write_all(&[0, 16]),
      Protocol::Custom { ring_len, bp_plus, optimal_rct_type, view_tags, v16_fee } => {
        // Custom, version 0
        w.write_all(&[1, 0])?;
        w.write_all(&u16::try_from(*ring_len).unwrap().to_le_bytes())?;
        w.write_all(&[u8::from(*bp_plus)])?;
        w.write_all(&[optimal_rct_type.to_byte()])?;
        w.write_all(&[u8::from(*view_tags)])?;
        w.write_all(&[u8::from(*v16_fee)])
      }
    }
  }

  pub(crate) fn read<R: io::Read>(r: &mut R) -> io::Result<Protocol> {
    Ok(match read_byte(r)? {
      // Monero protocol
      0 => match read_byte(r)? {
        14 => Protocol::v14,
        16 => Protocol::v16,
        _ => Err(io::Error::other("unrecognized monero protocol"))?,
      },
      // Custom
      1 => match read_byte(r)? {
        0 => Protocol::Custom {
          ring_len: read_u16(r)?.into(),
          bp_plus: match read_byte(r)? {
            0 => false,
            1 => true,
            _ => Err(io::Error::other("invalid bool serialization"))?,
          },
          optimal_rct_type: RctType::from_byte(read_byte(r)?)
            .ok_or_else(|| io::Error::other("invalid RctType serialization"))?,
          view_tags: match read_byte(r)? {
            0 => false,
            1 => true,
            _ => Err(io::Error::other("invalid bool serialization"))?,
          },
          v16_fee: match read_byte(r)? {
            0 => false,
            1 => true,
            _ => Err(io::Error::other("invalid bool serialization"))?,
          },
        },
        _ => Err(io::Error::other("unrecognized custom protocol serialization"))?,
      },
      _ => Err(io::Error::other("unrecognized protocol serialization"))?,
    })
  }
}

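// A sketch of round-tripping the custom protocol serialization above
// (illustrative test name; uses the crate-internal write/read pair):
#[test]
fn protocol_custom_round_trip() {
  let protocol = Protocol::Custom {
    ring_len: 11,
    bp_plus: false,
    optimal_rct_type: RctType::Clsag,
    view_tags: false,
    v16_fee: false,
  };
  let mut buf = vec![];
  protocol.write(&mut buf).unwrap();
  // Tag byte 1 ("Custom"), version byte 0, then the five fields.
  assert_eq!(buf[.. 2], [1, 0]);
  assert_eq!(Protocol::read(&mut buf.as_slice()).unwrap(), protocol);
}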
/// Transparent structure representing a Pedersen commitment's contents.
#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct Commitment {
  pub mask: Scalar,
  pub amount: u64,
}

impl core::fmt::Debug for Commitment {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    fmt.debug_struct("Commitment").field("amount", &self.amount).finish_non_exhaustive()
  }
}

impl Commitment {
  /// A commitment to zero, defined with a mask of 1 (as to not be the identity).
  pub fn zero() -> Commitment {
    Commitment { mask: Scalar::ONE, amount: 0 }
  }

  pub fn new(mask: Scalar, amount: u64) -> Commitment {
    Commitment { mask, amount }
  }

  /// Calculate a Pedersen commitment, as a point, from the transparent structure.
  pub fn calculate(&self) -> EdwardsPoint {
    (&self.mask * ED25519_BASEPOINT_TABLE) + (Scalar::from(self.amount) * H())
  }
}

/// Support generating a random scalar using a modern rand, as dalek's is notoriously dated.
pub fn random_scalar<R: RngCore + CryptoRng>(rng: &mut R) -> Scalar {
  let mut r = [0; 64];
  rng.fill_bytes(&mut r);
  Scalar::from_bytes_mod_order_wide(&r)
}

pub(crate) fn hash(data: &[u8]) -> [u8; 32] {
  Keccak256::digest(data).into()
}

/// Hash the provided data to a scalar via keccak256(data) % l.
pub fn hash_to_scalar(data: &[u8]) -> Scalar {
  let scalar = Scalar::from_bytes_mod_order(hash(data));
  // Monero will explicitly error in this case
  // This library acknowledges the practical impossibility of it occurring, and doesn't bother to
  // code in logic to handle it. That said, if it ever occurs, something must happen in order to
  // not generate/verify a proof we believe to be valid when it isn't
  assert!(scalar != Scalar::ZERO, "ZERO HASH: {data:?}");
  scalar
}
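// A sketch making the Pedersen relation computed by Commitment::calculate explicit,
// C = mask * G + amount * H (illustrative test name; assumes an OsRng is available):
#[test]
fn commitment_example() {
  let mask = random_scalar(&mut rand_core::OsRng);
  let com = Commitment::new(mask, 1_000_000);
  assert_eq!(
    com.calculate(),
    (&mask * ED25519_BASEPOINT_TABLE) + (Scalar::from(1_000_000u64) * H())
  );
}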
@@ -1,72 +0,0 @@
use std_shims::{
  io::{self, *},
  vec::Vec,
};

use zeroize::Zeroize;

use curve25519_dalek::{EdwardsPoint, Scalar};

use monero_generators::hash_to_point;

use crate::{serialize::*, hash_to_scalar};

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct Signature {
  c: Scalar,
  r: Scalar,
}

impl Signature {
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_scalar(&self.c, w)?;
    write_scalar(&self.r, w)?;
    Ok(())
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<Signature> {
    Ok(Signature { c: read_scalar(r)?, r: read_scalar(r)? })
  }
}

#[derive(Clone, PartialEq, Eq, Debug, Zeroize)]
pub struct RingSignature {
  sigs: Vec<Signature>,
}

impl RingSignature {
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    for sig in &self.sigs {
      sig.write(w)?;
    }
    Ok(())
  }

  pub fn read<R: Read>(members: usize, r: &mut R) -> io::Result<RingSignature> {
    Ok(RingSignature { sigs: read_raw_vec(Signature::read, members, r)? })
  }

  pub fn verify(&self, msg: &[u8; 32], ring: &[EdwardsPoint], key_image: &EdwardsPoint) -> bool {
    if ring.len() != self.sigs.len() {
      return false;
    }

    let mut buf = Vec::with_capacity(32 + (32 * 2 * ring.len()));
    buf.extend_from_slice(msg);

    let mut sum = Scalar::ZERO;

    for (ring_member, sig) in ring.iter().zip(&self.sigs) {
      #[allow(non_snake_case)]
      let Li = EdwardsPoint::vartime_double_scalar_mul_basepoint(&sig.c, ring_member, &sig.r);
      buf.extend_from_slice(Li.compress().as_bytes());
      #[allow(non_snake_case)]
      let Ri = (sig.r * hash_to_point(ring_member.compress().to_bytes())) + (sig.c * key_image);
      buf.extend_from_slice(Ri.compress().as_bytes());

      sum += sig.c;
    }

    sum == hash_to_scalar(&buf)
  }
}
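// The check above is the classic CryptoNote ring signature verification, restated:
// for each ring member A_i, L_i = c_i * A_i + r_i * G and R_i = r_i * Hp(A_i) + c_i * I,
// and the signature is valid iff sum(c_i) == Hs(msg || L_1 || R_1 || ... || L_n || R_n).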
@@ -1,151 +0,0 @@
use std_shims::{vec::Vec, sync::OnceLock};

use rand_core::{RngCore, CryptoRng};

use subtle::{Choice, ConditionallySelectable};

use curve25519_dalek::edwards::EdwardsPoint as DalekPoint;

use group::{ff::Field, Group};
use dalek_ff_group::{Scalar, EdwardsPoint};

use multiexp::multiexp as multiexp_const;

pub(crate) use monero_generators::Generators;

use crate::{INV_EIGHT as DALEK_INV_EIGHT, H as DALEK_H, Commitment, hash_to_scalar as dalek_hash};
pub(crate) use crate::ringct::bulletproofs::scalar_vector::*;

#[inline]
pub(crate) fn INV_EIGHT() -> Scalar {
  Scalar(DALEK_INV_EIGHT())
}

#[inline]
pub(crate) fn H() -> EdwardsPoint {
  EdwardsPoint(DALEK_H())
}

pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
  Scalar(dalek_hash(data))
}

// Components common between variants
pub(crate) const MAX_M: usize = 16;
pub(crate) const LOG_N: usize = 6; // 1 << 6 == N
pub(crate) const N: usize = 64;

pub(crate) fn prove_multiexp(pairs: &[(Scalar, EdwardsPoint)]) -> EdwardsPoint {
  multiexp_const(pairs) * INV_EIGHT()
}

pub(crate) fn vector_exponent(
  generators: &Generators,
  a: &ScalarVector,
  b: &ScalarVector,
) -> EdwardsPoint {
  debug_assert_eq!(a.len(), b.len());
  (a * &generators.G[.. a.len()]) + (b * &generators.H[.. b.len()])
}

pub(crate) fn hash_cache(cache: &mut Scalar, mash: &[[u8; 32]]) -> Scalar {
  let slice =
    &[cache.to_bytes().as_ref(), mash.iter().copied().flatten().collect::<Vec<_>>().as_ref()]
      .concat();
  *cache = hash_to_scalar(slice);
  *cache
}

pub(crate) fn MN(outputs: usize) -> (usize, usize, usize) {
  let mut logM = 0;
  let mut M;
  while {
    M = 1 << logM;
    (M <= MAX_M) && (M < outputs)
  } {
    logM += 1;
  }

  (logM + LOG_N, M, M * N)
}

pub(crate) fn bit_decompose(commitments: &[Commitment]) -> (ScalarVector, ScalarVector) {
  let (_, M, MN) = MN(commitments.len());

  let sv = commitments.iter().map(|c| Scalar::from(c.amount)).collect::<Vec<_>>();
  let mut aL = ScalarVector::new(MN);
  let mut aR = ScalarVector::new(MN);

  for j in 0 .. M {
    for i in (0 .. N).rev() {
      let bit =
        if j < sv.len() { Choice::from((sv[j][i / 8] >> (i % 8)) & 1) } else { Choice::from(0) };
      aL.0[(j * N) + i] = Scalar::conditional_select(&Scalar::ZERO, &Scalar::ONE, bit);
      aR.0[(j * N) + i] = Scalar::conditional_select(&-Scalar::ONE, &Scalar::ZERO, bit);
    }
  }

  (aL, aR)
}

pub(crate) fn hash_commitments<C: IntoIterator<Item = DalekPoint>>(
  commitments: C,
) -> (Scalar, Vec<EdwardsPoint>) {
  let V = commitments.into_iter().map(|c| EdwardsPoint(c) * INV_EIGHT()).collect::<Vec<_>>();
  (hash_to_scalar(&V.iter().flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>()), V)
}

pub(crate) fn alpha_rho<R: RngCore + CryptoRng>(
  rng: &mut R,
  generators: &Generators,
  aL: &ScalarVector,
  aR: &ScalarVector,
) -> (Scalar, EdwardsPoint) {
  let ar = Scalar::random(rng);
  (ar, (vector_exponent(generators, aL, aR) + (EdwardsPoint::generator() * ar)) * INV_EIGHT())
}

pub(crate) fn LR_statements(
  a: &ScalarVector,
  G_i: &[EdwardsPoint],
  b: &ScalarVector,
  H_i: &[EdwardsPoint],
  cL: Scalar,
  U: EdwardsPoint,
) -> Vec<(Scalar, EdwardsPoint)> {
  let mut res = a
    .0
    .iter()
    .copied()
    .zip(G_i.iter().copied())
    .chain(b.0.iter().copied().zip(H_i.iter().copied()))
    .collect::<Vec<_>>();
  res.push((cL, U));
  res
}

static TWO_N_CELL: OnceLock<ScalarVector> = OnceLock::new();
pub(crate) fn TWO_N() -> &'static ScalarVector {
  TWO_N_CELL.get_or_init(|| ScalarVector::powers(Scalar::from(2u8), N))
}

pub(crate) fn challenge_products(w: &[Scalar], winv: &[Scalar]) -> Vec<Scalar> {
  let mut products = vec![Scalar::ZERO; 1 << w.len()];
  products[0] = winv[0];
  products[1] = w[0];
  for j in 1 .. w.len() {
    let mut slots = (1 << (j + 1)) - 1;
    while slots > 0 {
      products[slots] = products[slots / 2] * w[j];
      products[slots - 1] = products[slots / 2] * winv[j];
      slots = slots.saturating_sub(2);
    }
  }

  // Sanity check as if the above failed to populate, it'd be critical
  for w in &products {
    debug_assert!(!bool::from(w.is_zero()));
  }

  products
}
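// A sketch of what MN computes: M is the smallest power of two covering the output
// count (capped at MAX_M), N is the 64 bits per amount (illustrative test name):
#[test]
fn mn_example() {
  let (log_mn, m, mn) = MN(3);
  // 3 outputs pad to M = 4, so MN = 4 * 64 = 256 = 2^8.
  assert_eq!((log_mn, m, mn), (8, 4, 256));
}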
@@ -1,229 +0,0 @@
#![allow(non_snake_case)]

use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, Zeroizing};

use curve25519_dalek::edwards::EdwardsPoint;
use multiexp::BatchVerifier;

use crate::{Commitment, wallet::TransactionError, serialize::*};

pub(crate) mod scalar_vector;
pub(crate) mod core;
use self::core::LOG_N;

pub(crate) mod original;
use self::original::OriginalStruct;

pub(crate) mod plus;
use self::plus::*;

pub(crate) const MAX_OUTPUTS: usize = self::core::MAX_M;

/// Bulletproofs enum, supporting the original and plus formulations.
#[allow(clippy::large_enum_variant)]
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Bulletproofs {
  Original(OriginalStruct),
  Plus(AggregateRangeProof),
}

impl Bulletproofs {
  fn bp_fields(plus: bool) -> usize {
    if plus {
      6
    } else {
      9
    }
  }

  // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
  //   src/cryptonote_basic/cryptonote_format_utils.cpp#L106-L124
  pub(crate) fn calculate_bp_clawback(plus: bool, n_outputs: usize) -> (usize, usize) {
    #[allow(non_snake_case)]
    let mut LR_len = 0;
    let mut n_padded_outputs = 1;
    while n_padded_outputs < n_outputs {
      LR_len += 1;
      n_padded_outputs = 1 << LR_len;
    }
    LR_len += LOG_N;

    let mut bp_clawback = 0;
    if n_padded_outputs > 2 {
      let fields = Bulletproofs::bp_fields(plus);
      let base = ((fields + (2 * (LOG_N + 1))) * 32) / 2;
      let size = (fields + (2 * LR_len)) * 32;
      bp_clawback = ((base * n_padded_outputs) - size) * 4 / 5;
    }

    (bp_clawback, LR_len)
  }

  pub(crate) fn fee_weight(plus: bool, outputs: usize) -> usize {
    #[allow(non_snake_case)]
    let (bp_clawback, LR_len) = Bulletproofs::calculate_bp_clawback(plus, outputs);
    32 * (Bulletproofs::bp_fields(plus) + (2 * LR_len)) + 2 + bp_clawback
  }

  /// Prove the list of commitments are within [0 .. 2^64).
  pub fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    outputs: &[Commitment],
    plus: bool,
  ) -> Result<Bulletproofs, TransactionError> {
    if outputs.is_empty() {
      Err(TransactionError::NoOutputs)?;
    }
    if outputs.len() > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }
    Ok(if !plus {
      Bulletproofs::Original(OriginalStruct::prove(rng, outputs))
    } else {
      use dalek_ff_group::EdwardsPoint as DfgPoint;
      Bulletproofs::Plus(
        AggregateRangeStatement::new(outputs.iter().map(|com| DfgPoint(com.calculate())).collect())
          .unwrap()
          .prove(rng, &Zeroizing::new(AggregateRangeWitness::new(outputs).unwrap()))
          .unwrap(),
      )
    })
  }

  /// Verify the given Bulletproofs.
  #[must_use]
  pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.verify(rng, commitments),
      Bulletproofs::Plus(bp) => {
        let mut verifier = BatchVerifier::new(1);
        // If this commitment is torsioned (which is allowed), this won't be a well-formed
        // dfg::EdwardsPoint (expected to be of prime-order)
        // The actual BP+ impl will perform a torsion clear though, making this safe
        // TODO: Have AggregateRangeStatement take in dalek EdwardsPoint for clarity on this
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
          return false;
        };
        if !statement.verify(rng, &mut verifier, (), bp.clone()) {
          return false;
        }
        verifier.verify_vartime()
      }
    }
  }

  /// Accumulate the verification for the given Bulletproofs into the specified BatchVerifier.
  /// Returns false if the Bulletproofs aren't sane, without mutating the BatchVerifier.
  /// Returns true if the Bulletproofs are sane, regardless of their validity.
  #[must_use]
  pub fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    verifier: &mut BatchVerifier<ID, dalek_ff_group::EdwardsPoint>,
    id: ID,
    commitments: &[EdwardsPoint],
  ) -> bool {
    match self {
      Bulletproofs::Original(bp) => bp.batch_verify(rng, verifier, id, commitments),
      Bulletproofs::Plus(bp) => {
        let Some(statement) = AggregateRangeStatement::new(
          commitments.iter().map(|c| dalek_ff_group::EdwardsPoint(*c)).collect(),
        ) else {
          return false;
        };
        statement.verify(rng, verifier, id, bp.clone())
      }
    }
  }

  fn write_core<W: Write, F: Fn(&[EdwardsPoint], &mut W) -> io::Result<()>>(
    &self,
    w: &mut W,
    specific_write_vec: F,
  ) -> io::Result<()> {
    match self {
      Bulletproofs::Original(bp) => {
        write_point(&bp.A, w)?;
        write_point(&bp.S, w)?;
        write_point(&bp.T1, w)?;
        write_point(&bp.T2, w)?;
        write_scalar(&bp.taux, w)?;
        write_scalar(&bp.mu, w)?;
        specific_write_vec(&bp.L, w)?;
        specific_write_vec(&bp.R, w)?;
        write_scalar(&bp.a, w)?;
        write_scalar(&bp.b, w)?;
        write_scalar(&bp.t, w)
      }

      Bulletproofs::Plus(bp) => {
        write_point(&bp.A.0, w)?;
        write_point(&bp.wip.A.0, w)?;
        write_point(&bp.wip.B.0, w)?;
        write_scalar(&bp.wip.r_answer.0, w)?;
        write_scalar(&bp.wip.s_answer.0, w)?;
        write_scalar(&bp.wip.delta_answer.0, w)?;
        specific_write_vec(&bp.wip.L.iter().copied().map(|L| L.0).collect::<Vec<_>>(), w)?;
        specific_write_vec(&bp.wip.R.iter().copied().map(|R| R.0).collect::<Vec<_>>(), w)
      }
    }
  }

  pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_raw_vec(write_point, points, w))
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.write_core(w, |points, w| write_vec(write_point, points, w))
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  /// Read Bulletproofs.
  pub fn read<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
    Ok(Bulletproofs::Original(OriginalStruct {
      A: read_point(r)?,
      S: read_point(r)?,
      T1: read_point(r)?,
      T2: read_point(r)?,
      taux: read_scalar(r)?,
      mu: read_scalar(r)?,
      L: read_vec(read_point, r)?,
      R: read_vec(read_point, r)?,
      a: read_scalar(r)?,
      b: read_scalar(r)?,
      t: read_scalar(r)?,
    }))
  }

  /// Read Bulletproofs+.
  pub fn read_plus<R: Read>(r: &mut R) -> io::Result<Bulletproofs> {
    use dalek_ff_group::{Scalar as DfgScalar, EdwardsPoint as DfgPoint};

    Ok(Bulletproofs::Plus(AggregateRangeProof {
      A: DfgPoint(read_point(r)?),
      wip: WipProof {
        A: DfgPoint(read_point(r)?),
        B: DfgPoint(read_point(r)?),
        r_answer: DfgScalar(read_scalar(r)?),
        s_answer: DfgScalar(read_scalar(r)?),
        delta_answer: DfgScalar(read_scalar(r)?),
        L: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
        R: read_vec(read_point, r)?.into_iter().map(DfgPoint).collect(),
      },
    }))
  }
}
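// A worked instance of calculate_bp_clawback above (illustrative test name):
// 3 BP+ outputs pad to 4, LR_len = log2(4 * 64) = 8, and 4/5 of the padding's
// weight is clawed back.
#[test]
fn bp_clawback_example() {
  let (bp_clawback, lr_len) = Bulletproofs::calculate_bp_clawback(true, 3);
  assert_eq!((bp_clawback, lr_len), (460, 8));
}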
@@ -1,322 +0,0 @@
|
|||||||
use std_shims::{vec::Vec, sync::OnceLock};
|
|
||||||
|
|
||||||
use rand_core::{RngCore, CryptoRng};
|
|
||||||
|
|
||||||
use zeroize::Zeroize;
|
|
||||||
|
|
||||||
use curve25519_dalek::{scalar::Scalar as DalekScalar, edwards::EdwardsPoint as DalekPoint};
|
|
||||||
|
|
||||||
use group::{ff::Field, Group};
|
|
||||||
use dalek_ff_group::{ED25519_BASEPOINT_POINT as G, Scalar, EdwardsPoint};
|
|
||||||
|
|
||||||
use multiexp::{BatchVerifier, multiexp};
|
|
||||||
|
|
||||||
use crate::{Commitment, ringct::bulletproofs::core::*};
|
|
||||||
|
|
||||||
include!(concat!(env!("OUT_DIR"), "/generators.rs"));
|
|
||||||
|
|
||||||
static IP12_CELL: OnceLock<Scalar> = OnceLock::new();
pub(crate) fn IP12() -> Scalar {
  *IP12_CELL.get_or_init(|| ScalarVector(vec![Scalar::ONE; N]).inner_product(TWO_N()))
}

pub(crate) fn hadamard_fold(
  l: &[EdwardsPoint],
  r: &[EdwardsPoint],
  a: Scalar,
  b: Scalar,
) -> Vec<EdwardsPoint> {
  let mut res = Vec::with_capacity(l.len() / 2);
  for i in 0 .. l.len() {
    res.push(multiexp(&[(a, l[i]), (b, r[i])]));
  }
  res
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct OriginalStruct {
  pub(crate) A: DalekPoint,
  pub(crate) S: DalekPoint,
  pub(crate) T1: DalekPoint,
  pub(crate) T2: DalekPoint,
  pub(crate) taux: DalekScalar,
  pub(crate) mu: DalekScalar,
  pub(crate) L: Vec<DalekPoint>,
  pub(crate) R: Vec<DalekPoint>,
  pub(crate) a: DalekScalar,
  pub(crate) b: DalekScalar,
  pub(crate) t: DalekScalar,
}

impl OriginalStruct {
  pub(crate) fn prove<R: RngCore + CryptoRng>(
    rng: &mut R,
    commitments: &[Commitment],
  ) -> OriginalStruct {
    let (logMN, M, MN) = MN(commitments.len());

    let (aL, aR) = bit_decompose(commitments);
    let commitments_points = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
    let (mut cache, _) = hash_commitments(commitments_points.clone());

    let (sL, sR) =
      ScalarVector((0 .. (MN * 2)).map(|_| Scalar::random(&mut *rng)).collect::<Vec<_>>()).split();

    let generators = GENERATORS();
    let (mut alpha, A) = alpha_rho(&mut *rng, generators, &aL, &aR);
    let (mut rho, S) = alpha_rho(&mut *rng, generators, &sL, &sR);

    let y = hash_cache(&mut cache, &[A.compress().to_bytes(), S.compress().to_bytes()]);
    let mut cache = hash_to_scalar(&y.to_bytes());
    let z = cache;

    let l0 = aL - z;
    let l1 = sL;

    let mut zero_twos = Vec::with_capacity(MN);
    let zpow = ScalarVector::powers(z, M + 2);
    for j in 0 .. M {
      for i in 0 .. N {
        zero_twos.push(zpow[j + 2] * TWO_N()[i]);
      }
    }

    let yMN = ScalarVector::powers(y, MN);
    let r0 = ((aR + z) * &yMN) + &ScalarVector(zero_twos);
    let r1 = yMN * &sR;

    let (T1, T2, x, mut taux) = {
      let t1 = l0.clone().inner_product(&r1) + r0.clone().inner_product(&l1);
      let t2 = l1.clone().inner_product(&r1);

      let mut tau1 = Scalar::random(&mut *rng);
      let mut tau2 = Scalar::random(&mut *rng);

      let T1 = prove_multiexp(&[(t1, H()), (tau1, EdwardsPoint::generator())]);
      let T2 = prove_multiexp(&[(t2, H()), (tau2, EdwardsPoint::generator())]);

      let x =
        hash_cache(&mut cache, &[z.to_bytes(), T1.compress().to_bytes(), T2.compress().to_bytes()]);

      let taux = (tau2 * (x * x)) + (tau1 * x);

      tau1.zeroize();
      tau2.zeroize();
      (T1, T2, x, taux)
    };

    let mu = (x * rho) + alpha;
    alpha.zeroize();
    rho.zeroize();

    for (i, gamma) in commitments.iter().map(|c| Scalar(c.mask)).enumerate() {
      taux += zpow[i + 2] * gamma;
    }

    let l = l0 + &(l1 * x);
    let r = r0 + &(r1 * x);

    let t = l.clone().inner_product(&r);

    let x_ip =
      hash_cache(&mut cache, &[x.to_bytes(), taux.to_bytes(), mu.to_bytes(), t.to_bytes()]);

    let mut a = l;
    let mut b = r;

    let yinv = y.invert().unwrap();
    let yinvpow = ScalarVector::powers(yinv, MN);

    let mut G_proof = generators.G[.. a.len()].to_vec();
    let mut H_proof = generators.H[.. a.len()].to_vec();
    H_proof.iter_mut().zip(yinvpow.0.iter()).for_each(|(this_H, yinvpow)| *this_H *= yinvpow);
    let U = H() * x_ip;

    let mut L = Vec::with_capacity(logMN);
    let mut R = Vec::with_capacity(logMN);

    while a.len() != 1 {
      let (aL, aR) = a.split();
      let (bL, bR) = b.split();

      let cL = aL.clone().inner_product(&bR);
      let cR = aR.clone().inner_product(&bL);

      let (G_L, G_R) = G_proof.split_at(aL.len());
      let (H_L, H_R) = H_proof.split_at(aL.len());

      let L_i = prove_multiexp(&LR_statements(&aL, G_R, &bR, H_L, cL, U));
      let R_i = prove_multiexp(&LR_statements(&aR, G_L, &bL, H_R, cR, U));
      L.push(L_i);
      R.push(R_i);

      let w = hash_cache(&mut cache, &[L_i.compress().to_bytes(), R_i.compress().to_bytes()]);
      let winv = w.invert().unwrap();

      a = (aL * w) + &(aR * winv);
      b = (bL * winv) + &(bR * w);

      if a.len() != 1 {
        G_proof = hadamard_fold(G_L, G_R, winv, w);
        H_proof = hadamard_fold(H_L, H_R, w, winv);
      }
    }

    let res = OriginalStruct {
      A: *A,
      S: *S,
      T1: *T1,
      T2: *T2,
      taux: *taux,
      mu: *mu,
      L: L.drain(..).map(|L| *L).collect(),
      R: R.drain(..).map(|R| *R).collect(),
      a: *a[0],
      b: *b[0],
      t: *t,
    };
    debug_assert!(res.verify(rng, &commitments_points));
    res
  }

  #[must_use]
  fn verify_core<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    verifier: &mut BatchVerifier<ID, EdwardsPoint>,
    id: ID,
    commitments: &[DalekPoint],
  ) -> bool {
    // Verify commitments are valid
    if commitments.is_empty() || (commitments.len() > MAX_M) {
      return false;
    }

    // Verify L and R are properly sized
    if self.L.len() != self.R.len() {
      return false;
    }

    let (logMN, M, MN) = MN(commitments.len());
    if self.L.len() != logMN {
      return false;
    }

    // Rebuild all challenges
    let (mut cache, commitments) = hash_commitments(commitments.iter().copied());
    let y = hash_cache(&mut cache, &[self.A.compress().to_bytes(), self.S.compress().to_bytes()]);

    let z = hash_to_scalar(&y.to_bytes());
    cache = z;

    let x = hash_cache(
      &mut cache,
      &[z.to_bytes(), self.T1.compress().to_bytes(), self.T2.compress().to_bytes()],
    );

    let x_ip = hash_cache(
      &mut cache,
      &[x.to_bytes(), self.taux.to_bytes(), self.mu.to_bytes(), self.t.to_bytes()],
    );

    let mut w = Vec::with_capacity(logMN);
    let mut winv = Vec::with_capacity(logMN);
    for (L, R) in self.L.iter().zip(&self.R) {
      w.push(hash_cache(&mut cache, &[L.compress().to_bytes(), R.compress().to_bytes()]));
      winv.push(cache.invert().unwrap());
    }

    // Convert the proof from * INV_EIGHT to its actual form
    let normalize = |point: &DalekPoint| EdwardsPoint(point.mul_by_cofactor());

    let L = self.L.iter().map(normalize).collect::<Vec<_>>();
    let R = self.R.iter().map(normalize).collect::<Vec<_>>();
    let T1 = normalize(&self.T1);
    let T2 = normalize(&self.T2);
    let A = normalize(&self.A);
    let S = normalize(&self.S);

    let commitments = commitments.iter().map(EdwardsPoint::mul_by_cofactor).collect::<Vec<_>>();

    // Verify it
    let mut proof = Vec::with_capacity(4 + commitments.len());

    let zpow = ScalarVector::powers(z, M + 3);
    let ip1y = ScalarVector::powers(y, M * N).sum();
    let mut k = -(zpow[2] * ip1y);
    for j in 1 ..= M {
      k -= zpow[j + 2] * IP12();
    }
    let y1 = Scalar(self.t) - ((z * ip1y) + k);
    proof.push((-y1, H()));

    proof.push((-Scalar(self.taux), G));

    for (j, commitment) in commitments.iter().enumerate() {
      proof.push((zpow[j + 2], *commitment));
    }

    proof.push((x, T1));
    proof.push((x * x, T2));
    verifier.queue(&mut *rng, id, proof);

    proof = Vec::with_capacity(4 + (2 * (MN + logMN)));
    let z3 = (Scalar(self.t) - (Scalar(self.a) * Scalar(self.b))) * x_ip;
    proof.push((z3, H()));
    proof.push((-Scalar(self.mu), G));

    proof.push((Scalar::ONE, A));
    proof.push((x, S));

    {
      let ypow = ScalarVector::powers(y, MN);
      let yinv = y.invert().unwrap();
      let yinvpow = ScalarVector::powers(yinv, MN);

      let w_cache = challenge_products(&w, &winv);

      let generators = GENERATORS();
      for i in 0 .. MN {
        let g = (Scalar(self.a) * w_cache[i]) + z;
        proof.push((-g, generators.G[i]));

        let mut h = Scalar(self.b) * yinvpow[i] * w_cache[(!i) & (MN - 1)];
        h -= ((zpow[(i / N) + 2] * TWO_N()[i % N]) + (z * ypow[i])) * yinvpow[i];
        proof.push((-h, generators.H[i]));
      }
    }

    for i in 0 .. logMN {
      proof.push((w[i] * w[i], L[i]));
      proof.push((winv[i] * winv[i], R[i]));
    }
    verifier.queue(rng, id, proof);

    true
  }

  #[must_use]
  pub(crate) fn verify<R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    commitments: &[DalekPoint],
  ) -> bool {
    let mut verifier = BatchVerifier::new(1);
    if self.verify_core(rng, &mut verifier, (), commitments) {
      verifier.verify_vartime()
    } else {
      false
    }
  }

  #[must_use]
  pub(crate) fn batch_verify<ID: Copy + Zeroize, R: RngCore + CryptoRng>(
    &self,
    rng: &mut R,
    verifier: &mut BatchVerifier<ID, EdwardsPoint>,
    id: ID,
    commitments: &[DalekPoint],
  ) -> bool {
    self.verify_core(rng, verifier, id, commitments)
  }
}
@@ -1,24 +0,0 @@
use std_shims::{sync::OnceLock, vec::Vec};

use dalek_ff_group::{Scalar, EdwardsPoint};

use monero_generators::{hash_to_point as raw_hash_to_point};
use crate::{hash, hash_to_scalar as dalek_hash};

// Monero starts BP+ transcripts with the following constant.
static TRANSCRIPT_CELL: OnceLock<[u8; 32]> = OnceLock::new();
pub(crate) fn TRANSCRIPT() -> [u8; 32] {
  // Why this uses a hash_to_point is completely unknown.
  *TRANSCRIPT_CELL
    .get_or_init(|| raw_hash_to_point(hash(b"bulletproof_plus_transcript")).compress().to_bytes())
}

pub(crate) fn hash_to_scalar(data: &[u8]) -> Scalar {
  Scalar(dalek_hash(data))
}

pub(crate) fn initial_transcript(commitments: core::slice::Iter<'_, EdwardsPoint>) -> Scalar {
  let commitments_hash =
    hash_to_scalar(&commitments.flat_map(|V| V.compress().to_bytes()).collect::<Vec<_>>());
  hash_to_scalar(&[TRANSCRIPT().as_ref(), &commitments_hash.to_bytes()].concat())
}
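
// Comment added for clarity (a restatement of the code above, not from the source):
// initial_transcript = hash_to_scalar(TRANSCRIPT() || hash_to_scalar(V_1 || ... || V_n)),
// where V_i are the compressed commitment points and || is byte concatenation.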
@@ -1,324 +0,0 @@
#![allow(non_snake_case)]

use core::ops::Deref;
use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use rand_core::{RngCore, CryptoRng};

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use subtle::{ConstantTimeEq, ConditionallySelectable};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  traits::{IsIdentity, VartimePrecomputedMultiscalarMul},
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation},
};

use crate::{
  INV_EIGHT, Commitment, random_scalar, hash_to_scalar, wallet::decoys::Decoys,
  ringct::hash_to_point, serialize::*,
};

#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagAddendum, ClsagMultisig};
#[cfg(feature = "multisig")]
pub(crate) use multisig::add_key_image_share;

/// Errors returned when CLSAG signing fails.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum ClsagError {
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  #[cfg_attr(feature = "std", error("invalid ring"))]
  InvalidRing,
  #[cfg_attr(feature = "std", error("invalid ring member (member {0}, ring size {1})"))]
  InvalidRingMember(u8, u8),
  #[cfg_attr(feature = "std", error("invalid commitment"))]
  InvalidCommitment,
  #[cfg_attr(feature = "std", error("invalid key image"))]
  InvalidImage,
  #[cfg_attr(feature = "std", error("invalid D"))]
  InvalidD,
  #[cfg_attr(feature = "std", error("invalid s"))]
  InvalidS,
  #[cfg_attr(feature = "std", error("invalid c1"))]
  InvalidC1,
}

/// Input being signed for.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ClsagInput {
  // The actual commitment for the true spend
  pub(crate) commitment: Commitment,
  // True spend index, offsets, and ring
  pub(crate) decoys: Decoys,
}

impl ClsagInput {
  pub fn new(commitment: Commitment, decoys: Decoys) -> Result<ClsagInput, ClsagError> {
    let n = decoys.len();
    if n > u8::MAX.into() {
      Err(ClsagError::InternalError("max ring size in this library is u8 max"))?;
    }
    let n = u8::try_from(n).unwrap();
    if decoys.i >= n {
      Err(ClsagError::InvalidRingMember(decoys.i, n))?;
    }

    // Validate the commitment matches
    if decoys.ring[usize::from(decoys.i)][1] != commitment.calculate() {
      Err(ClsagError::InvalidCommitment)?;
    }

    Ok(ClsagInput { commitment, decoys })
  }
}

#[allow(clippy::large_enum_variant)]
enum Mode {
  Sign(usize, EdwardsPoint, EdwardsPoint),
  Verify(Scalar),
}

// Core of the CLSAG algorithm, applicable to both sign and verify with minimal differences
// Said differences are covered via the above Mode
fn core(
  ring: &[[EdwardsPoint; 2]],
  I: &EdwardsPoint,
  pseudo_out: &EdwardsPoint,
  msg: &[u8; 32],
  D: &EdwardsPoint,
  s: &[Scalar],
  A_c1: &Mode,
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
  let n = ring.len();

  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
  let D = D * INV_EIGHT();

  // Generate the transcript
  // Instead of generating multiple, a single transcript is created and then edited as needed
  const PREFIX: &[u8] = b"CLSAG_";
  #[rustfmt::skip]
  const AGG_0: &[u8] = b"agg_0";
  #[rustfmt::skip]
  const ROUND: &[u8] = b"round";
  const PREFIX_AGG_0_LEN: usize = PREFIX.len() + AGG_0.len();

  let mut to_hash = Vec::with_capacity(((2 * n) + 5) * 32);
  to_hash.extend(PREFIX);
  to_hash.extend(AGG_0);
  to_hash.extend([0; 32 - PREFIX_AGG_0_LEN]);

  let mut P = Vec::with_capacity(n);
  for member in ring {
    P.push(member[0]);
    to_hash.extend(member[0].compress().to_bytes());
  }

  let mut C = Vec::with_capacity(n);
  for member in ring {
    C.push(member[1] - pseudo_out);
    to_hash.extend(member[1].compress().to_bytes());
  }

  to_hash.extend(I.compress().to_bytes());
  to_hash.extend(D.compress().to_bytes());
  to_hash.extend(pseudo_out.compress().to_bytes());
  // mu_P with agg_0
  let mu_P = hash_to_scalar(&to_hash);
  // mu_C with agg_1
  to_hash[PREFIX_AGG_0_LEN - 1] = b'1';
  let mu_C = hash_to_scalar(&to_hash);

  // Truncate it for the round transcript, altering the DST as needed
  to_hash.truncate(((2 * n) + 1) * 32);
  for i in 0 .. ROUND.len() {
    to_hash[PREFIX.len() + i] = ROUND[i];
  }
  // Unfortunately, it's I D pseudo_out instead of pseudo_out I D, meaning this needs to be
  // truncated just to add it back
  to_hash.extend(pseudo_out.compress().to_bytes());
  to_hash.extend(msg);

  // Configure the loop based on if we're signing or verifying
  let start;
  let end;
  let mut c;
  match A_c1 {
    Mode::Sign(r, A, AH) => {
      start = r + 1;
      end = r + n;
      to_hash.extend(A.compress().to_bytes());
      to_hash.extend(AH.compress().to_bytes());
      c = hash_to_scalar(&to_hash);
    }

    Mode::Verify(c1) => {
      start = 0;
      end = n;
      c = *c1;
    }
  }

  // Perform the core loop
  let mut c1 = c;
  for i in (start .. end).map(|i| i % n) {
    let c_p = mu_P * c;
    let c_c = mu_C * c;

    let L = (&s[i] * ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
    let PH = hash_to_point(&P[i]);
    // Shouldn't be an issue as all of the variables in this vartime statement are public
    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul([c_p, c_c]);

    to_hash.truncate(((2 * n) + 3) * 32);
    to_hash.extend(L.compress().to_bytes());
    to_hash.extend(R.compress().to_bytes());
    c = hash_to_scalar(&to_hash);

    // This will only execute once and shouldn't need to be constant time. Making it constant time
    // removes the risk of branch prediction creating timing differences depending on ring index
    // however
    c1.conditional_assign(&c, i.ct_eq(&(n - 1)));
  }

  // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
  ((D, c * mu_P, c * mu_C), c1)
}

/// CLSAG signature, as used in Monero.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Clsag {
  pub D: EdwardsPoint,
  pub s: Vec<Scalar>,
  pub c1: Scalar,
}

impl Clsag {
  // Sign core is the extension of core as needed for signing, yet is shared between single signer
  // and multisig, hence why it's still core
  pub(crate) fn sign_core<R: RngCore + CryptoRng>(
    rng: &mut R,
    I: &EdwardsPoint,
    input: &ClsagInput,
    mask: Scalar,
    msg: &[u8; 32],
    A: EdwardsPoint,
    AH: EdwardsPoint,
  ) -> (Clsag, EdwardsPoint, Scalar, Scalar) {
    let r: usize = input.decoys.i.into();

    let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
    let z = input.commitment.mask - mask;

    let H = hash_to_point(&input.decoys.ring[r][0]);
    let D = H * z;
    let mut s = Vec::with_capacity(input.decoys.ring.len());
    for _ in 0 .. input.decoys.ring.len() {
      s.push(random_scalar(rng));
    }
    let ((D, p, c), c1) =
      core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, &Mode::Sign(r, A, AH));

    (Clsag { D, s, c1 }, pseudo_out, p, c * z)
  }

  /// Generate CLSAG signatures for the given inputs.
  /// inputs is of the form (private key, key image, input).
  /// sum_outputs is for the sum of the outputs' commitment masks.
  pub fn sign<R: RngCore + CryptoRng>(
    rng: &mut R,
    mut inputs: Vec<(Zeroizing<Scalar>, EdwardsPoint, ClsagInput)>,
    sum_outputs: Scalar,
    msg: [u8; 32],
  ) -> Vec<(Clsag, EdwardsPoint)> {
    let mut res = Vec::with_capacity(inputs.len());
    let mut sum_pseudo_outs = Scalar::ZERO;
    for i in 0 .. inputs.len() {
      let mut mask = random_scalar(rng);
      if i == (inputs.len() - 1) {
        mask = sum_outputs - sum_pseudo_outs;
      } else {
        sum_pseudo_outs += mask;
      }

      let mut nonce = Zeroizing::new(random_scalar(rng));
      let (mut clsag, pseudo_out, p, c) = Clsag::sign_core(
        rng,
        &inputs[i].1,
        &inputs[i].2,
        mask,
        &msg,
        nonce.deref() * ED25519_BASEPOINT_TABLE,
        nonce.deref() *
          hash_to_point(&inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0]),
      );
      clsag.s[usize::from(inputs[i].2.decoys.i)] =
        (-((p * inputs[i].0.deref()) + c)) + nonce.deref();
      inputs[i].0.zeroize();
      nonce.zeroize();

      debug_assert!(clsag
        .verify(&inputs[i].2.decoys.ring, &inputs[i].1, &pseudo_out, &msg)
        .is_ok());

      res.push((clsag, pseudo_out));
    }

    res
  }

  /// Verify the CLSAG signature against the given Transaction data.
  pub fn verify(
    &self,
    ring: &[[EdwardsPoint; 2]],
    I: &EdwardsPoint,
    pseudo_out: &EdwardsPoint,
    msg: &[u8; 32],
  ) -> Result<(), ClsagError> {
    // Preliminary checks. s, c1, and points must also be encoded canonically, which isn't checked
    // here
    if ring.is_empty() {
      Err(ClsagError::InvalidRing)?;
    }
    if ring.len() != self.s.len() {
      Err(ClsagError::InvalidS)?;
    }
    if I.is_identity() {
      Err(ClsagError::InvalidImage)?;
    }

    let D = self.D.mul_by_cofactor();
    if D.is_identity() {
      Err(ClsagError::InvalidD)?;
    }

    let (_, c1) = core(ring, I, pseudo_out, msg, &D, &self.s, &Mode::Verify(self.c1));
    if c1 != self.c1 {
      Err(ClsagError::InvalidC1)?;
    }
    Ok(())
  }

  pub(crate) fn fee_weight(ring_len: usize) -> usize {
    (ring_len * 32) + 32 + 32
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_raw_vec(write_scalar, &self.s, w)?;
    w.write_all(&self.c1.to_bytes())?;
    write_point(&self.D, w)
  }

  pub fn read<R: Read>(decoys: usize, r: &mut R) -> io::Result<Clsag> {
    Ok(Clsag { s: read_raw_vec(read_scalar, decoys, r)?, c1: read_scalar(r)?, D: read_point(r)? })
  }
}
@@ -1,305 +0,0 @@
use core::{ops::Deref, fmt::Debug};
use std_shims::io::{self, Read, Write};
use std::sync::{Arc, RwLock};

use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use group::{ff::Field, Group, GroupEncoding};

use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;
use frost::{
  dkg::lagrange,
  curve::Ed25519,
  Participant, FrostError, ThresholdKeys, ThresholdView,
  algorithm::{WriteAddendum, Algorithm},
};

use crate::ringct::{
  hash_to_point,
  clsag::{ClsagInput, Clsag},
};

fn dleq_transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"monero_key_image_dleq")
}

impl ClsagInput {
  fn transcript<T: Transcript>(&self, transcript: &mut T) {
    // Doesn't domain separate as this is considered part of the larger CLSAG proof

    // Ring index
    transcript.append_message(b"real_spend", [self.decoys.i]);

    // Ring
    for (i, pair) in self.decoys.ring.iter().enumerate() {
      // Doesn't include global output indexes as CLSAG doesn't care and won't be affected by them
      // They're just an unreliable reference to this data which will be included in the message
      // if in use
transcript.append_message(b"member", [u8::try_from(i).expect("ring size exceeded 255")]);
|
|
||||||
transcript.append_message(b"key", pair[0].compress().to_bytes());
|
|
||||||
transcript.append_message(b"commitment", pair[1].compress().to_bytes())
|
|
||||||
}
|
|
||||||
|
|
||||||
// Doesn't include the commitment's parts as the above ring + index includes the commitment
|
|
||||||
// The only potential malleability would be if the G/H relationship is known breaking the
|
|
||||||
// discrete log problem, which breaks everything already
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// CLSAG input and the mask to use for it.
|
|
||||||
#[derive(Clone, Debug, Zeroize, ZeroizeOnDrop)]
|
|
||||||
pub struct ClsagDetails {
|
|
||||||
input: ClsagInput,
|
|
||||||
mask: Scalar,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClsagDetails {
|
|
||||||
pub fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
|
|
||||||
ClsagDetails { input, mask }
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Addendum produced during the FROST signing process with relevant data.
|
|
||||||
#[derive(Clone, PartialEq, Eq, Zeroize, Debug)]
|
|
||||||
pub struct ClsagAddendum {
|
|
||||||
pub(crate) key_image: dfg::EdwardsPoint,
|
|
||||||
dleq: DLEqProof<dfg::EdwardsPoint>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl WriteAddendum for ClsagAddendum {
|
|
||||||
fn write<W: Write>(&self, writer: &mut W) -> io::Result<()> {
|
|
||||||
writer.write_all(self.key_image.compress().to_bytes().as_ref())?;
|
|
||||||
self.dleq.write(writer)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[allow(non_snake_case)]
|
|
||||||
#[derive(Clone, PartialEq, Eq, Debug)]
|
|
||||||
struct Interim {
|
|
||||||
p: Scalar,
|
|
||||||
c: Scalar,
|
|
||||||
|
|
||||||
clsag: Clsag,
|
|
||||||
pseudo_out: EdwardsPoint,
|
|
||||||
}
|
|
||||||
|
|
||||||
/// FROST algorithm for producing a CLSAG signature.
|
|
||||||
#[allow(non_snake_case)]
|
|
||||||
#[derive(Clone, Debug)]
|
|
||||||
pub struct ClsagMultisig {
|
|
||||||
transcript: RecommendedTranscript,
|
|
||||||
|
|
||||||
pub(crate) H: EdwardsPoint,
|
|
||||||
// Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires
|
|
||||||
// an extra round
|
|
||||||
image: EdwardsPoint,
|
|
||||||
|
|
||||||
details: Arc<RwLock<Option<ClsagDetails>>>,
|
|
||||||
|
|
||||||
msg: Option<[u8; 32]>,
|
|
||||||
interim: Option<Interim>,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl ClsagMultisig {
|
|
||||||
pub fn new(
|
|
||||||
transcript: RecommendedTranscript,
|
|
||||||
output_key: EdwardsPoint,
|
|
||||||
details: Arc<RwLock<Option<ClsagDetails>>>,
|
|
||||||
) -> ClsagMultisig {
|
|
||||||
ClsagMultisig {
|
|
||||||
transcript,
|
|
||||||
|
|
||||||
H: hash_to_point(&output_key),
|
|
||||||
image: EdwardsPoint::identity(),
|
|
||||||
|
|
||||||
details,
|
|
||||||
|
|
||||||
msg: None,
|
|
||||||
interim: None,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn input(&self) -> ClsagInput {
|
|
||||||
(*self.details.read().unwrap()).as_ref().unwrap().input.clone()
|
|
||||||
}
|
|
||||||
|
|
||||||
fn mask(&self) -> Scalar {
|
|
||||||
(*self.details.read().unwrap()).as_ref().unwrap().mask
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub(crate) fn add_key_image_share(
|
|
||||||
image: &mut EdwardsPoint,
|
|
||||||
generator: EdwardsPoint,
|
|
||||||
offset: Scalar,
|
|
||||||
included: &[Participant],
|
|
||||||
participant: Participant,
|
|
||||||
share: EdwardsPoint,
|
|
||||||
) {
|
|
||||||
if image.is_identity().into() {
|
|
||||||
*image = generator * offset;
|
|
||||||
}
|
|
||||||
*image += share * lagrange::<dfg::Scalar>(participant, included).0;
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Algorithm<Ed25519> for ClsagMultisig {
|
|
||||||
type Transcript = RecommendedTranscript;
|
|
||||||
type Addendum = ClsagAddendum;
|
|
||||||
type Signature = (Clsag, EdwardsPoint);
|
|
||||||
|
|
||||||
fn nonces(&self) -> Vec<Vec<dfg::EdwardsPoint>> {
|
|
||||||
vec![vec![dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)]]
|
|
||||||
}
|
|
||||||
|
|
||||||
fn preprocess_addendum<R: RngCore + CryptoRng>(
|
|
||||||
&mut self,
|
|
||||||
rng: &mut R,
|
|
||||||
keys: &ThresholdKeys<Ed25519>,
|
|
||||||
) -> ClsagAddendum {
|
|
||||||
ClsagAddendum {
|
|
||||||
key_image: dfg::EdwardsPoint(self.H) * keys.secret_share().deref(),
|
|
||||||
dleq: DLEqProof::prove(
|
|
||||||
rng,
|
|
||||||
// Doesn't take in a larger transcript object due to the usage of this
|
|
||||||
// Every prover would immediately write their own DLEq proof, when they can only do so in
|
|
||||||
// the proper order if they want to reach consensus
|
|
||||||
// It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to
|
|
||||||
// try to merge later in some form, when it should instead just merge xH (as it does)
|
|
||||||
&mut dleq_transcript(),
|
|
||||||
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
|
|
||||||
keys.secret_share(),
|
|
||||||
),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
fn read_addendum<R: Read>(&self, reader: &mut R) -> io::Result<ClsagAddendum> {
|
|
||||||
let mut bytes = [0; 32];
|
|
||||||
reader.read_exact(&mut bytes)?;
|
|
||||||
// dfg ensures the point is torsion free
|
|
||||||
let xH = Option::<dfg::EdwardsPoint>::from(dfg::EdwardsPoint::from_bytes(&bytes))
|
|
||||||
.ok_or_else(|| io::Error::other("invalid key image"))?;
|
|
||||||
// Ensure this is a canonical point
|
|
||||||
if xH.to_bytes() != bytes {
|
|
||||||
Err(io::Error::other("non-canonical key image"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(ClsagAddendum { key_image: xH, dleq: DLEqProof::<dfg::EdwardsPoint>::read(reader)? })
|
|
||||||
}
|
|
||||||
|
|
||||||
fn process_addendum(
|
|
||||||
&mut self,
|
|
||||||
view: &ThresholdView<Ed25519>,
|
|
||||||
l: Participant,
|
|
||||||
addendum: ClsagAddendum,
|
|
||||||
) -> Result<(), FrostError> {
|
|
||||||
// TODO: This check is faulty if two shares are additive inverses of each other
|
|
||||||
if self.image.is_identity().into() {
|
|
||||||
self.transcript.domain_separate(b"CLSAG");
|
|
||||||
self.input().transcript(&mut self.transcript);
|
|
||||||
self.transcript.append_message(b"mask", self.mask().to_bytes());
|
|
||||||
}
|
|
||||||
|
|
||||||
self.transcript.append_message(b"participant", l.to_bytes());
|
|
||||||
|
|
||||||
addendum
|
|
||||||
.dleq
|
|
||||||
.verify(
|
|
||||||
&mut dleq_transcript(),
|
|
||||||
&[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)],
|
|
||||||
&[view.original_verification_share(l), addendum.key_image],
|
|
||||||
)
|
|
||||||
.map_err(|_| FrostError::InvalidPreprocess(l))?;
|
|
||||||
|
|
||||||
self.transcript.append_message(b"key_image_share", addendum.key_image.compress().to_bytes());
|
|
||||||
add_key_image_share(
|
|
||||||
&mut self.image,
|
|
||||||
self.H,
|
|
||||||
view.offset().0,
|
|
||||||
view.included(),
|
|
||||||
l,
|
|
||||||
addendum.key_image.0,
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
fn transcript(&mut self) -> &mut Self::Transcript {
|
|
||||||
&mut self.transcript
|
|
||||||
}
|
|
||||||
|
|
||||||
fn sign_share(
|
|
||||||
&mut self,
|
|
||||||
view: &ThresholdView<Ed25519>,
|
|
||||||
nonce_sums: &[Vec<dfg::EdwardsPoint>],
|
|
||||||
nonces: Vec<Zeroizing<dfg::Scalar>>,
|
|
||||||
msg: &[u8],
|
|
||||||
) -> dfg::Scalar {
|
|
||||||
// Use the transcript to get a seeded random number generator
|
|
||||||
// The transcript contains private data, preventing passive adversaries from recreating this
|
|
||||||
// process even if they have access to commitments (specifically, the ring index being signed
|
|
||||||
// for, along with the mask which should not only require knowing the shared keys yet also the
|
|
||||||
// input commitment masks)
|
|
||||||
let mut rng = ChaCha20Rng::from_seed(self.transcript.rng_seed(b"decoy_responses"));
|
|
||||||
|
|
||||||
self.msg = Some(msg.try_into().expect("CLSAG message should be 32-bytes"));
|
|
||||||
|
|
||||||
#[allow(non_snake_case)]
|
|
||||||
let (clsag, pseudo_out, p, c) = Clsag::sign_core(
|
|
||||||
&mut rng,
|
|
||||||
&self.image,
|
|
||||||
&self.input(),
|
|
||||||
self.mask(),
|
|
||||||
self.msg.as_ref().unwrap(),
|
|
||||||
nonce_sums[0][0].0,
|
|
||||||
nonce_sums[0][1].0,
|
|
||||||
);
|
|
||||||
self.interim = Some(Interim { p, c, clsag, pseudo_out });
|
|
||||||
|
|
||||||
(-(dfg::Scalar(p) * view.secret_share().deref())) + nonces[0].deref()
|
|
||||||
}
|
|
||||||
|
|
||||||
#[must_use]
|
|
||||||
fn verify(
|
|
||||||
&self,
|
|
||||||
_: dfg::EdwardsPoint,
|
|
||||||
_: &[Vec<dfg::EdwardsPoint>],
|
|
||||||
sum: dfg::Scalar,
|
|
||||||
) -> Option<Self::Signature> {
|
|
||||||
let interim = self.interim.as_ref().unwrap();
|
|
||||||
let mut clsag = interim.clsag.clone();
|
|
||||||
clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
|
|
||||||
if clsag
|
|
||||||
.verify(
|
|
||||||
&self.input().decoys.ring,
|
|
||||||
&self.image,
|
|
||||||
&interim.pseudo_out,
|
|
||||||
self.msg.as_ref().unwrap(),
|
|
||||||
)
|
|
||||||
.is_ok()
|
|
||||||
{
|
|
||||||
return Some((clsag, interim.pseudo_out));
|
|
||||||
}
|
|
||||||
None
|
|
||||||
}
|
|
||||||
|
|
||||||
fn verify_share(
|
|
||||||
&self,
|
|
||||||
verification_share: dfg::EdwardsPoint,
|
|
||||||
nonces: &[Vec<dfg::EdwardsPoint>],
|
|
||||||
share: dfg::Scalar,
|
|
||||||
) -> Result<Vec<(dfg::Scalar, dfg::EdwardsPoint)>, ()> {
|
|
||||||
let interim = self.interim.as_ref().unwrap();
|
|
||||||
Ok(vec![
|
|
||||||
(share, dfg::EdwardsPoint::generator()),
|
|
||||||
(dfg::Scalar(interim.p), verification_share),
|
|
||||||
(-dfg::Scalar::ONE, nonces[0][0]),
|
|
||||||
])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -1,8 +0,0 @@
|
|||||||
use curve25519_dalek::edwards::EdwardsPoint;
|
|
||||||
|
|
||||||
pub use monero_generators::{hash_to_point as raw_hash_to_point};
|
|
||||||
|
|
||||||
/// Monero's hash to point function, as named `ge_fromfe_frombytes_vartime`.
|
|
||||||
pub fn hash_to_point(key: &EdwardsPoint) -> EdwardsPoint {
|
|
||||||
raw_hash_to_point(key.compress().to_bytes())
|
|
||||||
}
|
|
||||||
@@ -1,400 +0,0 @@
|
|||||||
use core::ops::Deref;
|
|
||||||
use std_shims::{
|
|
||||||
vec::Vec,
|
|
||||||
io::{self, Read, Write},
|
|
||||||
};
|
|
||||||
|
|
||||||
use zeroize::{Zeroize, Zeroizing};
|
|
||||||
|
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
|
|
||||||
|
|
||||||
pub(crate) mod hash_to_point;
|
|
||||||
pub use hash_to_point::{raw_hash_to_point, hash_to_point};
|
|
||||||
|
|
||||||
/// MLSAG struct, along with verifying functionality.
|
|
||||||
pub mod mlsag;
|
|
||||||
/// CLSAG struct, along with signing and verifying functionality.
|
|
||||||
pub mod clsag;
|
|
||||||
/// BorromeanRange struct, along with verifying functionality.
|
|
||||||
pub mod borromean;
|
|
||||||
/// Bulletproofs(+) structs, along with proving and verifying functionality.
|
|
||||||
pub mod bulletproofs;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
Protocol,
|
|
||||||
serialize::*,
|
|
||||||
ringct::{mlsag::Mlsag, clsag::Clsag, borromean::BorromeanRange, bulletproofs::Bulletproofs},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// Generate a key image for a given key. Defined as `x * hash_to_point(xG)`.
|
|
||||||
pub fn generate_key_image(secret: &Zeroizing<Scalar>) -> EdwardsPoint {
|
|
||||||
hash_to_point(&(ED25519_BASEPOINT_TABLE * secret.deref())) * secret.deref()
|
|
||||||
}
|
|
||||||
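
// A minimal usage sketch, not from the source. Assumptions: `rand_core`'s
// `OsRng` (its "getrandom" feature) is available, and `random_scalar` lives at
// the crate root as the CLSAG module's imports suggest.
#[cfg(test)]
fn key_image_example() {
  use curve25519_dalek::traits::IsIdentity;
  use crate::random_scalar;
  let x = Zeroizing::new(random_scalar(&mut rand_core::OsRng));
  // I = x * hash_to_point(x * G); a non-zero x yields a non-identity image
  let image = generate_key_image(&x);
  assert!(!image.is_identity());
}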

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum EncryptedAmount {
  Original { mask: [u8; 32], amount: [u8; 32] },
  Compact { amount: [u8; 8] },
}

impl EncryptedAmount {
  pub fn read<R: Read>(compact: bool, r: &mut R) -> io::Result<EncryptedAmount> {
    Ok(if !compact {
      EncryptedAmount::Original { mask: read_bytes(r)?, amount: read_bytes(r)? }
    } else {
      EncryptedAmount::Compact { amount: read_bytes(r)? }
    })
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      EncryptedAmount::Original { mask, amount } => {
        w.write_all(mask)?;
        w.write_all(amount)
      }
      EncryptedAmount::Compact { amount } => w.write_all(amount),
    }
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum RctType {
  /// No RCT proofs.
  Null,
  /// One MLSAG for multiple inputs and Borromean range proofs (RCTTypeFull).
  MlsagAggregate,
  /// One MLSAG for each input and a Borromean range proof (RCTTypeSimple).
  MlsagIndividual,
  /// One MLSAG for each input and a Bulletproof (RCTTypeBulletproof).
  Bulletproofs,
  /// One MLSAG for each input and a Bulletproof, yet starting to use EncryptedAmount::Compact
  /// (RCTTypeBulletproof2).
  BulletproofsCompactAmount,
  /// One CLSAG for each input and a Bulletproof (RCTTypeCLSAG).
  Clsag,
  /// One CLSAG for each input and a Bulletproof+ (RCTTypeBulletproofPlus).
  BulletproofsPlus,
}

impl RctType {
  pub fn to_byte(self) -> u8 {
    match self {
      RctType::Null => 0,
      RctType::MlsagAggregate => 1,
      RctType::MlsagIndividual => 2,
      RctType::Bulletproofs => 3,
      RctType::BulletproofsCompactAmount => 4,
      RctType::Clsag => 5,
      RctType::BulletproofsPlus => 6,
    }
  }

  pub fn from_byte(byte: u8) -> Option<Self> {
    Some(match byte {
      0 => RctType::Null,
      1 => RctType::MlsagAggregate,
      2 => RctType::MlsagIndividual,
      3 => RctType::Bulletproofs,
      4 => RctType::BulletproofsCompactAmount,
      5 => RctType::Clsag,
      6 => RctType::BulletproofsPlus,
      _ => None?,
    })
  }
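
  // Sketch, not from the source: `to_byte`/`from_byte` round-trip over the
  // seven defined types, and any other byte maps to None.
  #[cfg(test)]
  fn rct_type_round_trip() {
    for byte in 0u8 ..= 6 {
      assert_eq!(RctType::from_byte(byte).unwrap().to_byte(), byte);
    }
    assert_eq!(RctType::from_byte(7), None);
  }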

  pub fn compact_encrypted_amounts(&self) -> bool {
    match self {
      RctType::Null |
      RctType::MlsagAggregate |
      RctType::MlsagIndividual |
      RctType::Bulletproofs => false,
      RctType::BulletproofsCompactAmount | RctType::Clsag | RctType::BulletproofsPlus => true,
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctBase {
  pub fee: u64,
  pub pseudo_outs: Vec<EdwardsPoint>,
  pub encrypted_amounts: Vec<EncryptedAmount>,
  pub commitments: Vec<EdwardsPoint>,
}

impl RctBase {
  pub(crate) fn fee_weight(outputs: usize, fee: u64) -> usize {
    // 1 byte for the RCT signature type
    1 + (outputs * (8 + 32)) + varint_len(fee)
  }
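
  // Worked example (arithmetic only, not from the source): with 2 outputs and
  // a fee below 128 (a one-byte VarInt), the base weight is
  // 1 + (2 * (8 + 32)) + 1 = 82 bytes.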

  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    w.write_all(&[rct_type.to_byte()])?;
    match rct_type {
      RctType::Null => Ok(()),
      _ => {
        write_varint(&self.fee, w)?;
        if rct_type == RctType::MlsagIndividual {
          write_raw_vec(write_point, &self.pseudo_outs, w)?;
        }
        for encrypted_amount in &self.encrypted_amounts {
          encrypted_amount.write(w)?;
        }
        write_raw_vec(write_point, &self.commitments, w)
      }
    }
  }

  pub fn read<R: Read>(inputs: usize, outputs: usize, r: &mut R) -> io::Result<(RctBase, RctType)> {
    let rct_type =
      RctType::from_byte(read_byte(r)?).ok_or_else(|| io::Error::other("invalid RCT type"))?;

    match rct_type {
      RctType::Null | RctType::MlsagAggregate | RctType::MlsagIndividual => {}
      RctType::Bulletproofs |
      RctType::BulletproofsCompactAmount |
      RctType::Clsag |
      RctType::BulletproofsPlus => {
        if outputs == 0 {
          // Because the Bulletproofs(+) layout must be canonical, there must be 1 Bulletproof if
          // Bulletproofs are in use
          // If there are Bulletproofs, there must be a matching amount of outputs, implicitly
          // banning 0 outputs
          // Since HF 12 (CLSAG being 13), a 2-output minimum has also been enforced
          Err(io::Error::other("RCT with Bulletproofs(+) had 0 outputs"))?;
        }
      }
    }

    Ok((
      if rct_type == RctType::Null {
        RctBase { fee: 0, pseudo_outs: vec![], encrypted_amounts: vec![], commitments: vec![] }
      } else {
        RctBase {
          fee: read_varint(r)?,
          pseudo_outs: if rct_type == RctType::MlsagIndividual {
            read_raw_vec(read_point, inputs, r)?
          } else {
            vec![]
          },
          encrypted_amounts: (0 .. outputs)
            .map(|_| EncryptedAmount::read(rct_type.compact_encrypted_amounts(), r))
            .collect::<Result<_, _>>()?,
          commitments: read_raw_vec(read_point, outputs, r)?,
        }
      },
      rct_type,
    ))
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum RctPrunable {
  Null,
  AggregateMlsagBorromean {
    borromean: Vec<BorromeanRange>,
    mlsag: Mlsag,
  },
  MlsagBorromean {
    borromean: Vec<BorromeanRange>,
    mlsags: Vec<Mlsag>,
  },
  MlsagBulletproofs {
    bulletproofs: Bulletproofs,
    mlsags: Vec<Mlsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
  Clsag {
    bulletproofs: Bulletproofs,
    clsags: Vec<Clsag>,
    pseudo_outs: Vec<EdwardsPoint>,
  },
}

impl RctPrunable {
  pub(crate) fn fee_weight(protocol: Protocol, inputs: usize, outputs: usize) -> usize {
    // 1 byte for number of BPs (technically a VarInt, yet there's always just zero or one)
    1 + Bulletproofs::fee_weight(protocol.bp_plus(), outputs) +
      (inputs * (Clsag::fee_weight(protocol.ring_len()) + 32))
  }

  pub fn write<W: Write>(&self, w: &mut W, rct_type: RctType) -> io::Result<()> {
    match self {
      RctPrunable::Null => Ok(()),
      RctPrunable::AggregateMlsagBorromean { borromean, mlsag } => {
        write_raw_vec(BorromeanRange::write, borromean, w)?;
        mlsag.write(w)
      }
      RctPrunable::MlsagBorromean { borromean, mlsags } => {
        write_raw_vec(BorromeanRange::write, borromean, w)?;
        write_raw_vec(Mlsag::write, mlsags, w)
      }
      RctPrunable::MlsagBulletproofs { bulletproofs, mlsags, pseudo_outs } => {
        if rct_type == RctType::Bulletproofs {
          w.write_all(&1u32.to_le_bytes())?;
        } else {
          w.write_all(&[1])?;
        }
        bulletproofs.write(w)?;

        write_raw_vec(Mlsag::write, mlsags, w)?;
        write_raw_vec(write_point, pseudo_outs, w)
      }
      RctPrunable::Clsag { bulletproofs, clsags, pseudo_outs } => {
        w.write_all(&[1])?;
        bulletproofs.write(w)?;

        write_raw_vec(Clsag::write, clsags, w)?;
        write_raw_vec(write_point, pseudo_outs, w)
      }
    }
  }

  pub fn serialize(&self, rct_type: RctType) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized, rct_type).unwrap();
    serialized
  }

  pub fn read<R: Read>(
    rct_type: RctType,
    ring_length: usize,
    inputs: usize,
    outputs: usize,
    r: &mut R,
  ) -> io::Result<RctPrunable> {
    // While we generally don't bother with misc consensus checks, this affects the safety of
    // the below defined rct_type function
    // The exact line preventing zero-input transactions is:
    // https://github.com/monero-project/monero/blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/
    // src/ringct/rctSigs.cpp#L609
    // And then for RctNull, that's only allowed for miner TXs which require one input of
    // Input::Gen
    if inputs == 0 {
      Err(io::Error::other("transaction had no inputs"))?;
    }

    Ok(match rct_type {
      RctType::Null => RctPrunable::Null,
      RctType::MlsagAggregate => RctPrunable::AggregateMlsagBorromean {
        borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
        mlsag: Mlsag::read(ring_length, inputs + 1, r)?,
      },
      RctType::MlsagIndividual => RctPrunable::MlsagBorromean {
        borromean: read_raw_vec(BorromeanRange::read, outputs, r)?,
        mlsags: (0 .. inputs).map(|_| Mlsag::read(ring_length, 2, r)).collect::<Result<_, _>>()?,
      },
      RctType::Bulletproofs | RctType::BulletproofsCompactAmount => {
        RctPrunable::MlsagBulletproofs {
          bulletproofs: {
            if (if rct_type == RctType::Bulletproofs {
              u64::from(read_u32(r)?)
            } else {
              read_varint(r)?
            }) != 1
            {
              Err(io::Error::other("n bulletproofs instead of one"))?;
            }
            Bulletproofs::read(r)?
          },
          mlsags: (0 .. inputs)
            .map(|_| Mlsag::read(ring_length, 2, r))
            .collect::<Result<_, _>>()?,
          pseudo_outs: read_raw_vec(read_point, inputs, r)?,
        }
      }
      RctType::Clsag | RctType::BulletproofsPlus => RctPrunable::Clsag {
        bulletproofs: {
          if read_varint::<_, u64>(r)? != 1 {
            Err(io::Error::other("n bulletproofs instead of one"))?;
          }
          (if rct_type == RctType::Clsag { Bulletproofs::read } else { Bulletproofs::read_plus })(
            r,
          )?
        },
        clsags: (0 .. inputs).map(|_| Clsag::read(ring_length, r)).collect::<Result<_, _>>()?,
        pseudo_outs: read_raw_vec(read_point, inputs, r)?,
      },
    })
  }

  pub(crate) fn signature_write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      RctPrunable::Null => panic!("Serializing RctPrunable::Null for a signature"),
      RctPrunable::AggregateMlsagBorromean { borromean, .. } |
      RctPrunable::MlsagBorromean { borromean, .. } => {
        borromean.iter().try_for_each(|rs| rs.write(w))
      }
      RctPrunable::MlsagBulletproofs { bulletproofs, .. } |
      RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.signature_write(w),
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct RctSignatures {
  pub base: RctBase,
  pub prunable: RctPrunable,
}

impl RctSignatures {
  /// RctType for a given RctSignatures struct.
  pub fn rct_type(&self) -> RctType {
    match &self.prunable {
      RctPrunable::Null => RctType::Null,
      RctPrunable::AggregateMlsagBorromean { .. } => RctType::MlsagAggregate,
      RctPrunable::MlsagBorromean { .. } => RctType::MlsagIndividual,
      // RctBase ensures there's at least one output, making the following
      // inferences guaranteed/expects impossible on any valid RctSignatures
      RctPrunable::MlsagBulletproofs { .. } => {
        if matches!(
          self
            .base
            .encrypted_amounts
            .first()
            .expect("MLSAG with Bulletproofs didn't have any outputs"),
          EncryptedAmount::Original { .. }
        ) {
          RctType::Bulletproofs
        } else {
          RctType::BulletproofsCompactAmount
        }
      }
      RctPrunable::Clsag { bulletproofs, .. } => {
        if matches!(bulletproofs, Bulletproofs::Original { .. }) {
          RctType::Clsag
        } else {
          RctType::BulletproofsPlus
        }
      }
    }
  }

  pub(crate) fn fee_weight(protocol: Protocol, inputs: usize, outputs: usize, fee: u64) -> usize {
    RctBase::fee_weight(outputs, fee) + RctPrunable::fee_weight(protocol, inputs, outputs)
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    let rct_type = self.rct_type();
    self.base.write(w, rct_type)?;
    self.prunable.write(w, rct_type)
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(
    ring_length: usize,
    inputs: usize,
    outputs: usize,
    r: &mut R,
  ) -> io::Result<RctSignatures> {
    let base = RctBase::read(inputs, outputs, r)?;
    Ok(RctSignatures {
      base: base.0,
      prunable: RctPrunable::read(base.1, ring_length, inputs, outputs, r)?,
    })
  }
}
@@ -1,761 +0,0 @@
use core::fmt::Debug;
#[cfg(not(feature = "std"))]
use alloc::boxed::Box;
use std_shims::{
  vec::Vec,
  io,
  string::{String, ToString},
};

use async_trait::async_trait;

use curve25519_dalek::edwards::EdwardsPoint;

use monero_generators::decompress_point;

use serde::{Serialize, Deserialize, de::DeserializeOwned};
use serde_json::{Value, json};

use crate::{
  Protocol,
  serialize::*,
  transaction::{Input, Timelock, Transaction},
  block::Block,
  wallet::{FeePriority, Fee},
};

#[cfg(feature = "http-rpc")]
mod http;
#[cfg(feature = "http-rpc")]
pub use http::*;

// Number of blocks the fee estimate will be valid for
// https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
// src/wallet/wallet2.cpp#L121
const GRACE_BLOCKS_FOR_FEE_ESTIMATE: u64 = 10;

#[derive(Deserialize, Debug)]
pub struct EmptyResponse {}
#[derive(Deserialize, Debug)]
pub struct JsonRpcResponse<T> {
  result: T,
}

#[derive(Deserialize, Debug)]
struct TransactionResponse {
  tx_hash: String,
  as_hex: String,
  pruned_as_hex: String,
}
#[derive(Deserialize, Debug)]
struct TransactionsResponse {
  #[serde(default)]
  missed_tx: Vec<String>,
  txs: Vec<TransactionResponse>,
}

#[derive(Deserialize, Debug)]
pub struct OutputResponse {
  pub height: usize,
  pub unlocked: bool,
  key: String,
  mask: String,
  txid: String,
}

#[derive(Clone, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum RpcError {
  #[cfg_attr(feature = "std", error("internal error ({0})"))]
  InternalError(&'static str),
  #[cfg_attr(feature = "std", error("connection error ({0})"))]
  ConnectionError(String),
  #[cfg_attr(feature = "std", error("invalid node ({0})"))]
  InvalidNode(String),
  #[cfg_attr(feature = "std", error("unsupported protocol version ({0})"))]
  UnsupportedProtocol(usize),
  #[cfg_attr(feature = "std", error("transactions not found"))]
  TransactionsNotFound(Vec<[u8; 32]>),
  #[cfg_attr(feature = "std", error("invalid point ({0})"))]
  InvalidPoint(String),
  #[cfg_attr(feature = "std", error("pruned transaction"))]
  PrunedTransaction,
  #[cfg_attr(feature = "std", error("invalid transaction ({0:?})"))]
  InvalidTransaction([u8; 32]),
  #[cfg_attr(feature = "std", error("unexpected fee response"))]
  InvalidFee,
  #[cfg_attr(feature = "std", error("invalid priority"))]
  InvalidPriority,
}

fn rpc_hex(value: &str) -> Result<Vec<u8>, RpcError> {
  hex::decode(value).map_err(|_| RpcError::InvalidNode("expected hex wasn't hex".to_string()))
}

fn hash_hex(hash: &str) -> Result<[u8; 32], RpcError> {
  rpc_hex(hash)?.try_into().map_err(|_| RpcError::InvalidNode("hash wasn't 32-bytes".to_string()))
}

fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
  decompress_point(
    rpc_hex(point)?.try_into().map_err(|_| RpcError::InvalidPoint(point.to_string()))?,
  )
  .ok_or_else(|| RpcError::InvalidPoint(point.to_string()))
}

// Read an EPEE VarInt, distinct from the VarInts used throughout the rest of the protocol
fn read_epee_vi<R: io::Read>(reader: &mut R) -> io::Result<u64> {
  let vi_start = read_byte(reader)?;
  let len = match vi_start & 0b11 {
    0 => 1,
    1 => 2,
    2 => 4,
    3 => 8,
    _ => unreachable!(),
  };
  let mut vi = u64::from(vi_start >> 2);
  for i in 1 .. len {
    vi |= u64::from(read_byte(reader)?) << (((i - 1) * 8) + 6);
  }
  Ok(vi)
}
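
// Worked example, not from the source: the low two bits of the first byte give
// the width (0b01 = 2 bytes) and the remaining bits hold the value. 300 in the
// 2-byte form is (300 << 2) | 0b01 = 0x04B1, serialized little-endian.
#[cfg(test)]
fn epee_vi_example() -> io::Result<()> {
  assert_eq!(read_epee_vi(&mut [0xb1u8, 0x04].as_slice())?, 300);
  Ok(())
}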

#[async_trait]
pub trait RpcConnection: Clone + Debug {
  /// Perform a POST request to the specified route with the specified body.
  ///
  /// The implementor is left to handle anything such as authentication.
  async fn post(&self, route: &str, body: Vec<u8>) -> Result<Vec<u8>, RpcError>;
}

// TODO: Make this provided methods for RpcConnection?
#[derive(Clone, Debug)]
pub struct Rpc<R: RpcConnection>(R);
impl<R: RpcConnection> Rpc<R> {
  /// Perform a RPC call to the specified route with the provided parameters.
  ///
  /// This is NOT a JSON-RPC call. They use a route of "json_rpc" and are available via
  /// `json_rpc_call`.
  pub async fn rpc_call<Params: Serialize + Debug, Response: DeserializeOwned + Debug>(
    &self,
    route: &str,
    params: Option<Params>,
  ) -> Result<Response, RpcError> {
    let res = self
      .0
      .post(
        route,
        if let Some(params) = params {
          serde_json::to_string(&params).unwrap().into_bytes()
        } else {
          vec![]
        },
      )
      .await?;
    let res_str = std_shims::str::from_utf8(&res)
      .map_err(|_| RpcError::InvalidNode("response wasn't utf-8".to_string()))?;
    serde_json::from_str(res_str)
      .map_err(|_| RpcError::InvalidNode(format!("response wasn't json: {res_str}")))
  }

/// Perform a JSON-RPC call with the specified method with the provided parameters
|
|
||||||
pub async fn json_rpc_call<Response: DeserializeOwned + Debug>(
|
|
||||||
&self,
|
|
||||||
method: &str,
|
|
||||||
params: Option<Value>,
|
|
||||||
) -> Result<Response, RpcError> {
|
|
||||||
let mut req = json!({ "method": method });
|
|
||||||
if let Some(params) = params {
|
|
||||||
req.as_object_mut().unwrap().insert("params".into(), params);
|
|
||||||
}
|
|
||||||
Ok(self.rpc_call::<_, JsonRpcResponse<Response>>("json_rpc", Some(req)).await?.result)
|
|
||||||
}
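
  // Illustrative usage sketch (comment only; `HttpRpc` is a hypothetical RpcConnection
  // implementation, not something this file provides):
  //
  //   let rpc = Rpc(HttpRpc::new("http://node:18081"));
  //   // Plain-route call: POSTs the JSON body directly to "/get_height"
  //   let height: HeightResponse = rpc.rpc_call("get_height", None::<()>).await?;
  //   // JSON-RPC call: wraps the method and params in an envelope POSTed to "/json_rpc"
  //   let header: LastHeaderResponse =
  //     rpc.json_rpc_call("get_last_block_header", None).await?;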

  /// Perform a binary call to the specified route with the provided parameters.
  pub async fn bin_call(&self, route: &str, params: Vec<u8>) -> Result<Vec<u8>, RpcError> {
    self.0.post(route, params).await
  }

  /// Get the active blockchain protocol version.
  pub async fn get_protocol(&self) -> Result<Protocol, RpcError> {
    #[derive(Deserialize, Debug)]
    struct ProtocolResponse {
      major_version: usize,
    }

    #[derive(Deserialize, Debug)]
    struct LastHeaderResponse {
      block_header: ProtocolResponse,
    }

    Ok(
      match self
        .json_rpc_call::<LastHeaderResponse>("get_last_block_header", None)
        .await?
        .block_header
        .major_version
      {
        13 | 14 => Protocol::v14,
        15 | 16 => Protocol::v16,
        protocol => Err(RpcError::UnsupportedProtocol(protocol))?,
      },
    )
  }

  pub async fn get_height(&self) -> Result<usize, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeightResponse {
      height: usize,
    }
    Ok(self.rpc_call::<Option<()>, HeightResponse>("get_height", None).await?.height)
  }

  pub async fn get_transactions(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    if hashes.is_empty() {
      return Ok(vec![]);
    }

    let mut hashes_hex = hashes.iter().map(hex::encode).collect::<Vec<_>>();
    let mut all_txs = Vec::with_capacity(hashes.len());
    while !hashes_hex.is_empty() {
      // Monero errors if more than 100 are requested unless using a non-restricted RPC
      const TXS_PER_REQUEST: usize = 100;
      let this_count = TXS_PER_REQUEST.min(hashes_hex.len());

      let txs: TransactionsResponse = self
        .rpc_call(
          "get_transactions",
          Some(json!({
            "txs_hashes": hashes_hex.drain(.. this_count).collect::<Vec<_>>(),
          })),
        )
        .await?;

      if !txs.missed_tx.is_empty() {
        Err(RpcError::TransactionsNotFound(
          txs.missed_tx.iter().map(|hash| hash_hex(hash)).collect::<Result<_, _>>()?,
        ))?;
      }

      all_txs.extend(txs.txs);
    }

    all_txs
      .iter()
      .enumerate()
      .map(|(i, res)| {
        let tx = Transaction::read::<&[u8]>(
          &mut rpc_hex(if !res.as_hex.is_empty() { &res.as_hex } else { &res.pruned_as_hex })?
            .as_ref(),
        )
        .map_err(|_| match hash_hex(&res.tx_hash) {
          Ok(hash) => RpcError::InvalidTransaction(hash),
          Err(err) => err,
        })?;

        // https://github.com/monero-project/monero/issues/8311
        if res.as_hex.is_empty() {
          match tx.prefix.inputs.first() {
            Some(Input::Gen { .. }) => (),
            _ => Err(RpcError::PrunedTransaction)?,
          }
        }

        // This does run a few keccak256 hashes, which is pointless if the node is trusted
        // In exchange, this provides resilience against invalid/malicious nodes
        if tx.hash() != hashes[i] {
          Err(RpcError::InvalidNode(
            "node replied with a transaction other than the requested one".to_string(),
          ))?;
        }

        Ok(tx)
      })
      .collect()
  }

  pub async fn get_transaction(&self, tx: [u8; 32]) -> Result<Transaction, RpcError> {
    self.get_transactions(&[tx]).await.map(|mut txs| txs.swap_remove(0))
  }

  /// Get the hash of a block from the node by the block's number.
  /// This function does not verify the returned block hash is actually for the number in question.
  pub async fn get_block_hash(&self, number: usize) -> Result<[u8; 32], RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockHeaderResponse {
      hash: String,
    }
    #[derive(Deserialize, Debug)]
    struct BlockHeaderByHeightResponse {
      block_header: BlockHeaderResponse,
    }

    let header: BlockHeaderByHeightResponse =
      self.json_rpc_call("get_block_header_by_height", Some(json!({ "height": number }))).await?;
    hash_hex(&header.block_header.hash)
  }

  /// Get a block from the node by its hash.
  /// This function verifies the returned block actually has the hash in question.
  pub async fn get_block(&self, hash: [u8; 32]) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String,
    }

    let res: BlockResponse =
      self.json_rpc_call("get_block", Some(json!({ "hash": hex::encode(hash) }))).await?;

    let block = Block::read::<&[u8]>(&mut rpc_hex(&res.blob)?.as_ref())
      .map_err(|_| RpcError::InvalidNode("invalid block".to_string()))?;
    if block.hash() != hash {
      Err(RpcError::InvalidNode("different block than requested (hash)".to_string()))?;
    }
    Ok(block)
  }

  pub async fn get_block_by_number(&self, number: usize) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String,
    }

    let res: BlockResponse =
      self.json_rpc_call("get_block", Some(json!({ "height": number }))).await?;

    let block = Block::read::<&[u8]>(&mut rpc_hex(&res.blob)?.as_ref())
      .map_err(|_| RpcError::InvalidNode("invalid block".to_string()))?;

    // Make sure this is actually the block for this number
    match block.miner_tx.prefix.inputs.first() {
      Some(Input::Gen(actual)) => {
        if usize::try_from(*actual).unwrap() == number {
          Ok(block)
        } else {
          Err(RpcError::InvalidNode("different block than requested (number)".to_string()))
        }
      }
      _ => Err(RpcError::InvalidNode(
        "block's miner_tx didn't have an input of kind Input::Gen".to_string(),
      )),
    }
  }

  pub async fn get_block_transactions(&self, hash: [u8; 32]) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block(hash).await?;
    let mut res = vec![block.miner_tx];
    res.extend(self.get_transactions(&block.txs).await?);
    Ok(res)
  }

  pub async fn get_block_transactions_by_number(
    &self,
    number: usize,
  ) -> Result<Vec<Transaction>, RpcError> {
    self.get_block_transactions(self.get_block_hash(number).await?).await
  }

  /// Get the output indexes of the specified transaction.
  pub async fn get_o_indexes(&self, hash: [u8; 32]) -> Result<Vec<u64>, RpcError> {
    /*
    TODO: Use these when a suitable epee serde lib exists

    #[derive(Serialize, Debug)]
    struct Request {
      txid: [u8; 32],
    }

    #[derive(Deserialize, Debug)]
    struct OIndexes {
      o_indexes: Vec<u64>,
    }
    */

    // Given the immaturity of Rust epee libraries, this is a homegrown one which is only validated
    // to work against this specific function

    // Header for EPEE, an 8-byte magic and a version
    const EPEE_HEADER: &[u8] = b"\x01\x11\x01\x01\x01\x01\x02\x01\x01";

    let mut request = EPEE_HEADER.to_vec();
    // Number of fields (shifted over 2 bits as the 2 LSBs are reserved for metadata)
    request.push(1 << 2);
    // Length of field name
    request.push(4);
    // Field name
    request.extend(b"txid");
    // Type of field
    request.push(10);
    // Length of string, since this byte array is technically a string
    request.push(32 << 2);
    // The "string"
    request.extend(hash);
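
    // Worked example (comment only): for any 32-byte hash, the request built above is a fixed
    // 49 bytes: 9 (header) + 1 (field count) + 1 (name length) + 4 ("txid") + 1 (type tag 10) +
    // 1 (encoded length, 32 << 2) + 32 (the hash itself).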

    let indexes_buf = self.bin_call("get_o_indexes.bin", request).await?;
    let mut indexes: &[u8] = indexes_buf.as_ref();

    (|| {
      let mut res = None;
      let mut is_okay = false;

      if read_bytes::<_, { EPEE_HEADER.len() }>(&mut indexes)? != EPEE_HEADER {
        Err(io::Error::other("invalid header"))?;
      }

      let read_object = |reader: &mut &[u8]| -> io::Result<Vec<u64>> {
        let fields = read_byte(reader)? >> 2;

        for _ in 0 .. fields {
          let name_len = read_byte(reader)?;
          let name = read_raw_vec(read_byte, name_len.into(), reader)?;

          let type_with_array_flag = read_byte(reader)?;
          let kind = type_with_array_flag & (!0x80);

          let iters = if type_with_array_flag != kind { read_epee_vi(reader)? } else { 1 };

          if (&name == b"o_indexes") && (kind != 5) {
            Err(io::Error::other("o_indexes weren't u64s"))?;
          }

          let f = match kind {
            // i64
            1 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // i32
            2 => |reader: &mut &[u8]| read_raw_vec(read_byte, 4, reader),
            // i16
            3 => |reader: &mut &[u8]| read_raw_vec(read_byte, 2, reader),
            // i8
            4 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // u64
            5 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // u32
            6 => |reader: &mut &[u8]| read_raw_vec(read_byte, 4, reader),
            // u16
            7 => |reader: &mut &[u8]| read_raw_vec(read_byte, 2, reader),
            // u8
            8 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // double
            9 => |reader: &mut &[u8]| read_raw_vec(read_byte, 8, reader),
            // string, or any collection of bytes
            10 => |reader: &mut &[u8]| {
              let len = read_epee_vi(reader)?;
              read_raw_vec(
                read_byte,
                len.try_into().map_err(|_| io::Error::other("u64 length exceeded usize"))?,
                reader,
              )
            },
            // bool
            11 => |reader: &mut &[u8]| read_raw_vec(read_byte, 1, reader),
            // object, errors here as it shouldn't be used on this call
            12 => {
              |_: &mut &[u8]| Err(io::Error::other("node used object in reply to get_o_indexes"))
            }
            // array, so far unused
            13 => |_: &mut &[u8]| Err(io::Error::other("node used the unused array type")),
            _ => |_: &mut &[u8]| Err(io::Error::other("node used an invalid type")),
          };

          let mut bytes_res = vec![];
          for _ in 0 .. iters {
            bytes_res.push(f(reader)?);
          }

          let mut actual_res = Vec::with_capacity(bytes_res.len());
          match name.as_slice() {
            b"o_indexes" => {
              for o_index in bytes_res {
                actual_res.push(u64::from_le_bytes(
                  o_index
                    .try_into()
                    .map_err(|_| io::Error::other("node didn't provide 8 bytes for a u64"))?,
                ));
              }
              res = Some(actual_res);
            }
            b"status" => {
              if bytes_res
                .first()
                .ok_or_else(|| io::Error::other("status wasn't a string"))?
                .as_slice() !=
                b"OK"
              {
                // TODO: Better handle non-OK responses
                Err(io::Error::other("response wasn't OK"))?;
              }
              is_okay = true;
            }
            _ => continue,
          }

          if is_okay && res.is_some() {
            break;
          }
        }

        // Didn't return a response with a status
        // (if the status wasn't okay, we would've already errored)
        if !is_okay {
          Err(io::Error::other("response didn't contain a status"))?;
        }

        // If the Vec was empty, it would've been omitted, hence the unwrap_or
        // TODO: Test against a 0-output TX, such as the ones found in block 202612
        Ok(res.unwrap_or(vec![]))
      };

      read_object(&mut indexes)
    })()
    .map_err(|_| RpcError::InvalidNode("invalid binary response".to_string()))
  }

  /// Get the output distribution, from the specified start height to the specified end height
  /// (both inclusive).
  pub async fn get_output_distribution(
    &self,
    from: usize,
    to: usize,
  ) -> Result<Vec<u64>, RpcError> {
    #[derive(Deserialize, Debug)]
    struct Distribution {
      distribution: Vec<u64>,
    }

    #[derive(Deserialize, Debug)]
    struct Distributions {
      distributions: Vec<Distribution>,
    }

    let mut distributions: Distributions = self
      .json_rpc_call(
        "get_output_distribution",
        Some(json!({
          "binary": false,
          "amounts": [0],
          "cumulative": true,
          "from_height": from,
          "to_height": to,
        })),
      )
      .await?;

    Ok(distributions.distributions.swap_remove(0).distribution)
  }
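
  // Usage note (comment only, assumed semantics of the RPC): with "cumulative": true, each entry
  // is a running total of zero-amount outputs up to that block, so get_output_distribution(0, tip)
  // is expected to yield one cumulative count per block.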

  /// Get the specified outputs from the RingCT (zero-amount) pool
  pub async fn get_outs(&self, indexes: &[u64]) -> Result<Vec<OutputResponse>, RpcError> {
    #[derive(Deserialize, Debug)]
    struct OutsResponse {
      status: String,
      outs: Vec<OutputResponse>,
    }

    let res: OutsResponse = self
      .rpc_call(
        "get_outs",
        Some(json!({
          "get_txid": true,
          "outputs": indexes.iter().map(|o| json!({
            "amount": 0,
            "index": o
          })).collect::<Vec<_>>()
        })),
      )
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidNode("bad response to get_outs".to_string()))?;
    }

    Ok(res.outs)
  }

  /// Get the specified outputs from the RingCT (zero-amount) pool, but only return them if their
  /// timelock has been satisfied.
  ///
  /// The timelock being satisfied is distinct from being free of the 10-block lock applied to all
  /// Monero transactions.
  pub async fn get_unlocked_outputs(
    &self,
    indexes: &[u64],
    height: usize,
    fingerprintable_canonical: bool,
  ) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
    let outs: Vec<OutputResponse> = self.get_outs(indexes).await?;

    // Only need to fetch txs to do canonical check on timelock
    let txs = if fingerprintable_canonical {
      self
        .get_transactions(
          &outs.iter().map(|out| hash_hex(&out.txid)).collect::<Result<Vec<_>, _>>()?,
        )
        .await?
    } else {
      Vec::new()
    };

    // TODO: https://github.com/serai-dex/serai/issues/104
    outs
      .iter()
      .enumerate()
      .map(|(i, out)| {
        // Allow keys to be invalid, though if they are, return None to trigger selection of a new
        // decoy
        // Only valid keys can be used in CLSAG proofs, hence the need for re-selection, yet
        // invalid keys may honestly exist on the blockchain
        // Only a recent hard fork checked that output keys were valid points
        let Some(key) = decompress_point(
          rpc_hex(&out.key)?
            .try_into()
            .map_err(|_| RpcError::InvalidNode("non-32-byte point".to_string()))?,
        ) else {
          return Ok(None);
        };
        Ok(Some([key, rpc_point(&out.mask)?]).filter(|_| {
          if fingerprintable_canonical {
            Timelock::Block(height) >= txs[i].prefix.timelock
          } else {
            out.unlocked
          }
        }))
      })
      .collect()
  }

  async fn get_fee_v14(&self, priority: FeePriority) -> Result<Fee, RpcError> {
    #[derive(Deserialize, Debug)]
    struct FeeResponseV14 {
      status: String,
      fee: u64,
      quantization_mask: u64,
    }

    // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
    // src/wallet/wallet2.cpp#L7569-L7584
    // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
    // src/wallet/wallet2.cpp#L7660-L7661
    let priority_idx =
      usize::try_from(if priority.fee_priority() == 0 { 1 } else { priority.fee_priority() - 1 })
        .map_err(|_| RpcError::InvalidPriority)?;
    let multipliers = [1, 5, 25, 1000];
    if priority_idx >= multipliers.len() {
      // though not an RPC error, it seems sensible to treat as such
      Err(RpcError::InvalidPriority)?;
    }
    let fee_multiplier = multipliers[priority_idx];
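
    // Worked example (comment only): FeePriority 0 (the default) maps to index 1 (multiplier 5),
    // while priorities 1 through 4 map to indexes 0 through 3 (multipliers 1, 5, 25, 1000).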

    let res: FeeResponseV14 = self
      .json_rpc_call(
        "get_fee_estimate",
        Some(json!({ "grace_blocks": GRACE_BLOCKS_FOR_FEE_ESTIMATE })),
      )
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidFee)?;
    }

    Ok(Fee { per_weight: res.fee * fee_multiplier, mask: res.quantization_mask })
  }

  /// Get the currently estimated fee from the node.
  ///
  /// This may be manipulated to unsafe levels and MUST be sanity checked.
  // TODO: Take a sanity check argument
  pub async fn get_fee(&self, protocol: Protocol, priority: FeePriority) -> Result<Fee, RpcError> {
    // TODO: Implement wallet2's adjust_priority which by default automatically uses a lower
    // priority than provided depending on the backlog in the pool
    if protocol.v16_fee() {
      #[derive(Deserialize, Debug)]
      struct FeeResponse {
        status: String,
        fees: Vec<u64>,
        quantization_mask: u64,
      }

      let res: FeeResponse = self
        .json_rpc_call(
          "get_fee_estimate",
          Some(json!({ "grace_blocks": GRACE_BLOCKS_FOR_FEE_ESTIMATE })),
        )
        .await?;

      // https://github.com/monero-project/monero/blob/94e67bf96bbc010241f29ada6abc89f49a81759c/
      // src/wallet/wallet2.cpp#L7615-L7620
      let priority_idx = usize::try_from(if priority.fee_priority() >= 4 {
        3
      } else {
        priority.fee_priority().saturating_sub(1)
      })
      .map_err(|_| RpcError::InvalidPriority)?;
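
      // Worked example (comment only): priorities 0 and 1 map to index 0, 2 to index 1,
      // 3 to index 2, and 4 or higher to index 3 (the highest of the four returned fees).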

      if res.status != "OK" {
        Err(RpcError::InvalidFee)
      } else if priority_idx >= res.fees.len() {
        Err(RpcError::InvalidPriority)
      } else {
        Ok(Fee { per_weight: res.fees[priority_idx], mask: res.quantization_mask })
      }
    } else {
      self.get_fee_v14(priority).await
    }
  }

  pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct SendRawResponse {
      status: String,
      double_spend: bool,
      fee_too_low: bool,
      invalid_input: bool,
      invalid_output: bool,
      low_mixin: bool,
      not_relayed: bool,
      overspend: bool,
      too_big: bool,
      too_few_outputs: bool,
      reason: String,
    }

    let res: SendRawResponse = self
      .rpc_call("send_raw_transaction", Some(json!({ "tx_as_hex": hex::encode(tx.serialize()) })))
      .await?;

    if res.status != "OK" {
      Err(RpcError::InvalidTransaction(tx.hash()))?;
    }

    Ok(())
  }

  // TODO: Take &Address, not &str?
  pub async fn generate_blocks(
    &self,
    address: &str,
    block_count: usize,
  ) -> Result<(Vec<[u8; 32]>, usize), RpcError> {
    #[derive(Debug, Deserialize)]
    struct BlocksResponse {
      blocks: Vec<String>,
      height: usize,
    }

    let res = self
      .json_rpc_call::<BlocksResponse>(
        "generateblocks",
        Some(json!({
          "wallet_address": address,
          "amount_of_blocks": block_count
        })),
      )
      .await?;

    let mut blocks = Vec::with_capacity(res.blocks.len());
    for block in res.blocks {
      blocks.push(hash_hex(&block)?);
    }
    Ok((blocks, res.height))
  }
}
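
// Minimal sketch (not part of the upstream code) of what implementing RpcConnection entails: a
// transport which POSTs `body` to `route` and returns the raw response bytes. A real
// implementation would wrap an HTTP client and handle concerns such as authentication.
#[cfg(test)]
#[derive(Clone, Debug)]
struct MockConnection;
#[cfg(test)]
#[async_trait]
impl RpcConnection for MockConnection {
  async fn post(&self, _route: &str, _body: Vec<u8>) -> Result<Vec<u8>, RpcError> {
    // This mock has no transport, so it always fails with a connection error
    Err(RpcError::ConnectionError("mock connection has no transport".to_string()))
  }
}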
@@ -1,172 +0,0 @@
use core::fmt::Debug;
use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use monero_generators::decompress_point;

const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;

mod sealed {
  pub trait VarInt: TryInto<u64> + TryFrom<u64> + Copy {
    const BITS: usize;
  }
  impl VarInt for u8 {
    const BITS: usize = 8;
  }
  impl VarInt for u32 {
    const BITS: usize = 32;
  }
  impl VarInt for u64 {
    const BITS: usize = 64;
  }
  impl VarInt for usize {
    const BITS: usize = core::mem::size_of::<usize>() * 8;
  }
}

// This will panic if the VarInt exceeds u64::MAX
pub(crate) fn varint_len<U: sealed::VarInt>(varint: U) -> usize {
  let varint_u64: u64 = varint.try_into().map_err(|_| "varint exceeded u64").unwrap();
  ((usize::try_from(u64::BITS - varint_u64.leading_zeros()).unwrap().saturating_sub(1)) / 7) + 1
}

pub(crate) fn write_byte<W: Write>(byte: &u8, w: &mut W) -> io::Result<()> {
  w.write_all(&[*byte])
}

// This will panic if the VarInt exceeds u64::MAX
pub(crate) fn write_varint<W: Write, U: sealed::VarInt>(varint: &U, w: &mut W) -> io::Result<()> {
  let mut varint: u64 = (*varint).try_into().map_err(|_| "varint exceeded u64").unwrap();
  while {
    let mut b = u8::try_from(varint & u64::from(!VARINT_CONTINUATION_MASK)).unwrap();
    varint >>= 7;
    if varint != 0 {
      b |= VARINT_CONTINUATION_MASK;
    }
    write_byte(&b, w)?;
    varint != 0
  } {}
  Ok(())
}

pub(crate) fn write_scalar<W: Write>(scalar: &Scalar, w: &mut W) -> io::Result<()> {
  w.write_all(&scalar.to_bytes())
}

pub(crate) fn write_point<W: Write>(point: &EdwardsPoint, w: &mut W) -> io::Result<()> {
  w.write_all(&point.compress().to_bytes())
}

pub(crate) fn write_raw_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
  f: F,
  values: &[T],
  w: &mut W,
) -> io::Result<()> {
  for value in values {
    f(value, w)?;
  }
  Ok(())
}

pub(crate) fn write_vec<T, W: Write, F: Fn(&T, &mut W) -> io::Result<()>>(
  f: F,
  values: &[T],
  w: &mut W,
) -> io::Result<()> {
  write_varint(&values.len(), w)?;
  write_raw_vec(f, values, w)
}

pub(crate) fn read_bytes<R: Read, const N: usize>(r: &mut R) -> io::Result<[u8; N]> {
  let mut res = [0; N];
  r.read_exact(&mut res)?;
  Ok(res)
}

pub(crate) fn read_byte<R: Read>(r: &mut R) -> io::Result<u8> {
  Ok(read_bytes::<_, 1>(r)?[0])
}

pub(crate) fn read_u16<R: Read>(r: &mut R) -> io::Result<u16> {
  read_bytes(r).map(u16::from_le_bytes)
}

pub(crate) fn read_u32<R: Read>(r: &mut R) -> io::Result<u32> {
  read_bytes(r).map(u32::from_le_bytes)
}

pub(crate) fn read_u64<R: Read>(r: &mut R) -> io::Result<u64> {
  read_bytes(r).map(u64::from_le_bytes)
}

pub(crate) fn read_varint<R: Read, U: sealed::VarInt>(r: &mut R) -> io::Result<U> {
  let mut bits = 0;
  let mut res = 0;
  while {
    let b = read_byte(r)?;
    if (bits != 0) && (b == 0) {
      Err(io::Error::other("non-canonical varint"))?;
    }
    if ((bits + 7) >= U::BITS) && (b >= (1 << (U::BITS - bits))) {
      Err(io::Error::other("varint overflow"))?;
    }

    res += u64::from(b & (!VARINT_CONTINUATION_MASK)) << bits;
    bits += 7;
    b & VARINT_CONTINUATION_MASK == VARINT_CONTINUATION_MASK
  } {}
  res.try_into().map_err(|_| io::Error::other("VarInt does not fit into integer type"))
}
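
// Illustrative round-trip sketch (not from the upstream source): 300 is 0b1_0010_1100, so it
// encodes as [0xAC, 0x02] (low 7 bits with the continuation bit set, then the remaining bits).
#[cfg(test)]
#[test]
fn varint_round_trip() {
  assert_eq!(varint_len(300u64), 2);
  let mut buf = vec![];
  write_varint(&300u64, &mut buf).unwrap();
  assert_eq!(buf, [0xAC, 0x02]);
  assert_eq!(read_varint::<_, u64>(&mut buf.as_slice()).unwrap(), 300);
}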

// All scalar fields supported by monero-serai are checked to be canonical for valid transactions
// While from_bytes_mod_order would be more flexible, it's not currently needed and would be
// inaccurate to include now. While casting a wide net may be preferable, it'd also be inaccurate
// for now. There are also further edge cases, as noted by
// https://github.com/monero-project/monero/issues/8438, where some scalars had an archaic
// reduction applied
pub(crate) fn read_scalar<R: Read>(r: &mut R) -> io::Result<Scalar> {
  Option::from(Scalar::from_canonical_bytes(read_bytes(r)?))
    .ok_or_else(|| io::Error::other("unreduced scalar"))
}

pub(crate) fn read_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
  let bytes = read_bytes(r)?;
  decompress_point(bytes).ok_or_else(|| io::Error::other("invalid point"))
}

pub(crate) fn read_torsion_free_point<R: Read>(r: &mut R) -> io::Result<EdwardsPoint> {
  read_point(r)
    .ok()
    .filter(EdwardsPoint::is_torsion_free)
    .ok_or_else(|| io::Error::other("invalid point"))
}

pub(crate) fn read_raw_vec<R: Read, T, F: Fn(&mut R) -> io::Result<T>>(
  f: F,
  len: usize,
  r: &mut R,
) -> io::Result<Vec<T>> {
  let mut res = vec![];
  for _ in 0 .. len {
    res.push(f(r)?);
  }
  Ok(res)
}

pub(crate) fn read_array<R: Read, T: Debug, F: Fn(&mut R) -> io::Result<T>, const N: usize>(
  f: F,
  r: &mut R,
) -> io::Result<[T; N]> {
  read_raw_vec(f, N, r).map(|vec| vec.try_into().unwrap())
}

pub(crate) fn read_vec<R: Read, T, F: Fn(&mut R) -> io::Result<T>>(
  f: F,
  r: &mut R,
) -> io::Result<Vec<T>> {
  read_raw_vec(f, read_varint(r)?, r)
}
@@ -1,95 +0,0 @@
use hex_literal::hex;
use rand_core::OsRng;

use curve25519_dalek::scalar::Scalar;
use monero_generators::decompress_point;
use multiexp::BatchVerifier;

use crate::{
  Commitment, random_scalar,
  ringct::bulletproofs::{Bulletproofs, original::OriginalStruct},
};

mod plus;

#[test]
fn bulletproofs_vector() {
  let scalar = |scalar| Scalar::from_canonical_bytes(scalar).unwrap();
  let point = |point| decompress_point(point).unwrap();

  // Generated from Monero
  assert!(Bulletproofs::Original(OriginalStruct {
    A: point(hex!("ef32c0b9551b804decdcb107eb22aa715b7ce259bf3c5cac20e24dfa6b28ac71")),
    S: point(hex!("e1285960861783574ee2b689ae53622834eb0b035d6943103f960cd23e063fa0")),
    T1: point(hex!("4ea07735f184ba159d0e0eb662bac8cde3eb7d39f31e567b0fbda3aa23fe5620")),
    T2: point(hex!("b8390aa4b60b255630d40e592f55ec6b7ab5e3a96bfcdcd6f1cd1d2fc95f441e")),
    taux: scalar(hex!("5957dba8ea9afb23d6e81cc048a92f2d502c10c749dc1b2bd148ae8d41ec7107")),
    mu: scalar(hex!("923023b234c2e64774b820b4961f7181f6c1dc152c438643e5a25b0bf271bc02")),
    L: vec![
      point(hex!("c45f656316b9ebf9d357fb6a9f85b5f09e0b991dd50a6e0ae9b02de3946c9d99")),
      point(hex!("9304d2bf0f27183a2acc58cc755a0348da11bd345485fda41b872fee89e72aac")),
      point(hex!("1bb8b71925d155dd9569f64129ea049d6149fdc4e7a42a86d9478801d922129b")),
      point(hex!("5756a7bf887aa72b9a952f92f47182122e7b19d89e5dd434c747492b00e1c6b7")),
      point(hex!("6e497c910d102592830555356af5ff8340e8d141e3fb60ea24cfa587e964f07d")),
      point(hex!("f4fa3898e7b08e039183d444f3d55040f3c790ed806cb314de49f3068bdbb218")),
      point(hex!("0bbc37597c3ead517a3841e159c8b7b79a5ceaee24b2a9a20350127aab428713")),
    ],
    R: vec![
      point(hex!("609420ba1702781692e84accfd225adb3d077aedc3cf8125563400466b52dbd9")),
      point(hex!("fb4e1d079e7a2b0ec14f7e2a3943bf50b6d60bc346a54fcf562fb234b342abf8")),
      point(hex!("6ae3ac97289c48ce95b9c557289e82a34932055f7f5e32720139824fe81b12e5")),
      point(hex!("d071cc2ffbdab2d840326ad15f68c01da6482271cae3cf644670d1632f29a15c")),
      point(hex!("e52a1754b95e1060589ba7ce0c43d0060820ebfc0d49dc52884bc3c65ad18af5")),
      point(hex!("41573b06140108539957df71aceb4b1816d2409ce896659aa5c86f037ca5e851")),
      point(hex!("a65970b2cc3c7b08b2b5b739dbc8e71e646783c41c625e2a5b1535e3d2e0f742")),
    ],
    a: scalar(hex!("0077c5383dea44d3cd1bc74849376bd60679612dc4b945255822457fa0c0a209")),
    b: scalar(hex!("fe80cf5756473482581e1d38644007793ddc66fdeb9404ec1689a907e4863302")),
    t: scalar(hex!("40dfb08e09249040df997851db311bd6827c26e87d6f0f332c55be8eef10e603"))
  })
  .verify(
    &mut OsRng,
    &[
      // For some reason, these vectors are multiplied by INV_EIGHT
      point(hex!("8e8f23f315edae4f6c2f948d9a861e0ae32d356b933cd11d2f0e031ac744c41f"))
        .mul_by_cofactor(),
      point(hex!("2829cbd025aa54cd6e1b59a032564f22f0b2e5627f7f2c4297f90da438b5510f"))
        .mul_by_cofactor(),
    ]
  ));
}

macro_rules! bulletproofs_tests {
  ($name: ident, $max: ident, $plus: literal) => {
    #[test]
    fn $name() {
      // Create Bulletproofs for all possible output quantities
      let mut verifier = BatchVerifier::new(16);
      for i in 1 ..= 16 {
        let commitments = (1 ..= i)
          .map(|i| Commitment::new(random_scalar(&mut OsRng), u64::try_from(i).unwrap()))
          .collect::<Vec<_>>();

        let bp = Bulletproofs::prove(&mut OsRng, &commitments, $plus).unwrap();

        let commitments = commitments.iter().map(Commitment::calculate).collect::<Vec<_>>();
        assert!(bp.verify(&mut OsRng, &commitments));
        assert!(bp.batch_verify(&mut OsRng, &mut verifier, i, &commitments));
      }
      assert!(verifier.verify_vartime());
    }

    #[test]
    fn $max() {
      // Check Bulletproofs errors if we try to prove for too many outputs
      let mut commitments = vec![];
      for _ in 0 .. 17 {
        commitments.push(Commitment::new(Scalar::ZERO, 0));
      }
      assert!(Bulletproofs::prove(&mut OsRng, &commitments, $plus).is_err());
    }
  };
}

bulletproofs_tests!(bulletproofs, bulletproofs_max, false);
bulletproofs_tests!(bulletproofs_plus, bulletproofs_plus_max, true);
@@ -1,30 +0,0 @@
use rand_core::{RngCore, OsRng};

use multiexp::BatchVerifier;
use group::ff::Field;
use dalek_ff_group::{Scalar, EdwardsPoint};

use crate::{
  Commitment,
  ringct::bulletproofs::plus::aggregate_range_proof::{
    AggregateRangeStatement, AggregateRangeWitness,
  },
};

#[test]
fn test_aggregate_range_proof() {
  let mut verifier = BatchVerifier::new(16);
  for m in 1 ..= 16 {
    let mut commitments = vec![];
    for _ in 0 .. m {
      commitments.push(Commitment::new(*Scalar::random(&mut OsRng), OsRng.next_u64()));
    }
    let commitment_points = commitments.iter().map(|com| EdwardsPoint(com.calculate())).collect();
    let statement = AggregateRangeStatement::new(commitment_points).unwrap();
    let witness = AggregateRangeWitness::new(&commitments).unwrap();

    let proof = statement.clone().prove(&mut OsRng, &witness).unwrap();
    statement.verify(&mut OsRng, &mut verifier, (), proof);
  }
  assert!(verifier.verify_vartime());
}
@@ -1,6 +0,0 @@
mod unreduced_scalar;
mod clsag;
mod bulletproofs;
mod address;
mod seed;
mod extra;
@@ -1,482 +0,0 @@
use zeroize::Zeroizing;

use rand_core::OsRng;

use curve25519_dalek::scalar::Scalar;

use crate::{
  hash,
  wallet::seed::{
    Seed, SeedType, SeedError,
    classic::{self, trim_by_lang},
    polyseed,
  },
};

#[test]
fn test_classic_seed() {
  struct Vector {
    language: classic::Language,
    seed: String,
    spend: String,
    view: String,
  }

  let vectors = [
    Vector {
      language: classic::Language::Chinese,
      seed: "摇 曲 艺 武 滴 然 效 似 赏 式 祥 歌 买 疑 小 碧 堆 博 键 房 鲜 悲 付 喷 武".into(),
      spend: "a5e4fff1706ef9212993a69f246f5c95ad6d84371692d63e9bb0ea112a58340d".into(),
      view: "1176c43ce541477ea2f3ef0b49b25112b084e26b8a843e1304ac4677b74cdf02".into(),
    },
    Vector {
      language: classic::Language::English,
      seed: "washing thirsty occur lectures tuesday fainted toxic adapt \
             abnormal memoir nylon mostly building shrugged online ember northern \
             ruby woes dauntless boil family illness inroads northern"
        .into(),
      spend: "c0af65c0dd837e666b9d0dfed62745f4df35aed7ea619b2798a709f0fe545403".into(),
      view: "513ba91c538a5a9069e0094de90e927c0cd147fa10428ce3ac1afd49f63e3b01".into(),
    },
    Vector {
      language: classic::Language::Dutch,
      seed: "setwinst riphagen vimmetje extase blief tuitelig fuiven meifeest \
             ponywagen zesmaal ripdeal matverf codetaal leut ivoor rotten \
             wisgerhof winzucht typograaf atrium rein zilt traktaat verzaagd setwinst"
        .into(),
      spend: "e2d2873085c447c2bc7664222ac8f7d240df3aeac137f5ff2022eaa629e5b10a".into(),
      view: "eac30b69477e3f68093d131c7fd961564458401b07f8c87ff8f6030c1a0c7301".into(),
    },
    Vector {
      language: classic::Language::French,
      seed: "poids vaseux tarte bazar poivre effet entier nuance \
             sensuel ennui pacte osselet poudre battre alibi mouton \
             stade paquet pliage gibier type question position projet pliage"
        .into(),
      spend: "2dd39ff1a4628a94b5c2ec3e42fb3dfe15c2b2f010154dc3b3de6791e805b904".into(),
      view: "6725b32230400a1032f31d622b44c3a227f88258939b14a7c72e00939e7bdf0e".into(),
    },
    Vector {
      language: classic::Language::Spanish,
      seed: "minero ocupar mirar evadir octubre cal logro miope \
             opaco disco ancla litio clase cuello nasal clase \
             fiar avance deseo mente grumo negro cordón croqueta clase"
        .into(),
      spend: "ae2c9bebdddac067d73ec0180147fc92bdf9ac7337f1bcafbbe57dd13558eb02".into(),
      view: "18deafb34d55b7a43cae2c1c1c206a3c80c12cc9d1f84640b484b95b7fec3e05".into(),
    },
    Vector {
      language: classic::Language::German,
      seed: "Kaliber Gabelung Tapir Liveband Favorit Specht Enklave Nabel \
             Jupiter Foliant Chronik nisten löten Vase Aussage Rekord \
             Yeti Gesetz Eleganz Alraune Künstler Almweide Jahr Kastanie Almweide"
        .into(),
      spend: "79801b7a1b9796856e2397d862a113862e1fdc289a205e79d8d70995b276db06".into(),
      view: "99f0ec556643bd9c038a4ed86edcb9c6c16032c4622ed2e000299d527a792701".into(),
    },
    Vector {
      language: classic::Language::Italian,
      seed: "cavo pancetta auto fulmine alleanza filmato diavolo prato \
             forzare meritare litigare lezione segreto evasione votare buio \
             licenza cliente dorso natale crescere vento tutelare vetta evasione"
        .into(),
      spend: "5e7fd774eb00fa5877e2a8b4dc9c7ffe111008a3891220b56a6e49ac816d650a".into(),
      view: "698a1dce6018aef5516e82ca0cb3e3ec7778d17dfb41a137567bfa2e55e63a03".into(),
    },
    Vector {
      language: classic::Language::Portuguese,
      seed: "agito eventualidade onus itrio holograma sodomizar objetos dobro \
             iugoslavo bcrepuscular odalisca abjeto iuane darwinista eczema acetona \
             cibernetico hoquei gleba driver buffer azoto megera nogueira agito"
        .into(),
      spend: "13b3115f37e35c6aa1db97428b897e584698670c1b27854568d678e729200c0f".into(),
      view: "ad1b4fd35270f5f36c4da7166672b347e75c3f4d41346ec2a06d1d0193632801".into(),
    },
    Vector {
      language: classic::Language::Japanese,
      seed: "ぜんぶ どうぐ おたがい せんきょ おうじ そんちょう じゅしん いろえんぴつ \
             かほう つかれる えらぶ にちじょう くのう にちようび ぬまえび さんきゃく \
             おおや ちぬき うすめる いがく せつでん さうな すいえい せつだん おおや"
        .into(),
      spend: "c56e895cdb13007eda8399222974cdbab493640663804b93cbef3d8c3df80b0b".into(),
      view: "6c3634a313ec2ee979d565c33888fd7c3502d696ce0134a8bc1a2698c7f2c508".into(),
    },
    Vector {
      language: classic::Language::Russian,
      seed: "шатер икра нация ехать получать инерция доза реальный \
             рыжий таможня лопата душа веселый клетка атлас лекция \
             обгонять паек наивный лыжный дурак стать ежик задача паек"
        .into(),
      spend: "7cb5492df5eb2db4c84af20766391cd3e3662ab1a241c70fc881f3d02c381f05".into(),
      view: "fcd53e41ec0df995ab43927f7c44bc3359c93523d5009fb3f5ba87431d545a03".into(),
    },
    Vector {
      language: classic::Language::Esperanto,
      seed: "ukazo klini peco etikedo fabriko imitado onklino urino \
             pudro incidento kumuluso ikono smirgi hirundo uretro krii \
             sparkado super speciala pupo alpinisto cvana vokegi zombio fabriko"
        .into(),
      spend: "82ebf0336d3b152701964ed41df6b6e9a035e57fc98b84039ed0bd4611c58904".into(),
      view: "cd4d120e1ea34360af528f6a3e6156063312d9cefc9aa6b5218d366c0ed6a201".into(),
    },
    Vector {
      language: classic::Language::Lojban,
      seed: "jetnu vensa julne xrotu xamsi julne cutci dakli \
             mlatu xedja muvgau palpi xindo sfubu ciste cinri \
             blabi darno dembi janli blabi fenki bukpu burcu blabi"
        .into(),
      spend: "e4f8c6819ab6cf792cebb858caabac9307fd646901d72123e0367ebc0a79c200".into(),
      view: "c806ce62bafaa7b2d597f1a1e2dbe4a2f96bfd804bf6f8420fc7f4a6bd700c00".into(),
    },
    Vector {
      language: classic::Language::EnglishOld,
      seed: "glorious especially puff son moment add youth nowhere \
             throw glide grip wrong rhythm consume very swear \
             bitter heavy eventually begin reason flirt type unable"
        .into(),
      spend: "647f4765b66b636ff07170ab6280a9a6804dfbaf19db2ad37d23be024a18730b".into(),
      view: "045da65316a906a8c30046053119c18020b07a7a3a6ef5c01ab2a8755416bd02".into(),
    },
    // The following seeds require the language specification in order to calculate
    // a single valid checksum
    Vector {
      language: classic::Language::Spanish,
      seed: "pluma laico atraer pintor peor cerca balde buscar \
             lancha batir nulo reloj resto gemelo nevera poder columna gol \
             oveja latir amplio bolero feliz fuerza nevera"
        .into(),
      spend: "30303983fc8d215dd020cc6b8223793318d55c466a86e4390954f373fdc7200a".into(),
      view: "97c649143f3c147ba59aa5506cc09c7992c5c219bb26964442142bf97980800e".into(),
    },
    Vector {
      language: classic::Language::Spanish,
      seed: "pluma pluma pluma pluma pluma pluma pluma pluma \
             pluma pluma pluma pluma pluma pluma pluma pluma \
             pluma pluma pluma pluma pluma pluma pluma pluma pluma"
        .into(),
      spend: "b4050000b4050000b4050000b4050000b4050000b4050000b4050000b4050000".into(),
      view: "d73534f7912b395eb70ef911791a2814eb6df7ce56528eaaa83ff2b72d9f5e0f".into(),
    },
    Vector {
      language: classic::Language::English,
      seed: "plus plus plus plus plus plus plus plus \
             plus plus plus plus plus plus plus plus \
             plus plus plus plus plus plus plus plus plus"
        .into(),
      spend: "3b0400003b0400003b0400003b0400003b0400003b0400003b0400003b040000".into(),
      view: "43a8a7715eed11eff145a2024ddcc39740255156da7bbd736ee66a0838053a02".into(),
    },
    Vector {
      language: classic::Language::Spanish,
      seed: "audio audio audio audio audio audio audio audio \
             audio audio audio audio audio audio audio audio \
             audio audio audio audio audio audio audio audio audio"
        .into(),
      spend: "ba000000ba000000ba000000ba000000ba000000ba000000ba000000ba000000".into(),
      view: "1437256da2c85d029b293d8c6b1d625d9374969301869b12f37186e3f906c708".into(),
    },
    Vector {
      language: classic::Language::English,
      seed: "audio audio audio audio audio audio audio audio \
             audio audio audio audio audio audio audio audio \
             audio audio audio audio audio audio audio audio audio"
        .into(),
      spend: "7900000079000000790000007900000079000000790000007900000079000000".into(),
      view: "20bec797ab96780ae6a045dd816676ca7ed1d7c6773f7022d03ad234b581d600".into(),
    },
  ];

  for vector in vectors {
    let trim_seed = |seed: &str| {
      seed
        .split_whitespace()
        .map(|word| trim_by_lang(word, vector.language))
        .collect::<Vec<_>>()
        .join(" ")
    };

    // Test against Monero
    {
      println!("{}. language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone());
      let seed =
        Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(vector.seed.clone()))
          .unwrap();
      let trim = trim_seed(&vector.seed);
      assert_eq!(
        seed,
        Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap()
      );

      let spend: [u8; 32] = hex::decode(vector.spend).unwrap().try_into().unwrap();
      // For classical seeds, Monero directly uses the entropy as a spend key
      assert_eq!(
        Option::<Scalar>::from(Scalar::from_canonical_bytes(*seed.entropy())),
        Option::<Scalar>::from(Scalar::from_canonical_bytes(spend)),
      );

      let view: [u8; 32] = hex::decode(vector.view).unwrap().try_into().unwrap();
      // Monero then derives the view key as H(spend)
      assert_eq!(
        Scalar::from_bytes_mod_order(hash(&spend)),
        Scalar::from_canonical_bytes(view).unwrap()
      );

      assert_eq!(
        Seed::from_entropy(SeedType::Classic(vector.language), Zeroizing::new(spend), None)
          .unwrap(),
        seed
      );
    }

    // Test against ourselves
    {
      let seed = Seed::new(&mut OsRng, SeedType::Classic(vector.language));
      println!("{}. seed: {}", line!(), *seed.to_string());
      let trim = trim_seed(&seed.to_string());
      assert_eq!(
        seed,
        Seed::from_string(SeedType::Classic(vector.language), Zeroizing::new(trim)).unwrap()
      );
      assert_eq!(
        seed,
        Seed::from_entropy(SeedType::Classic(vector.language), seed.entropy(), None).unwrap()
      );
      assert_eq!(
        seed,
        Seed::from_string(SeedType::Classic(vector.language), seed.to_string()).unwrap()
      );
    }
  }
}

#[test]
fn test_polyseed() {
  struct Vector {
    language: polyseed::Language,
    seed: String,
    entropy: String,
    birthday: u64,
    has_prefix: bool,
    has_accent: bool,
  }

  let vectors = [
    Vector {
      language: polyseed::Language::English,
      seed: "raven tail swear infant grief assist regular lamp \
             duck valid someone little harsh puppy airport language"
        .into(),
      entropy: "dd76e7359a0ded37cd0ff0f3c829a5ae01673300000000000000000000000000".into(),
      birthday: 1638446400,
      has_prefix: true,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::Spanish,
      seed: "eje fin parte célebre tabú pestaña lienzo puma \
             prisión hora regalo lengua existir lápiz lote sonoro"
        .into(),
      entropy: "5a2b02df7db21fcbe6ec6df137d54c7b20fd2b00000000000000000000000000".into(),
      birthday: 3118651200,
      has_prefix: true,
      has_accent: true,
    },
    Vector {
      language: polyseed::Language::French,
      seed: "valable arracher décaler jeudi amusant dresser mener épaissir risible \
             prouesse réserve ampleur ajuster muter caméra enchère"
        .into(),
      entropy: "11cfd870324b26657342c37360c424a14a050b00000000000000000000000000".into(),
      birthday: 1679314966,
      has_prefix: true,
      has_accent: true,
    },
    Vector {
      language: polyseed::Language::Italian,
      seed: "caduco midollo copione meninge isotopo illogico riflesso tartaruga fermento \
             olandese normale tristezza episodio voragine forbito achille"
        .into(),
      entropy: "7ecc57c9b4652d4e31428f62bec91cfd55500600000000000000000000000000".into(),
      birthday: 1679316358,
      has_prefix: true,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::Portuguese,
      seed: "caverna custear azedo adeus senador apertada sedoso omitir \
             sujeito aurora videira molho cartaz gesso dentista tapar"
        .into(),
      entropy: "45473063711376cae38f1b3eba18c874124e1d00000000000000000000000000".into(),
      birthday: 1679316657,
      has_prefix: true,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::Czech,
      seed: "usmrtit nora dotaz komunita zavalit funkce mzda sotva akce \
             vesta kabel herna stodola uvolnit ustrnout email"
        .into(),
      entropy: "7ac8a4efd62d9c3c4c02e350d32326df37821c00000000000000000000000000".into(),
      birthday: 1679316898,
      has_prefix: true,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::Korean,
      seed: "전망 선풍기 국제 무궁화 설사 기름 이론적 해안 절망 예선 \
             지우개 보관 절망 말기 시각 귀신"
        .into(),
      entropy: "684663fda420298f42ed94b2c512ed38ddf12b00000000000000000000000000".into(),
      birthday: 1679317073,
      has_prefix: false,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::Japanese,
      seed: "うちあわせ ちつじょ つごう しはい けんこう とおる てみやげ はんとし たんとう \
             といれ おさない おさえる むかう ぬぐう なふだ せまる"
        .into(),
      entropy: "94e6665518a6286c6e3ba508a2279eb62b771f00000000000000000000000000".into(),
      birthday: 1679318722,
      has_prefix: false,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::ChineseTraditional,
      seed: "亂 挖 斤 柄 代 圈 枝 轄 魯 論 函 開 勘 番 榮 壁".into(),
      entropy: "b1594f585987ab0fd5a31da1f0d377dae5283f00000000000000000000000000".into(),
      birthday: 1679426433,
      has_prefix: false,
      has_accent: false,
    },
    Vector {
      language: polyseed::Language::ChineseSimplified,
      seed: "啊 百 族 府 票 划 伪 仓 叶 虾 借 溜 晨 左 等 鬼".into(),
      entropy: "21cdd366f337b89b8d1bc1df9fe73047c22b0300000000000000000000000000".into(),
      birthday: 1679426817,
      has_prefix: false,
      has_accent: false,
    },
    // The following seed requires the language specification in order to calculate
    // a single valid checksum
    Vector {
      language: polyseed::Language::Spanish,
      seed: "impo sort usua cabi venu nobl oliv clim \
             cont barr marc auto prod vaca torn fati"
        .into(),
      entropy: "dbfce25fe09b68a340e01c62417eeef43ad51800000000000000000000000000".into(),
      birthday: 1701511650,
      has_prefix: true,
      has_accent: true,
    },
  ];

  for vector in vectors {
    let add_whitespace = |mut seed: String| {
      seed.push(' ');
      seed
    };

    let seed_without_accents = |seed: &str| {
      seed
        .split_whitespace()
        .map(|w| w.chars().filter(char::is_ascii).collect::<String>())
        .collect::<Vec<_>>()
        .join(" ")
    };

    let trim_seed = |seed: &str| {
      let seed_to_trim =
        if vector.has_accent { seed_without_accents(seed) } else { seed.to_string() };
      seed_to_trim
        .split_whitespace()
        .map(|w| {
          let mut ascii = 0;
          let mut to_take = w.len();
          for (i, char) in w.chars().enumerate() {
            if char.is_ascii() {
              ascii += 1;
            }
            if ascii == polyseed::PREFIX_LEN {
              // +1 to include this character, which puts us at the prefix length
              to_take = i + 1;
              break;
            }
          }
          w.chars().take(to_take).collect::<String>()
        })
        .collect::<Vec<_>>()
        .join(" ")
    };
|
|
||||||
|
|
||||||
// String -> Seed
|
|
||||||
println!("{}. language: {:?}, seed: {}", line!(), vector.language, vector.seed.clone());
|
|
||||||
let seed =
|
|
||||||
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(vector.seed.clone()))
|
|
||||||
.unwrap();
|
|
||||||
let trim = trim_seed(&vector.seed);
|
|
||||||
let add_whitespace = add_whitespace(vector.seed.clone());
|
|
||||||
let seed_without_accents = seed_without_accents(&vector.seed);
|
|
||||||
|
|
||||||
// Make sure a version with added whitespace still works
|
|
||||||
let whitespaced_seed =
|
|
||||||
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(add_whitespace))
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(seed, whitespaced_seed);
|
|
||||||
// Check trimmed versions works
|
|
||||||
if vector.has_prefix {
|
|
||||||
let trimmed_seed =
|
|
||||||
Seed::from_string(SeedType::Polyseed(vector.language), Zeroizing::new(trim)).unwrap();
|
|
||||||
assert_eq!(seed, trimmed_seed);
|
|
||||||
}
|
|
||||||
// Check versions without accents work
|
|
||||||
if vector.has_accent {
|
|
||||||
let seed_without_accents = Seed::from_string(
|
|
||||||
SeedType::Polyseed(vector.language),
|
|
||||||
Zeroizing::new(seed_without_accents),
|
|
||||||
)
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(seed, seed_without_accents);
|
|
||||||
}
|
|
||||||
|
|
||||||
let entropy = Zeroizing::new(hex::decode(vector.entropy).unwrap().try_into().unwrap());
|
|
||||||
assert_eq!(seed.entropy(), entropy);
|
|
||||||
assert!(seed.birthday().abs_diff(vector.birthday) < polyseed::TIME_STEP);
|
|
||||||
|
|
||||||
// Entropy -> Seed
|
|
||||||
let from_entropy =
|
|
||||||
Seed::from_entropy(SeedType::Polyseed(vector.language), entropy, Some(seed.birthday()))
|
|
||||||
.unwrap();
|
|
||||||
assert_eq!(seed.to_string(), from_entropy.to_string());
|
|
||||||
|
|
||||||
// Check against ourselves
|
|
||||||
{
|
|
||||||
let seed = Seed::new(&mut OsRng, SeedType::Polyseed(vector.language));
|
|
||||||
println!("{}. seed: {}", line!(), *seed.to_string());
|
|
||||||
assert_eq!(
|
|
||||||
seed,
|
|
||||||
Seed::from_string(SeedType::Polyseed(vector.language), seed.to_string()).unwrap()
|
|
||||||
);
|
|
||||||
assert_eq!(
|
|
||||||
seed,
|
|
||||||
Seed::from_entropy(
|
|
||||||
SeedType::Polyseed(vector.language),
|
|
||||||
seed.entropy(),
|
|
||||||
Some(seed.birthday())
|
|
||||||
)
|
|
||||||
.unwrap()
|
|
||||||
);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[test]
|
|
||||||
fn test_invalid_polyseed() {
|
|
||||||
// This seed includes unsupported features bits and should error on decode
|
|
||||||
let seed = "include domain claim resemble urban hire lunch bird \
|
|
||||||
crucial fire best wife ring warm ignore model"
|
|
||||||
.into();
|
|
||||||
let res =
|
|
||||||
Seed::from_string(SeedType::Polyseed(polyseed::Language::English), Zeroizing::new(seed));
|
|
||||||
assert_eq!(res, Err(SeedError::UnsupportedFeatures));
|
|
||||||
}
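
// A minimal sketch (hypothetical test, not part of the original file) of the prefix rule the
// trim_seed closure above exercises: a Polyseed word is recognized by its first
// polyseed::PREFIX_LEN ASCII characters, so trimming a word to that prefix must preserve it
// as a prefix of the full word.
#[test]
fn polyseed_prefix_rule_sketch() {
  // "crucial" is taken from the seed vector in the test above
  let word = "crucial";
  let trimmed = word.chars().take(polyseed::PREFIX_LEN).collect::<String>();
  assert!(word.starts_with(&trimmed));
  assert!(trimmed.chars().count() <= polyseed::PREFIX_LEN);
}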

@@ -1,432 +0,0 @@
use core::cmp::Ordering;
use std_shims::{
  vec::Vec,
  io::{self, Read, Write},
};

use zeroize::Zeroize;

use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};

use crate::{
  Protocol, hash,
  serialize::*,
  ring_signatures::RingSignature,
  ringct::{bulletproofs::Bulletproofs, RctType, RctBase, RctPrunable, RctSignatures},
};

#[derive(Clone, PartialEq, Eq, Debug)]
pub enum Input {
  Gen(u64),
  ToKey { amount: Option<u64>, key_offsets: Vec<u64>, key_image: EdwardsPoint },
}

impl Input {
  pub(crate) fn fee_weight(offsets_weight: usize) -> usize {
    // Uses 1 byte for the input type
    // Uses 1 byte for the VarInt amount due to amount being 0
    1 + 1 + offsets_weight + 32
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    match self {
      Input::Gen(height) => {
        w.write_all(&[255])?;
        write_varint(height, w)
      }

      Input::ToKey { amount, key_offsets, key_image } => {
        w.write_all(&[2])?;
        write_varint(&amount.unwrap_or(0), w)?;
        write_vec(write_varint, key_offsets, w)?;
        write_point(key_image, w)
      }
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<Input> {
    Ok(match read_byte(r)? {
      255 => Input::Gen(read_varint(r)?),
      2 => {
        let amount = read_varint(r)?;
        // https://github.com/monero-project/monero/
        // blob/00fd416a99686f0956361d1cd0337fe56e58d4a7/
        // src/cryptonote_basic/cryptonote_format_utils.cpp#L860-L863
        // A non-RCT 0-amount input can't exist because only RCT TXs can have a 0-amount output
        // That's why collapsing to None if the amount is 0 is safe, even without knowing if this
        // is a RCT TX
        let amount = if amount == 0 { None } else { Some(amount) };
        Input::ToKey {
          amount,
          key_offsets: read_vec(read_varint, r)?,
          key_image: read_torsion_free_point(r)?,
        }
      }
      _ => Err(io::Error::other("Tried to deserialize unknown/unused input type"))?,
    })
  }
}

// Doesn't bother moving to an enum for the unused Script classes
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Output {
  pub amount: Option<u64>,
  pub key: CompressedEdwardsY,
  pub view_tag: Option<u8>,
}

impl Output {
  pub(crate) fn fee_weight(view_tags: bool) -> usize {
    // Uses 1 byte for the output type
    // Uses 1 byte for the VarInt amount due to amount being 0
    1 + 1 + 32 + if view_tags { 1 } else { 0 }
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(&self.amount.unwrap_or(0), w)?;
    w.write_all(&[2 + u8::from(self.view_tag.is_some())])?;
    w.write_all(&self.key.to_bytes())?;
    if let Some(view_tag) = self.view_tag {
      w.write_all(&[view_tag])?;
    }
    Ok(())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = Vec::with_capacity(8 + 1 + 32);
    self.write(&mut res).unwrap();
    res
  }

  pub fn read<R: Read>(rct: bool, r: &mut R) -> io::Result<Output> {
    let amount = read_varint(r)?;
    let amount = if rct {
      if amount != 0 {
        Err(io::Error::other("RCT TX output wasn't 0"))?;
      }
      None
    } else {
      Some(amount)
    };

    let view_tag = match read_byte(r)? {
      2 => false,
      3 => true,
      _ => Err(io::Error::other("Tried to deserialize unknown/unused output type"))?,
    };

    Ok(Output {
      amount,
      key: CompressedEdwardsY(read_bytes(r)?),
      view_tag: if view_tag { Some(read_byte(r)?) } else { None },
    })
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum Timelock {
  None,
  Block(usize),
  Time(u64),
}

impl Timelock {
  fn from_raw(raw: u64) -> Timelock {
    if raw == 0 {
      Timelock::None
    } else if raw < 500_000_000 {
      Timelock::Block(usize::try_from(raw).unwrap())
    } else {
      Timelock::Time(raw)
    }
  }

  fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(
      &match self {
        Timelock::None => 0,
        Timelock::Block(block) => (*block).try_into().unwrap(),
        Timelock::Time(time) => *time,
      },
      w,
    )
  }
}
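
// A minimal sketch (hypothetical test, not part of the original file; it assumes it lives in
// this module, as from_raw is private) of the raw-value convention from_raw implements: 0 means
// no lock, values under 500 million are block heights, and anything at or above that threshold
// is a UNIX timestamp.
#[test]
fn timelock_from_raw_sketch() {
  assert_eq!(Timelock::from_raw(0), Timelock::None);
  assert_eq!(Timelock::from_raw(1_000_000), Timelock::Block(1_000_000));
  assert_eq!(Timelock::from_raw(1_500_000_000), Timelock::Time(1_500_000_000));
}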

impl PartialOrd for Timelock {
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    match (self, other) {
      (Timelock::None, Timelock::None) => Some(Ordering::Equal),
      (Timelock::None, _) => Some(Ordering::Less),
      (_, Timelock::None) => Some(Ordering::Greater),
      (Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b),
      (Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b),
      _ => None,
    }
  }
}

#[derive(Clone, PartialEq, Eq, Debug)]
pub struct TransactionPrefix {
  pub version: u64,
  pub timelock: Timelock,
  pub inputs: Vec<Input>,
  pub outputs: Vec<Output>,
  pub extra: Vec<u8>,
}

impl TransactionPrefix {
  pub(crate) fn fee_weight(
    decoy_weights: &[usize],
    outputs: usize,
    view_tags: bool,
    extra: usize,
  ) -> usize {
    // Assumes Timelock::None since this library won't let you create a TX with a timelock
    // 1 input for every decoy weight
    1 + 1 +
      varint_len(decoy_weights.len()) +
      decoy_weights.iter().map(|&offsets_weight| Input::fee_weight(offsets_weight)).sum::<usize>() +
      varint_len(outputs) +
      (outputs * Output::fee_weight(view_tags)) +
      varint_len(extra) +
      extra
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    write_varint(&self.version, w)?;
    self.timelock.write(w)?;
    write_vec(Input::write, &self.inputs, w)?;
    write_vec(Output::write, &self.outputs, w)?;
    write_varint(&self.extra.len(), w)?;
    w.write_all(&self.extra)
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = vec![];
    self.write(&mut res).unwrap();
    res
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<TransactionPrefix> {
    let version = read_varint(r)?;
    // TODO: Create an enum out of version
    if (version == 0) || (version > 2) {
      Err(io::Error::other("unrecognized transaction version"))?;
    }

    let timelock = Timelock::from_raw(read_varint(r)?);

    let inputs = read_vec(|r| Input::read(r), r)?;
    if inputs.is_empty() {
      Err(io::Error::other("transaction had no inputs"))?;
    }
    let is_miner_tx = matches!(inputs[0], Input::Gen { .. });

    let mut prefix = TransactionPrefix {
      version,
      timelock,
      inputs,
      outputs: read_vec(|r| Output::read((!is_miner_tx) && (version == 2), r), r)?,
      extra: vec![],
    };
    prefix.extra = read_vec(read_byte, r)?;
    Ok(prefix)
  }

  pub fn hash(&self) -> [u8; 32] {
    hash(&self.serialize())
  }
}

/// Monero transaction. For version 1, rct_signatures still contains an accurate fee value.
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct Transaction {
  pub prefix: TransactionPrefix,
  pub signatures: Vec<RingSignature>,
  pub rct_signatures: RctSignatures,
}

impl Transaction {
  pub(crate) fn fee_weight(
    protocol: Protocol,
    decoy_weights: &[usize],
    outputs: usize,
    extra: usize,
    fee: u64,
  ) -> usize {
    TransactionPrefix::fee_weight(decoy_weights, outputs, protocol.view_tags(), extra) +
      RctSignatures::fee_weight(protocol, decoy_weights.len(), outputs, fee)
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.prefix.write(w)?;
    if self.prefix.version == 1 {
      for ring_sig in &self.signatures {
        ring_sig.write(w)?;
      }
      Ok(())
    } else if self.prefix.version == 2 {
      self.rct_signatures.write(w)
    } else {
      panic!("Serializing a transaction with an unknown version");
    }
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut res = Vec::with_capacity(2048);
    self.write(&mut res).unwrap();
    res
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<Transaction> {
    let prefix = TransactionPrefix::read(r)?;
    let mut signatures = vec![];
    let mut rct_signatures = RctSignatures {
      base: RctBase { fee: 0, encrypted_amounts: vec![], pseudo_outs: vec![], commitments: vec![] },
      prunable: RctPrunable::Null,
    };

    if prefix.version == 1 {
      signatures = prefix
        .inputs
        .iter()
        .filter_map(|input| match input {
          Input::ToKey { key_offsets, .. } => Some(RingSignature::read(key_offsets.len(), r)),
          _ => None,
        })
        .collect::<Result<_, _>>()?;

      if !matches!(prefix.inputs[0], Input::Gen(..)) {
        let in_amount = prefix
          .inputs
          .iter()
          .map(|input| match input {
            Input::Gen(..) => Err(io::Error::other("Input::Gen present in non-coinbase v1 TX"))?,
            // v1 TXs can burn v2 outputs
            // dcff3fe4f914d6b6bd4a5b800cc4cca8f2fdd1bd73352f0700d463d36812f328 is one such TX
            // It includes a pre-RCT signature for a RCT output, yet if you interpret the RCT
            // output as being worth 0, it passes a sum check (guaranteed since no outputs are RCT)
            Input::ToKey { amount, .. } => Ok(amount.unwrap_or(0)),
          })
          .collect::<io::Result<Vec<_>>>()?
          .into_iter()
          .sum::<u64>();

        let mut out = 0;
        for output in &prefix.outputs {
          if output.amount.is_none() {
            Err(io::Error::other("v1 transaction had a 0-amount output"))?;
          }
          out += output.amount.unwrap();
        }

        if in_amount < out {
          Err(io::Error::other("transaction spent more than it had as inputs"))?;
        }
        rct_signatures.base.fee = in_amount - out;
      }
    } else if prefix.version == 2 {
      rct_signatures = RctSignatures::read(
        prefix.inputs.first().map_or(0, |input| match input {
          Input::Gen(_) => 0,
          Input::ToKey { key_offsets, .. } => key_offsets.len(),
        }),
        prefix.inputs.len(),
        prefix.outputs.len(),
        r,
      )?;
    } else {
      Err(io::Error::other("Tried to deserialize unknown version"))?;
    }

    Ok(Transaction { prefix, signatures, rct_signatures })
  }

  pub fn hash(&self) -> [u8; 32] {
    let mut buf = Vec::with_capacity(2048);
    if self.prefix.version == 1 {
      self.write(&mut buf).unwrap();
      hash(&buf)
    } else {
      let mut hashes = Vec::with_capacity(96);

      hashes.extend(self.prefix.hash());

      self.rct_signatures.base.write(&mut buf, self.rct_signatures.rct_type()).unwrap();
      hashes.extend(hash(&buf));
      buf.clear();

      hashes.extend(&match self.rct_signatures.prunable {
        RctPrunable::Null => [0; 32],
        _ => {
          self.rct_signatures.prunable.write(&mut buf, self.rct_signatures.rct_type()).unwrap();
          hash(&buf)
        }
      });

      hash(&hashes)
    }
  }

  /// Calculate the hash of this transaction as needed for signing it.
  pub fn signature_hash(&self) -> [u8; 32] {
    if self.prefix.version == 1 {
      return self.prefix.hash();
    }

    let mut buf = Vec::with_capacity(2048);
    let mut sig_hash = Vec::with_capacity(96);

    sig_hash.extend(self.prefix.hash());

    self.rct_signatures.base.write(&mut buf, self.rct_signatures.rct_type()).unwrap();
    sig_hash.extend(hash(&buf));
    buf.clear();

    self.rct_signatures.prunable.signature_write(&mut buf).unwrap();
    sig_hash.extend(hash(&buf));

    hash(&sig_hash)
  }

  fn is_rct_bulletproof(&self) -> bool {
    match &self.rct_signatures.rct_type() {
      RctType::Bulletproofs | RctType::BulletproofsCompactAmount | RctType::Clsag => true,
      RctType::Null |
      RctType::MlsagAggregate |
      RctType::MlsagIndividual |
      RctType::BulletproofsPlus => false,
    }
  }

  fn is_rct_bulletproof_plus(&self) -> bool {
    match &self.rct_signatures.rct_type() {
      RctType::BulletproofsPlus => true,
      RctType::Null |
      RctType::MlsagAggregate |
      RctType::MlsagIndividual |
      RctType::Bulletproofs |
      RctType::BulletproofsCompactAmount |
      RctType::Clsag => false,
    }
  }

  /// Calculate the transaction's weight.
  pub fn weight(&self) -> usize {
    let blob_size = self.serialize().len();

    let bp = self.is_rct_bulletproof();
    let bp_plus = self.is_rct_bulletproof_plus();
    if !(bp || bp_plus) {
      blob_size
    } else {
      blob_size + Bulletproofs::calculate_bp_clawback(bp_plus, self.prefix.outputs.len()).0
    }
  }
}
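
// A minimal sketch (not part of the original file) isolating the v2 hashing structure from
// Transaction::hash above: the transaction hash commits to three component hashes, namely the
// prefix hash, the hash of the serialized RCT base, and the hash of the serialized RCT
// prunable data (all zeroes when the prunable data is Null).
fn v2_hash_of_hashes_sketch(prefix: [u8; 32], base: [u8; 32], prunable: [u8; 32]) -> [u8; 32] {
  let mut hashes = Vec::with_capacity(96);
  hashes.extend(prefix);
  hashes.extend(base);
  hashes.extend(prunable);
  hash(&hashes)
}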

@@ -1,325 +0,0 @@
use core::{marker::PhantomData, fmt::Debug};
use std_shims::string::{String, ToString};

use zeroize::Zeroize;

use curve25519_dalek::edwards::EdwardsPoint;

use monero_generators::decompress_point;

use base58_monero::base58::{encode_check, decode_check};

/// The network this address is for.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum Network {
  Mainnet,
  Testnet,
  Stagenet,
}

/// The address type, supporting the officially documented addresses, along with
/// [Featured Addresses](https://gist.github.com/kayabaNerve/01c50bbc35441e0bbdcee63a9d823789).
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum AddressType {
  Standard,
  Integrated([u8; 8]),
  Subaddress,
  Featured { subaddress: bool, payment_id: Option<[u8; 8]>, guaranteed: bool },
}

#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub struct SubaddressIndex {
  pub(crate) account: u32,
  pub(crate) address: u32,
}

impl SubaddressIndex {
  pub const fn new(account: u32, address: u32) -> Option<SubaddressIndex> {
    if (account == 0) && (address == 0) {
      return None;
    }
    Some(SubaddressIndex { account, address })
  }

  pub fn account(&self) -> u32 {
    self.account
  }

  pub fn address(&self) -> u32 {
    self.address
  }
}

/// Address specification. Used internally to create addresses.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Zeroize)]
pub enum AddressSpec {
  Standard,
  Integrated([u8; 8]),
  Subaddress(SubaddressIndex),
  Featured { subaddress: Option<SubaddressIndex>, payment_id: Option<[u8; 8]>, guaranteed: bool },
}

impl AddressType {
  pub fn is_subaddress(&self) -> bool {
    matches!(self, AddressType::Subaddress) ||
      matches!(self, AddressType::Featured { subaddress: true, .. })
  }

  pub fn payment_id(&self) -> Option<[u8; 8]> {
    if let AddressType::Integrated(id) = self {
      Some(*id)
    } else if let AddressType::Featured { payment_id, .. } = self {
      *payment_id
    } else {
      None
    }
  }

  pub fn is_guaranteed(&self) -> bool {
    matches!(self, AddressType::Featured { guaranteed: true, .. })
  }
}

/// A type which returns the byte for a given address.
pub trait AddressBytes: Clone + Copy + PartialEq + Eq + Debug {
  fn network_bytes(network: Network) -> (u8, u8, u8, u8);
}

/// Address bytes for Monero.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct MoneroAddressBytes;
impl AddressBytes for MoneroAddressBytes {
  fn network_bytes(network: Network) -> (u8, u8, u8, u8) {
    match network {
      Network::Mainnet => (18, 19, 42, 70),
      Network::Testnet => (53, 54, 63, 111),
      Network::Stagenet => (24, 25, 36, 86),
    }
  }
}
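
// A minimal sketch (not part of the original file) of how the tuple above is consumed: a
// single leading byte identifies both the network and the address type. The claim about the
// resulting '4' prefix is an assumption based on Monero's documented mainnet address format,
// not something this file states.
fn mainnet_standard_byte_sketch() -> u8 {
  let (standard, _integrated, _subaddress, _featured) =
    MoneroAddressBytes::network_bytes(Network::Mainnet);
  // 18, the byte which causes Base58Check-encoded mainnet standard addresses to start with '4'
  standard
}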

/// Address metadata.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct AddressMeta<B: AddressBytes> {
  _bytes: PhantomData<B>,
  pub network: Network,
  pub kind: AddressType,
}

impl<B: AddressBytes> Zeroize for AddressMeta<B> {
  fn zeroize(&mut self) {
    self.network.zeroize();
    self.kind.zeroize();
  }
}

/// Error when decoding an address.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum AddressError {
  #[cfg_attr(feature = "std", error("invalid address byte"))]
  InvalidByte,
  #[cfg_attr(feature = "std", error("invalid address encoding"))]
  InvalidEncoding,
  #[cfg_attr(feature = "std", error("invalid length"))]
  InvalidLength,
  #[cfg_attr(feature = "std", error("invalid key"))]
  InvalidKey,
  #[cfg_attr(feature = "std", error("unknown features"))]
  UnknownFeatures,
  #[cfg_attr(feature = "std", error("different network than expected"))]
  DifferentNetwork,
}

impl<B: AddressBytes> AddressMeta<B> {
  #[allow(clippy::wrong_self_convention)]
  fn to_byte(&self) -> u8 {
    let bytes = B::network_bytes(self.network);
    match self.kind {
      AddressType::Standard => bytes.0,
      AddressType::Integrated(_) => bytes.1,
      AddressType::Subaddress => bytes.2,
      AddressType::Featured { .. } => bytes.3,
    }
  }

  /// Create an address's metadata.
  pub fn new(network: Network, kind: AddressType) -> Self {
    AddressMeta { _bytes: PhantomData, network, kind }
  }

  // Returns an incomplete instantiation in the case of Integrated/Featured addresses
  fn from_byte(byte: u8) -> Result<Self, AddressError> {
    let mut meta = None;
    for network in [Network::Mainnet, Network::Testnet, Network::Stagenet] {
      let (standard, integrated, subaddress, featured) = B::network_bytes(network);
      if let Some(kind) = match byte {
        _ if byte == standard => Some(AddressType::Standard),
        _ if byte == integrated => Some(AddressType::Integrated([0; 8])),
        _ if byte == subaddress => Some(AddressType::Subaddress),
        _ if byte == featured => {
          Some(AddressType::Featured { subaddress: false, payment_id: None, guaranteed: false })
        }
        _ => None,
      } {
        meta = Some(AddressMeta::new(network, kind));
        break;
      }
    }

    meta.ok_or(AddressError::InvalidByte)
  }

  pub fn is_subaddress(&self) -> bool {
    self.kind.is_subaddress()
  }

  pub fn payment_id(&self) -> Option<[u8; 8]> {
    self.kind.payment_id()
  }

  pub fn is_guaranteed(&self) -> bool {
    self.kind.is_guaranteed()
  }
}

/// A Monero address, composed of metadata and a spend/view key.
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Address<B: AddressBytes> {
  pub meta: AddressMeta<B>,
  pub spend: EdwardsPoint,
  pub view: EdwardsPoint,
}

impl<B: AddressBytes> core::fmt::Debug for Address<B> {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    fmt
      .debug_struct("Address")
      .field("meta", &self.meta)
      .field("spend", &hex::encode(self.spend.compress().0))
      .field("view", &hex::encode(self.view.compress().0))
      // This is not a real field, yet is the most valuable thing to know when debugging
      .field("(address)", &self.to_string())
      .finish()
  }
}

impl<B: AddressBytes> Zeroize for Address<B> {
  fn zeroize(&mut self) {
    self.meta.zeroize();
    self.spend.zeroize();
    self.view.zeroize();
  }
}

impl<B: AddressBytes> ToString for Address<B> {
  fn to_string(&self) -> String {
    let mut data = vec![self.meta.to_byte()];
    data.extend(self.spend.compress().to_bytes());
    data.extend(self.view.compress().to_bytes());
    if let AddressType::Featured { subaddress, payment_id, guaranteed } = self.meta.kind {
      // Technically should be a VarInt, yet we don't have enough features for that to be needed
      data.push(
        u8::from(subaddress) + (u8::from(payment_id.is_some()) << 1) + (u8::from(guaranteed) << 2),
      );
    }
    if let Some(id) = self.meta.kind.payment_id() {
      data.extend(id);
    }
    encode_check(&data).unwrap()
  }
}
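
// A minimal sketch (not part of the original file) of the features byte written above for
// Featured addresses: bit 0 flags a subaddress, bit 1 an included payment ID, and bit 2 the
// guaranteed property, so (subaddress: true, payment_id: false, guaranteed: true) encodes 0b101.
fn features_byte_sketch(subaddress: bool, payment_id: bool, guaranteed: bool) -> u8 {
  u8::from(subaddress) + (u8::from(payment_id) << 1) + (u8::from(guaranteed) << 2)
}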

impl<B: AddressBytes> Address<B> {
  pub fn new(meta: AddressMeta<B>, spend: EdwardsPoint, view: EdwardsPoint) -> Self {
    Address { meta, spend, view }
  }

  pub fn from_str_raw(s: &str) -> Result<Self, AddressError> {
    let raw = decode_check(s).map_err(|_| AddressError::InvalidEncoding)?;
    if raw.len() < (1 + 32 + 32) {
      Err(AddressError::InvalidLength)?;
    }

    let mut meta = AddressMeta::from_byte(raw[0])?;
    let spend =
      decompress_point(raw[1 .. 33].try_into().unwrap()).ok_or(AddressError::InvalidKey)?;
    let view =
      decompress_point(raw[33 .. 65].try_into().unwrap()).ok_or(AddressError::InvalidKey)?;
    let mut read = 65;

    if matches!(meta.kind, AddressType::Featured { .. }) {
      if raw[read] >= (2 << 3) {
        Err(AddressError::UnknownFeatures)?;
      }

      let subaddress = (raw[read] & 1) == 1;
      let integrated = ((raw[read] >> 1) & 1) == 1;
      let guaranteed = ((raw[read] >> 2) & 1) == 1;

      meta.kind = AddressType::Featured {
        subaddress,
        payment_id: Some([0; 8]).filter(|_| integrated),
        guaranteed,
      };
      read += 1;
    }

    // Update read early so we can verify the length
    if meta.kind.payment_id().is_some() {
      read += 8;
    }
    if raw.len() != read {
      Err(AddressError::InvalidLength)?;
    }

    if let AddressType::Integrated(ref mut id) = meta.kind {
      id.copy_from_slice(&raw[(read - 8) .. read]);
    }
    if let AddressType::Featured { payment_id: Some(ref mut id), .. } = meta.kind {
      id.copy_from_slice(&raw[(read - 8) .. read]);
    }

    Ok(Address { meta, spend, view })
  }

  pub fn from_str(network: Network, s: &str) -> Result<Self, AddressError> {
    Self::from_str_raw(s).and_then(|addr| {
      if addr.meta.network == network {
        Ok(addr)
      } else {
        Err(AddressError::DifferentNetwork)?
      }
    })
  }

  pub fn network(&self) -> Network {
    self.meta.network
  }

  pub fn is_subaddress(&self) -> bool {
    self.meta.is_subaddress()
  }

  pub fn payment_id(&self) -> Option<[u8; 8]> {
    self.meta.payment_id()
  }

  pub fn is_guaranteed(&self) -> bool {
    self.meta.is_guaranteed()
  }
}

/// Instantiation of the Address type with Monero's network bytes.
pub type MoneroAddress = Address<MoneroAddressBytes>;
// Allow re-interpreting of an arbitrary address as a Monero address so it can be used with the
// rest of this library. Doesn't use From as it was conflicting with From<T> for T.
impl MoneroAddress {
  pub fn from<B: AddressBytes>(address: Address<B>) -> MoneroAddress {
    MoneroAddress::new(
      AddressMeta::new(address.meta.network, address.meta.kind),
      address.spend,
      address.view,
    )
  }
}
@@ -1,356 +0,0 @@
use std_shims::{vec::Vec, collections::HashSet};

#[cfg(feature = "cache-distribution")]
use std_shims::sync::OnceLock;

#[cfg(all(feature = "cache-distribution", not(feature = "std")))]
use std_shims::sync::Mutex;
#[cfg(all(feature = "cache-distribution", feature = "std"))]
use async_lock::Mutex;

use zeroize::{Zeroize, ZeroizeOnDrop};

use rand_core::{RngCore, CryptoRng};
use rand_distr::{Distribution, Gamma};
#[cfg(not(feature = "std"))]
use rand_distr::num_traits::Float;

use curve25519_dalek::edwards::EdwardsPoint;

use crate::{
  serialize::varint_len,
  wallet::SpendableOutput,
  rpc::{RpcError, RpcConnection, Rpc},
  DEFAULT_LOCK_WINDOW, COINBASE_LOCK_WINDOW, BLOCK_TIME,
};

const RECENT_WINDOW: usize = 15;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
#[allow(clippy::cast_precision_loss)]
const TIP_APPLICATION: f64 = (DEFAULT_LOCK_WINDOW * BLOCK_TIME) as f64;

// TODO: Resolve safety of this in case a reorg occurs/the network changes
// TODO: Update this when scanning a block, as possible
#[cfg(feature = "cache-distribution")]
static DISTRIBUTION_CELL: OnceLock<Mutex<Vec<u64>>> = OnceLock::new();
#[cfg(feature = "cache-distribution")]
#[allow(non_snake_case)]
fn DISTRIBUTION() -> &'static Mutex<Vec<u64>> {
  DISTRIBUTION_CELL.get_or_init(|| Mutex::new(Vec::with_capacity(3000000)))
}

#[allow(clippy::too_many_arguments)]
async fn select_n<'a, R: RngCore + CryptoRng, RPC: RpcConnection>(
  rng: &mut R,
  rpc: &Rpc<RPC>,
  distribution: &[u64],
  height: usize,
  high: u64,
  per_second: f64,
  real: &[u64],
  used: &mut HashSet<u64>,
  count: usize,
  fingerprintable_canonical: bool,
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
  // TODO: consider removing this extra RPC call and expecting the caller to handle it
  if fingerprintable_canonical && height > rpc.get_height().await? {
    // TODO: Don't use InternalError for the caller's failure
    Err(RpcError::InternalError("decoys being requested from too young blocks"))?;
  }

  #[cfg(test)]
  let mut iters = 0;
  let mut confirmed = Vec::with_capacity(count);
  // Retries on failure. Retries are obvious as decoys, yet should be minimal
  while confirmed.len() != count {
    let remaining = count - confirmed.len();
    // TODO: over-request candidates in case some are locked, to avoid needing
    // round trips to the daemon (and revealing obvious decoys to the daemon)
    let mut candidates = Vec::with_capacity(remaining);
    while candidates.len() != remaining {
      #[cfg(test)]
      {
        iters += 1;
        // This is cheap, and on fresh chains, a lot of rounds may be needed
        if iters == 100 {
          Err(RpcError::InternalError("hit decoy selection round limit"))?;
        }
      }

      // Use a gamma distribution
      let mut age = Gamma::<f64>::new(19.28, 1.0 / 1.61).unwrap().sample(rng).exp();
      #[allow(clippy::cast_precision_loss)]
      if age > TIP_APPLICATION {
        age -= TIP_APPLICATION;
      } else {
        // f64 does not have try_from available, which is why these are written with `as`
        age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64;
      }

      #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
      let o = (age * per_second) as u64;
      if o < high {
        let i = distribution.partition_point(|s| *s < (high - 1 - o));
        let prev = i.saturating_sub(1);
        let n = distribution[i] - distribution[prev];
        if n != 0 {
          let o = distribution[prev] + (rng.next_u64() % n);
          if !used.contains(&o) {
            // It will either actually be used, or is unusable and this prevents trying it again
            used.insert(o);
            candidates.push(o);
          }
        }
      }
    }

    // If this is the first time we're requesting these outputs, include the real one as well
    // Prevents the node we're connected to from having a list of known decoys and then seeing a
    // TX which uses all of them, with one additional output (the true spend)
    let mut real_indexes = HashSet::with_capacity(real.len());
    if confirmed.is_empty() {
      for real in real {
        candidates.push(*real);
      }
      // Sort candidates so the real spends aren't the ones at the end
      candidates.sort();
      for real in real {
        real_indexes.insert(candidates.binary_search(real).unwrap());
      }
    }

    // TODO: make sure that the real output is included in the response, and
    // that mask and key are equal to expected
    for (i, output) in rpc
      .get_unlocked_outputs(&candidates, height, fingerprintable_canonical)
      .await?
      .iter_mut()
      .enumerate()
    {
      // Don't include the real spend as a decoy, despite requesting it
      if real_indexes.contains(&i) {
        continue;
      }

      if let Some(output) = output.take() {
        confirmed.push((candidates[i], output));
      }
    }
  }

  Ok(confirmed)
}
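
// A minimal sketch (not part of the original file) isolating the age-sampling step from
// select_n above: sample ln(age) from Gamma(19.28, 1 / 1.61) and exponentiate; ages beyond
// the lock window are shifted back by it, while ages within it are re-drawn uniformly from
// the recent-spend window. The age in seconds is then converted to an offset (in outputs)
// from the chain tip via the chain's output rate.
fn sample_output_offset_sketch<R: RngCore + CryptoRng>(rng: &mut R, per_second: f64) -> u64 {
  let mut age = Gamma::<f64>::new(19.28, 1.0 / 1.61).unwrap().sample(rng).exp();
  if age > TIP_APPLICATION {
    age -= TIP_APPLICATION;
  } else {
    age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64;
  }
  #[allow(clippy::cast_sign_loss, clippy::cast_possible_truncation)]
  let offset = (age * per_second) as u64;
  offset
}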

fn offset(ring: &[u64]) -> Vec<u64> {
  let mut res = vec![ring[0]];
  res.resize(ring.len(), 0);
  for m in (1 .. ring.len()).rev() {
    res[m] = ring[m] - ring[m - 1];
  }
  res
}
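
// A minimal sketch (hypothetical test, not part of the original file): offset converts
// absolute ring indices into the delta encoding Monero serializes, which Decoys::indexes
// (below) inverts. For the ring [5, 8, 10], the offsets are [5, 3, 2], since 5, 5 + 3, and
// 5 + 3 + 2 reconstruct the ring.
#[test]
fn offset_sketch() {
  assert_eq!(offset(&[5, 8, 10]), vec![5, 3, 2]);
}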

async fn select_decoys<R: RngCore + CryptoRng, RPC: RpcConnection>(
  rng: &mut R,
  rpc: &Rpc<RPC>,
  ring_len: usize,
  height: usize,
  inputs: &[SpendableOutput],
  fingerprintable_canonical: bool,
) -> Result<Vec<Decoys>, RpcError> {
  #[cfg(feature = "cache-distribution")]
  #[cfg(not(feature = "std"))]
  let mut distribution = DISTRIBUTION().lock();
  #[cfg(feature = "cache-distribution")]
  #[cfg(feature = "std")]
  let mut distribution = DISTRIBUTION().lock().await;

  #[cfg(not(feature = "cache-distribution"))]
  let mut distribution = vec![];

  let decoy_count = ring_len - 1;

  // Convert the inputs in question to the raw output data
  let mut real = Vec::with_capacity(inputs.len());
  let mut outputs = Vec::with_capacity(inputs.len());
  for input in inputs {
    real.push(input.global_index);
    outputs.push((real[real.len() - 1], [input.key(), input.commitment().calculate()]));
  }

  if distribution.len() < height {
    // TODO: verify distribution elems are strictly increasing
    let extension =
      rpc.get_output_distribution(distribution.len(), height.saturating_sub(1)).await?;
    distribution.extend(extension);
  }
  // If asked to use an older height than previously asked, truncate to ensure accuracy
  // Should never happen, yet risks desyncing if it did
  distribution.truncate(height);

  if distribution.len() < DEFAULT_LOCK_WINDOW {
    Err(RpcError::InternalError("not enough decoy candidates"))?;
  }

  #[allow(clippy::cast_precision_loss)]
  let per_second = {
    let blocks = distribution.len().min(BLOCKS_PER_YEAR);
    let initial = distribution[distribution.len().saturating_sub(blocks + 1)];
    let outputs = distribution[distribution.len() - 1].saturating_sub(initial);
    (outputs as f64) / ((blocks * BLOCK_TIME) as f64)
  };

  let mut used = HashSet::<u64>::new();
  for o in &outputs {
    used.insert(o.0);
  }

  // TODO: Create a TX with less than the target amount, as allowed by the protocol
  let high = distribution[distribution.len() - DEFAULT_LOCK_WINDOW];
  if high.saturating_sub(COINBASE_LOCK_WINDOW as u64) <
    u64::try_from(inputs.len() * ring_len).unwrap()
  {
    Err(RpcError::InternalError("not enough coinbase candidates"))?;
  }

  // Select all decoys for this transaction, assuming we generate a sane transaction
  // We should almost never naturally generate an insane transaction, hence why this doesn't
  // bother with an overage
  let mut decoys = select_n(
    rng,
    rpc,
    &distribution,
    height,
    high,
    per_second,
    &real,
    &mut used,
    inputs.len() * decoy_count,
    fingerprintable_canonical,
  )
  .await?;
  real.zeroize();

  let mut res = Vec::with_capacity(inputs.len());
  for o in outputs {
    // Grab the decoys for this specific output
    let mut ring = decoys.drain((decoys.len() - decoy_count) ..).collect::<Vec<_>>();
    ring.push(o);
    ring.sort_by(|a, b| a.0.cmp(&b.0));

    // Sanity checks are only run in Monero once 1000 outputs are available
    // We run this check whenever the highest output index we acknowledge is > 500
    // This means we assume (for presumably test blockchains) any chain which has yet to reach
    // 500 outputs at the height being used is not a sufficiently mature blockchain
    // Considering Monero's p2p layer doesn't actually check transaction sanity, it should be
    // fine for us to not have perfectly matching rules, especially since this code would
    // infinitely loop if it couldn't determine sanity, which is possible with sufficient
    // inputs on sufficiently small chains
    if high > 500 {
      // Make sure the TX passes the sanity check that the median output is within the last 40%
      let target_median = high * 3 / 5;
      while ring[ring_len / 2].0 < target_median {
        // If it's not, update the bottom half with new values to ensure the median only moves up
        for removed in ring.drain(0 .. (ring_len / 2)).collect::<Vec<_>>() {
          // If we removed the real spend, add it back
          if removed.0 == o.0 {
            ring.push(o);
          } else {
            // We could have not removed this, saving CPU time and keeping low values as
            // possibilities, yet it'd increase the amount of decoys required to create this
            // transaction and some removed outputs may be the best option (as we drop the first
            // half, not just the bottom n)
            used.remove(&removed.0);
          }
        }

        // Select new outputs until we have a full sized ring again
        ring.extend(
          select_n(
            rng,
            rpc,
            &distribution,
            height,
            high,
            per_second,
            &[],
            &mut used,
            ring_len - ring.len(),
            fingerprintable_canonical,
          )
          .await?,
        );
        ring.sort_by(|a, b| a.0.cmp(&b.0));
      }

      // The other sanity check rule is about duplicates, yet we already enforce unique ring
      // members
    }

    res.push(Decoys {
      // Binary searches for the real spend since we don't know where it sorted to
      i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
      offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
      ring: ring.iter().map(|output| output.1).collect(),
    });
  }

  Ok(res)
}

/// Decoy data, containing the actual member as well (at index `i`).
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct Decoys {
  pub(crate) i: u8,
  pub(crate) offsets: Vec<u64>,
  pub(crate) ring: Vec<[EdwardsPoint; 2]>,
}

#[allow(clippy::len_without_is_empty)]
impl Decoys {
  pub fn fee_weight(offsets: &[u64]) -> usize {
    varint_len(offsets.len()) + offsets.iter().map(|offset| varint_len(*offset)).sum::<usize>()
  }

  pub fn len(&self) -> usize {
    self.offsets.len()
  }

  pub fn indexes(&self) -> Vec<u64> {
    let mut res = vec![self.offsets[0]; self.len()];
    for m in 1 .. res.len() {
      res[m] = res[m - 1] + self.offsets[m];
    }
    res
  }

  /// Select decoys using the same distribution as Monero. Relies on the monerod RPC
  /// response for an output's unlocked status, minimizing trips to the daemon.
  pub async fn select<R: RngCore + CryptoRng, RPC: RpcConnection>(
    rng: &mut R,
    rpc: &Rpc<RPC>,
    ring_len: usize,
    height: usize,
    inputs: &[SpendableOutput],
  ) -> Result<Vec<Decoys>, RpcError> {
    select_decoys(rng, rpc, ring_len, height, inputs, false).await
  }

  /// If no reorg has occurred and the RPC is honest, any caller who passes the same height to
  /// this function will use the same distribution to select decoys. It is fingerprintable
  /// because a caller using this will not be able to select decoys which are timelocked
  /// with a timestamp. Any transaction which includes timestamp-timelocked decoys in its
  /// rings could not have been constructed using this function.
  ///
  /// TODO: upstream a change to monerod's get_outs RPC to accept a height param for checking
  /// an output's unlocked status, and remove all usage of fingerprintable_canonical
  pub async fn fingerprintable_canonical_select<R: RngCore + CryptoRng, RPC: RpcConnection>(
    rng: &mut R,
    rpc: &Rpc<RPC>,
    ring_len: usize,
    height: usize,
    inputs: &[SpendableOutput],
  ) -> Result<Vec<Decoys>, RpcError> {
    select_decoys(rng, rpc, ring_len, height, inputs, true).await
  }
}
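
// A hypothetical usage sketch (not part of the original file): `height` and `inputs` are
// assumed to come from the caller, and the ring length of 16 is an assumption for
// illustration, not a value taken from this file.
async fn decoy_selection_usage_sketch<R: RngCore + CryptoRng, RPC: RpcConnection>(
  rng: &mut R,
  rpc: &Rpc<RPC>,
  height: usize,
  inputs: &[SpendableOutput],
) -> Result<Vec<Decoys>, RpcError> {
  // One Decoys entry is returned per input, each ring containing ring_len members
  Decoys::select(rng, rpc, 16, height, inputs).await
}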

@@ -1,268 +0,0 @@
use core::ops::Deref;
use std_shims::collections::{HashSet, HashMap};

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  edwards::{EdwardsPoint, CompressedEdwardsY},
};

use crate::{
  hash, hash_to_scalar, serialize::write_varint, ringct::EncryptedAmount, transaction::Input,
};

pub mod extra;
pub(crate) use extra::{PaymentId, ExtraField, Extra};

/// Seed creation and parsing functionality.
pub mod seed;

/// Address encoding and decoding functionality.
pub mod address;
use address::{Network, AddressType, SubaddressIndex, AddressSpec, AddressMeta, MoneroAddress};

mod scan;
pub use scan::{ReceivedOutput, SpendableOutput, Timelocked};

pub mod decoys;
pub use decoys::Decoys;

mod send;
pub use send::{FeePriority, Fee, TransactionError, Change, SignableTransaction, Eventuality};
#[cfg(feature = "std")]
pub use send::SignableTransactionBuilder;
#[cfg(feature = "multisig")]
pub(crate) use send::InternalPayment;
#[cfg(feature = "multisig")]
pub use send::TransactionMachine;

fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> core::cmp::Ordering {
  x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse()
}

// https://gist.github.com/kayabaNerve/8066c13f1fe1573286ba7a2fd79f6100
pub(crate) fn uniqueness(inputs: &[Input]) -> [u8; 32] {
  let mut u = b"uniqueness".to_vec();
  for input in inputs {
    match input {
      // If Gen, this should be the only input, making this loop somewhat pointless
      // This works and even if there were somehow multiple inputs, it'd be a false negative
      Input::Gen(height) => {
        write_varint(height, &mut u).unwrap();
      }
      Input::ToKey { key_image, .. } => u.extend(key_image.compress().to_bytes()),
    }
  }
  hash(&u)
}

// Hs("view_tag" || 8Ra || o), Hs(8Ra || o), and H(8Ra || 0x8d) with uniqueness inclusion in the
// Scalar as an option
#[allow(non_snake_case)]
pub(crate) fn shared_key(
  uniqueness: Option<[u8; 32]>,
  ecdh: EdwardsPoint,
  o: usize,
) -> (u8, Scalar, [u8; 8]) {
  // 8Ra
  let mut output_derivation = ecdh.mul_by_cofactor().compress().to_bytes().to_vec();

  let mut payment_id_xor = [0; 8];
  payment_id_xor
    .copy_from_slice(&hash(&[output_derivation.as_ref(), [0x8d].as_ref()].concat())[.. 8]);

  // || o
  write_varint(&o, &mut output_derivation).unwrap();

  let view_tag = hash(&[b"view_tag".as_ref(), &output_derivation].concat())[0];

  // uniqueness ||
  let shared_key = if let Some(uniqueness) = uniqueness {
    [uniqueness.as_ref(), &output_derivation].concat()
  } else {
    output_derivation
  };

  (view_tag, hash_to_scalar(&shared_key), payment_id_xor)
}

pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar {
  let mut mask = b"commitment_mask".to_vec();
  mask.extend(shared_key.to_bytes());
  hash_to_scalar(&mask)
}

pub(crate) fn amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
  let mut amount_mask = b"amount".to_vec();
  amount_mask.extend(key.to_bytes());
  (amount ^ u64::from_le_bytes(hash(&amount_mask)[.. 8].try_into().unwrap())).to_le_bytes()
}
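
// A minimal sketch (hypothetical test, not part of the original file): amount_encryption XORs
// the amount with a keyed hash, so applying it twice with the same key round-trips. This is
// exactly how the Compact branch of amount_decryption (below) recovers the amount.
#[test]
fn amount_encryption_round_trip_sketch() {
  let key = Scalar::from_bytes_mod_order([1; 32]);
  let ct = amount_encryption(1_000_000, key);
  let pt = u64::from_le_bytes(amount_encryption(u64::from_le_bytes(ct), key));
  assert_eq!(pt, 1_000_000);
}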

// TODO: Move this under EncryptedAmount?
fn amount_decryption(amount: &EncryptedAmount, key: Scalar) -> (Scalar, u64) {
  match amount {
    EncryptedAmount::Original { mask, amount } => {
      #[cfg(feature = "experimental")]
      {
        let mask_shared_sec = hash(key.as_bytes());
        let mask =
          Scalar::from_bytes_mod_order(*mask) - Scalar::from_bytes_mod_order(mask_shared_sec);

        let amount_shared_sec = hash(&mask_shared_sec);
        let amount_scalar =
          Scalar::from_bytes_mod_order(*amount) - Scalar::from_bytes_mod_order(amount_shared_sec);
        // d2b from rctTypes.cpp
        let amount = u64::from_le_bytes(amount_scalar.to_bytes()[0 .. 8].try_into().unwrap());

        (mask, amount)
      }

      #[cfg(not(feature = "experimental"))]
      {
        let _ = mask;
        let _ = amount;
        todo!("decrypting a legacy monero transaction's amount")
      }
    }
    EncryptedAmount::Compact { amount } => (
      commitment_mask(key),
      u64::from_le_bytes(amount_encryption(u64::from_le_bytes(*amount), key)),
    ),
  }
}

/// The private view key and public spend key, enabling scanning transactions.
#[derive(Clone, Zeroize, ZeroizeOnDrop)]
pub struct ViewPair {
  spend: EdwardsPoint,
  view: Zeroizing<Scalar>,
}

impl ViewPair {
  pub fn new(spend: EdwardsPoint, view: Zeroizing<Scalar>) -> ViewPair {
    ViewPair { spend, view }
  }

  pub fn spend(&self) -> EdwardsPoint {
    self.spend
  }

  pub fn view(&self) -> EdwardsPoint {
    self.view.deref() * ED25519_BASEPOINT_TABLE
  }

  fn subaddress_derivation(&self, index: SubaddressIndex) -> Scalar {
    hash_to_scalar(&Zeroizing::new(
      [
        b"SubAddr\0".as_ref(),
        Zeroizing::new(self.view.to_bytes()).as_ref(),
        &index.account().to_le_bytes(),
        &index.address().to_le_bytes(),
      ]
      .concat(),
    ))
  }

  fn subaddress_keys(&self, index: SubaddressIndex) -> (EdwardsPoint, EdwardsPoint) {
    let scalar = self.subaddress_derivation(index);
    let spend = self.spend + (&scalar * ED25519_BASEPOINT_TABLE);
    let view = self.view.deref() * spend;
    (spend, view)
  }

  /// Returns an address with the provided specification.
  pub fn address(&self, network: Network, spec: AddressSpec) -> MoneroAddress {
    let mut spend = self.spend;
    let mut view: EdwardsPoint = self.view.deref() * ED25519_BASEPOINT_TABLE;

    // Construct the address meta
    let meta = match spec {
      AddressSpec::Standard => AddressMeta::new(network, AddressType::Standard),
      AddressSpec::Integrated(payment_id) => {
        AddressMeta::new(network, AddressType::Integrated(payment_id))
      }
      AddressSpec::Subaddress(index) => {
        (spend, view) = self.subaddress_keys(index);
        AddressMeta::new(network, AddressType::Subaddress)
      }
      AddressSpec::Featured { subaddress, payment_id, guaranteed } => {
        if let Some(index) = subaddress {
          (spend, view) = self.subaddress_keys(index);
        }
        AddressMeta::new(
          network,
          AddressType::Featured { subaddress: subaddress.is_some(), payment_id, guaranteed },
        )
      }
    };

    MoneroAddress::new(meta, spend, view)
  }
}

/// Transaction scanner.
/// This scanner is capable of generating subaddresses, and of scanning for them once they've
/// been explicitly generated. If the burning bug is attempted, any secondary outputs will be
/// ignored.
#[derive(Clone)]
pub struct Scanner {
  pair: ViewPair,
  // Also contains the spend key as None
  pub(crate) subaddresses: HashMap<CompressedEdwardsY, Option<SubaddressIndex>>,
  pub(crate) burning_bug: Option<HashSet<CompressedEdwardsY>>,
}

impl Zeroize for Scanner {
  fn zeroize(&mut self) {
    self.pair.zeroize();

    // These may not be effective, unfortunately
    for (mut key, mut value) in self.subaddresses.drain() {
      key.zeroize();
      value.zeroize();
    }
    if let Some(ref mut burning_bug) = self.burning_bug.take() {
      for mut output in burning_bug.drain() {
        output.zeroize();
      }
    }
  }
}

impl Drop for Scanner {
  fn drop(&mut self) {
    self.zeroize();
  }
}

impl ZeroizeOnDrop for Scanner {}

impl Scanner {
  /// Create a Scanner from a ViewPair.
  ///
  /// burning_bug is a HashSet of used keys, intended to prevent key reuse which would burn funds.
  ///
  /// When an output is successfully scanned, the output key MUST be saved to disk.
  ///
  /// When a new scanner is created, ALL saved output keys must be passed in to be secure.
  ///
  /// If None is passed, a modified shared-key derivation is used which is immune to the burning
  /// bug (specifically, the Guaranteed feature from Featured Addresses).
  pub fn from_view(pair: ViewPair, burning_bug: Option<HashSet<CompressedEdwardsY>>) -> Scanner {
    let mut subaddresses = HashMap::new();
    subaddresses.insert(pair.spend.compress(), None);
    Scanner { pair, subaddresses, burning_bug }
  }

  /// Register a subaddress.
  // There used to be an address function here, yet it wasn't safe. It could generate addresses
  // incompatible with the Scanner. While we could return None for that, then we have the issue
  // of runtime failures to generate an address.
  // Removing that API was the simplest option.
  pub fn register_subaddress(&mut self, subaddress: SubaddressIndex) {
    let (spend, _) = self.pair.subaddress_keys(subaddress);
    self.subaddresses.insert(spend.compress(), Some(subaddress));
  }
}
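
// A hypothetical usage sketch (not part of the original file): construct a Scanner with the
// burning-bug protection enabled (an empty set on first use, as no output keys have been saved
// yet) and register a subaddress to scan for. The ViewPair is assumed to come from the caller.
fn scanner_usage_sketch(pair: ViewPair) -> Scanner {
  let mut scanner = Scanner::from_view(pair, Some(HashSet::new()));
  // (0, 0) is reserved for the root keys, so (0, 1) is the first subaddress
  scanner.register_subaddress(SubaddressIndex::new(0, 1).unwrap());
  scanner
}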
|
|
||||||
@@ -1,521 +0,0 @@
|
|||||||
use core::ops::Deref;
|
|
||||||
use std_shims::{
|
|
||||||
vec::Vec,
|
|
||||||
string::ToString,
|
|
||||||
io::{self, Read, Write},
|
|
||||||
};
|
|
||||||
|
|
||||||
use zeroize::{Zeroize, ZeroizeOnDrop};
|
|
||||||
|
|
||||||
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};
|
|
||||||
|
|
||||||
use monero_generators::decompress_point;
|
|
||||||
|
|
||||||
use crate::{
|
|
||||||
Commitment,
|
|
||||||
serialize::{read_byte, read_u32, read_u64, read_bytes, read_scalar, read_point, read_raw_vec},
|
|
||||||
transaction::{Input, Timelock, Transaction},
|
|
||||||
block::Block,
|
|
||||||
rpc::{RpcError, RpcConnection, Rpc},
|
|
||||||
wallet::{
|
|
||||||
PaymentId, Extra, address::SubaddressIndex, Scanner, uniqueness, shared_key, amount_decryption,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
/// An absolute output ID, defined as its transaction hash and output index.
|
|
||||||
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
|
|
||||||
pub struct AbsoluteId {
|
|
||||||
pub tx: [u8; 32],
|
|
||||||
pub o: u8,
|
|
||||||
}
|
|
||||||
|
|
||||||
impl core::fmt::Debug for AbsoluteId {
|
|
||||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
|
||||||
fmt.debug_struct("AbsoluteId").field("tx", &hex::encode(self.tx)).field("o", &self.o).finish()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl AbsoluteId {
|
|
||||||
pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
|
|
||||||
w.write_all(&self.tx)?;
|
|
||||||
w.write_all(&[self.o])
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn serialize(&self) -> Vec<u8> {
|
|
||||||
let mut serialized = Vec::with_capacity(32 + 1);
|
|
||||||
self.write(&mut serialized).unwrap();
|
|
||||||
serialized
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn read<R: Read>(r: &mut R) -> io::Result<AbsoluteId> {
|
|
||||||
Ok(AbsoluteId { tx: read_bytes(r)?, o: read_byte(r)? })
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
/// The data contained within an output.
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct OutputData {
  pub key: EdwardsPoint,
  /// Absolute difference between the spend key and the key in this output
  pub key_offset: Scalar,
  pub commitment: Commitment,
}

impl core::fmt::Debug for OutputData {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    fmt
      .debug_struct("OutputData")
      .field("key", &hex::encode(self.key.compress().0))
      .field("key_offset", &hex::encode(self.key_offset.to_bytes()))
      .field("commitment", &self.commitment)
      .finish()
  }
}

impl OutputData {
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    w.write_all(&self.key.compress().to_bytes())?;
    w.write_all(&self.key_offset.to_bytes())?;
    w.write_all(&self.commitment.mask.to_bytes())?;
    w.write_all(&self.commitment.amount.to_le_bytes())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(32 + 32 + 32 + 8);
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<OutputData> {
    Ok(OutputData {
      key: read_point(r)?,
      key_offset: read_scalar(r)?,
      commitment: Commitment::new(read_scalar(r)?, read_u64(r)?),
    })
  }
}

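// A sketch of what `key_offset` means, assuming a hypothetical `spend: Scalar` private spend
// key and an `output: OutputData` recovered by the Scanner: the one-time private key is the
// spend key plus the offset, and must correspond to the output's key.
fn one_time_key_sketch(spend: Scalar, output: &OutputData) {
  let one_time_private = spend + output.key_offset;
  debug_assert_eq!(&one_time_private * ED25519_BASEPOINT_TABLE, output.key);
}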
/// The metadata for an output.
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub struct Metadata {
  /// The subaddress this output was sent to.
  pub subaddress: Option<SubaddressIndex>,
  /// The payment ID included with this output.
  /// There are two circumstances in which the reference wallet2 ignores the payment ID,
  /// yet the payment ID will still be returned here:
  ///
  /// 1) If the payment ID is tied to an output received by a subaddress account
  ///    that spent Monero in the transaction (the received output is considered
  ///    "change" and is not considered a "payment" in this case). If there are multiple
  ///    spending subaddress accounts in a transaction, the highest index spent key image
  ///    is used to determine the spending subaddress account.
  ///
  /// 2) If the payment ID is the unencrypted variant and the block's hard fork version is
  ///    v12 or higher (https://github.com/serai-dex/serai/issues/512)
  pub payment_id: Option<PaymentId>,
  /// Arbitrary data encoded in TX extra.
  pub arbitrary_data: Vec<Vec<u8>>,
}

impl core::fmt::Debug for Metadata {
  fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
    fmt
      .debug_struct("Metadata")
      .field("subaddress", &self.subaddress)
      .field("payment_id", &self.payment_id)
      .field("arbitrary_data", &self.arbitrary_data.iter().map(hex::encode).collect::<Vec<_>>())
      .finish()
  }
}

impl Metadata {
  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    if let Some(subaddress) = self.subaddress {
      w.write_all(&[1])?;
      w.write_all(&subaddress.account().to_le_bytes())?;
      w.write_all(&subaddress.address().to_le_bytes())?;
    } else {
      w.write_all(&[0])?;
    }

    if let Some(payment_id) = self.payment_id {
      w.write_all(&[1])?;
      payment_id.write(w)?;
    } else {
      w.write_all(&[0])?;
    }

    w.write_all(&u32::try_from(self.arbitrary_data.len()).unwrap().to_le_bytes())?;
    for part in &self.arbitrary_data {
      w.write_all(&[u8::try_from(part.len()).unwrap()])?;
      w.write_all(part)?;
    }
    Ok(())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(1 + 8 + 1);
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<Metadata> {
    let subaddress = if read_byte(r)? == 1 {
      Some(
        SubaddressIndex::new(read_u32(r)?, read_u32(r)?)
          .ok_or_else(|| io::Error::other("invalid subaddress in metadata"))?,
      )
    } else {
      None
    };

    Ok(Metadata {
      subaddress,
      payment_id: if read_byte(r)? == 1 { PaymentId::read(r).ok() } else { None },
      arbitrary_data: {
        let mut data = vec![];
        for _ in 0 .. read_u32(r)? {
          let len = read_byte(r)?;
          data.push(read_raw_vec(read_byte, usize::from(len), r)?);
        }
        data
      },
    })
  }
}

/// A received output, defined as its absolute ID, data, and metadata.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct ReceivedOutput {
  pub absolute: AbsoluteId,
  pub data: OutputData,
  pub metadata: Metadata,
}

impl ReceivedOutput {
  pub fn key(&self) -> EdwardsPoint {
    self.data.key
  }

  pub fn key_offset(&self) -> Scalar {
    self.data.key_offset
  }

  pub fn commitment(&self) -> Commitment {
    self.data.commitment.clone()
  }

  pub fn arbitrary_data(&self) -> &[Vec<u8>] {
    &self.metadata.arbitrary_data
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.absolute.write(w)?;
    self.data.write(w)?;
    self.metadata.write(w)
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<ReceivedOutput> {
    Ok(ReceivedOutput {
      absolute: AbsoluteId::read(r)?,
      data: OutputData::read(r)?,
      metadata: Metadata::read(r)?,
    })
  }
}

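// A round-trip sketch for the serialization methods above; `received` is a hypothetical
// ReceivedOutput obtained from scanning.
fn received_output_round_trip(received: &ReceivedOutput) -> io::Result<()> {
  let serialized = received.serialize();
  let deserialized = ReceivedOutput::read(&mut serialized.as_slice())?;
  assert_eq!(*received, deserialized);
  Ok(())
}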
/// A spendable output, defined as a received output and its index on the Monero blockchain.
/// This index is dependent on the Monero blockchain and will only be known once the output is
/// included within a block. This may change if there's a reorganization.
#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
pub struct SpendableOutput {
  pub output: ReceivedOutput,
  pub global_index: u64,
}

impl SpendableOutput {
  /// Update the spendable output's global index. This is intended to be called if a
  /// re-organization occurred.
  pub async fn refresh_global_index<RPC: RpcConnection>(
    &mut self,
    rpc: &Rpc<RPC>,
  ) -> Result<(), RpcError> {
    self.global_index = *rpc
      .get_o_indexes(self.output.absolute.tx)
      .await?
      .get(usize::from(self.output.absolute.o))
      .ok_or(RpcError::InvalidNode(
        "node's returned output indexes didn't include an index for this output".to_string(),
      ))?;
    Ok(())
  }

  pub async fn from<RPC: RpcConnection>(
    rpc: &Rpc<RPC>,
    output: ReceivedOutput,
  ) -> Result<SpendableOutput, RpcError> {
    let mut output = SpendableOutput { output, global_index: 0 };
    output.refresh_global_index(rpc).await?;
    Ok(output)
  }

  pub fn key(&self) -> EdwardsPoint {
    self.output.key()
  }

  pub fn key_offset(&self) -> Scalar {
    self.output.key_offset()
  }

  pub fn commitment(&self) -> Commitment {
    self.output.commitment()
  }

  pub fn arbitrary_data(&self) -> &[Vec<u8>] {
    self.output.arbitrary_data()
  }

  pub fn write<W: Write>(&self, w: &mut W) -> io::Result<()> {
    self.output.write(w)?;
    w.write_all(&self.global_index.to_le_bytes())
  }

  pub fn serialize(&self) -> Vec<u8> {
    let mut serialized = vec![];
    self.write(&mut serialized).unwrap();
    serialized
  }

  pub fn read<R: Read>(r: &mut R) -> io::Result<SpendableOutput> {
    Ok(SpendableOutput { output: ReceivedOutput::read(r)?, global_index: read_u64(r)? })
  }
}

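// A sketch of promoting a scanned output into a spendable one; `rpc` and `received` are
// hypothetical bindings. `from` resolves the global index, and the index should be
// re-resolved if a reorganization may have moved the output.
async fn to_spendable<RPC: RpcConnection>(
  rpc: &Rpc<RPC>,
  received: ReceivedOutput,
) -> Result<SpendableOutput, RpcError> {
  let mut spendable = SpendableOutput::from(rpc, received).await?;
  // After a suspected reorganization, refresh the on-chain index before spending.
  spendable.refresh_global_index(rpc).await?;
  Ok(spendable)
}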
/// A collection of timelocked outputs, either received or spendable.
#[derive(Zeroize)]
pub struct Timelocked<O: Clone + Zeroize>(Timelock, Vec<O>);
impl<O: Clone + Zeroize> Drop for Timelocked<O> {
  fn drop(&mut self) {
    self.zeroize();
  }
}
impl<O: Clone + Zeroize> ZeroizeOnDrop for Timelocked<O> {}

impl<O: Clone + Zeroize> Timelocked<O> {
  pub fn timelock(&self) -> Timelock {
    self.0
  }

  /// Return the outputs if they're not timelocked, or an empty vector if they are.
  #[must_use]
  pub fn not_locked(&self) -> Vec<O> {
    if self.0 == Timelock::None {
      return self.1.clone();
    }
    vec![]
  }

  /// Returns None if the Timelocks aren't comparable. Returns Some(vec![]) if none are unlocked.
  #[must_use]
  pub fn unlocked(&self, timelock: Timelock) -> Option<Vec<O>> {
    // If the Timelocks are comparable, return the outputs if they're now unlocked
    if self.0 <= timelock {
      Some(self.1.clone())
    } else {
      None
    }
  }

  #[must_use]
  pub fn ignore_timelock(&self) -> Vec<O> {
    self.1.clone()
  }
}

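// A sketch of consuming a `Timelocked`, assuming `timelocked` came from scanning and
// `chain_timelock` is a hypothetical Timelock describing the chain's current state.
fn spendable_now(
  timelocked: &Timelocked<ReceivedOutput>,
  chain_timelock: Timelock,
) -> Vec<ReceivedOutput> {
  let outputs = timelocked.not_locked();
  if !outputs.is_empty() {
    return outputs;
  }
  // `unlocked` returns None if the two Timelock variants aren't comparable.
  timelocked.unlocked(chain_timelock).unwrap_or_default()
}
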
impl Scanner {
  /// Scan a transaction to discover the received outputs.
  pub fn scan_transaction(&mut self, tx: &Transaction) -> Timelocked<ReceivedOutput> {
    // Only scan RCT TXs since we can only spend RCT outputs
    if tx.prefix.version != 2 {
      return Timelocked(tx.prefix.timelock, vec![]);
    }

    let Ok(extra) = Extra::read::<&[u8]>(&mut tx.prefix.extra.as_ref()) else {
      return Timelocked(tx.prefix.timelock, vec![]);
    };

    let Some((tx_keys, additional)) = extra.keys() else {
      return Timelocked(tx.prefix.timelock, vec![]);
    };

    let payment_id = extra.payment_id();

    let mut res = vec![];
    for (o, output) in tx.prefix.outputs.iter().enumerate() {
      // https://github.com/serai-dex/serai/issues/106
      if let Some(burning_bug) = self.burning_bug.as_ref() {
        if burning_bug.contains(&output.key) {
          continue;
        }
      }

      let output_key = decompress_point(output.key.to_bytes());
      if output_key.is_none() {
        continue;
      }
      let output_key = output_key.unwrap();

      let additional = additional.as_ref().map(|additional| additional.get(o));

      for key in tx_keys.iter().map(|key| Some(Some(key))).chain(core::iter::once(additional)) {
        let key = match key {
          Some(Some(key)) => key,
          Some(None) => {
            // This is non-standard. There were additional keys, yet not one for this output
            // https://github.com/monero-project/monero/
            //   blob/04a1e2875d6e35e27bb21497988a6c822d319c28/
            //   src/cryptonote_basic/cryptonote_format_utils.cpp#L1062
            continue;
          }
          None => break,
        };
        let (view_tag, shared_key, payment_id_xor) = shared_key(
          if self.burning_bug.is_none() { Some(uniqueness(&tx.prefix.inputs)) } else { None },
          self.pair.view.deref() * key,
          o,
        );

        let payment_id = payment_id.map(|id| id ^ payment_id_xor);

        if let Some(actual_view_tag) = output.view_tag {
          if actual_view_tag != view_tag {
            continue;
          }
        }

        // P - shared == spend
        let subaddress =
          self.subaddresses.get(&(output_key - (&shared_key * ED25519_BASEPOINT_TABLE)).compress());
        if subaddress.is_none() {
          continue;
        }
        let subaddress = *subaddress.unwrap();

        // If the output key has torsion, subtracting the (torsion-free) shared key will leave a
        // torsioned key, which won't be present in our HashMap of keys, so we wouldn't identify
        // it as ours. If we did though, it'd enable bypassing the included burning bug
        // protection.
        assert!(output_key.is_torsion_free());

        let mut key_offset = shared_key;
        if let Some(subaddress) = subaddress {
          key_offset += self.pair.subaddress_derivation(subaddress);
        }
        // Since we've found an output to us, get its amount
        let mut commitment = Commitment::zero();

        // Miner transaction
        if let Some(amount) = output.amount {
          commitment.amount = amount;
        // Regular transaction
        } else {
          let (mask, amount) = match tx.rct_signatures.base.encrypted_amounts.get(o) {
            Some(amount) => amount_decryption(amount, shared_key),
            // This should never happen, yet it may be possible with miner transactions?
            // Using get just decreases the possibility of a panic and lets us move on in that case
            None => break,
          };

          // Rebuild the commitment to verify it
          commitment = Commitment::new(mask, amount);
          // If this is a malicious commitment, move to the next output
          // Any other R value will calculate to a different spend key and is therefore ignorable
          if Some(&commitment.calculate()) != tx.rct_signatures.base.commitments.get(o) {
            break;
          }
        }

        if commitment.amount != 0 {
          res.push(ReceivedOutput {
            absolute: AbsoluteId { tx: tx.hash(), o: o.try_into().unwrap() },

            data: OutputData { key: output_key, key_offset, commitment },

            metadata: Metadata { subaddress, payment_id, arbitrary_data: extra.data() },
          });

          if let Some(burning_bug) = self.burning_bug.as_mut() {
            burning_bug.insert(output.key);
          }
        }
        // Break to prevent public keys from being included multiple times, triggering multiple
        // inclusions of the same output
        break;
      }
    }

    Timelocked(tx.prefix.timelock, res)
  }

  /// Scan a block to obtain its spendable outputs. It's the presence in a block which gives
  /// these transactions their global index, and this must be batched, as asking for the index
  /// of specific transactions is a dead giveaway for which transactions you successfully
  /// scanned. This function obtains the output indexes for the miner transaction, incrementing
  /// from there instead.
  pub async fn scan<RPC: RpcConnection>(
    &mut self,
    rpc: &Rpc<RPC>,
    block: &Block,
  ) -> Result<Vec<Timelocked<SpendableOutput>>, RpcError> {
    let mut index = rpc.get_o_indexes(block.miner_tx.hash()).await?[0];
    let mut txs = vec![block.miner_tx.clone()];
    txs.extend(rpc.get_transactions(&block.txs).await?);

    let map = |mut timelock: Timelocked<ReceivedOutput>, index| {
      if timelock.1.is_empty() {
        None
      } else {
        Some(Timelocked(
          timelock.0,
          timelock
            .1
            .drain(..)
            .map(|output| SpendableOutput {
              global_index: index + u64::from(output.absolute.o),
              output,
            })
            .collect(),
        ))
      }
    };

    let mut res = vec![];
    for tx in txs {
      if let Some(timelock) = map(self.scan_transaction(&tx), index) {
        res.push(timelock);
      }
      index += u64::try_from(
        tx.prefix
          .outputs
          .iter()
          // Filter to v2 miner TX outputs/RCT outputs since we're tracking the RCT output index
          .filter(|output| {
            let is_v2_miner_tx =
              (tx.prefix.version == 2) && matches!(tx.prefix.inputs.first(), Some(Input::Gen(..)));
            is_v2_miner_tx || output.amount.is_none()
          })
          .count(),
      )
      .unwrap();
    }
    Ok(res)
  }
}
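// A sketch of driving `Scanner::scan` over a range of blocks; `rpc`, `scanner`, and the
// block-number bounds are hypothetical bindings, and `get_block_by_number` is the RPC
// getter used elsewhere in this repository's tests.
async fn scan_blocks<RPC: RpcConnection>(
  rpc: &Rpc<RPC>,
  scanner: &mut Scanner,
  start: usize,
  end: usize,
) -> Result<Vec<Timelocked<SpendableOutput>>, RpcError> {
  let mut res = vec![];
  for number in start .. end {
    let block = rpc.get_block_by_number(number).await?;
    // Scanning per block batches the output index resolution, per the privacy note above.
    res.extend(scanner.scan(rpc, &block).await?);
  }
  Ok(res)
}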
@@ -1,136 +0,0 @@
use core::fmt;
use std_shims::string::String;

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};
use rand_core::{RngCore, CryptoRng};

pub(crate) mod classic;
pub(crate) mod polyseed;
use classic::{CLASSIC_SEED_LENGTH, CLASSIC_SEED_LENGTH_WITH_CHECKSUM, ClassicSeed};
use polyseed::{POLYSEED_LENGTH, Polyseed};

/// Error when decoding a seed.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
#[cfg_attr(feature = "std", derive(thiserror::Error))]
pub enum SeedError {
  #[cfg_attr(feature = "std", error("invalid number of words in seed"))]
  InvalidSeedLength,
  #[cfg_attr(feature = "std", error("unknown language"))]
  UnknownLanguage,
  #[cfg_attr(feature = "std", error("invalid checksum"))]
  InvalidChecksum,
  #[cfg_attr(feature = "std", error("english old seeds don't support checksums"))]
  EnglishOldWithChecksum,
  #[cfg_attr(feature = "std", error("provided entropy is not valid"))]
  InvalidEntropy,
  #[cfg_attr(feature = "std", error("invalid seed"))]
  InvalidSeed,
  #[cfg_attr(feature = "std", error("provided features are not supported"))]
  UnsupportedFeatures,
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum SeedType {
  Classic(classic::Language),
  Polyseed(polyseed::Language),
}

/// A Monero seed.
#[derive(Clone, PartialEq, Eq, Zeroize, ZeroizeOnDrop)]
pub enum Seed {
  Classic(ClassicSeed),
  Polyseed(Polyseed),
}

impl fmt::Debug for Seed {
  fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
    match self {
      Seed::Classic(_) => f.debug_struct("Seed::Classic").finish_non_exhaustive(),
      Seed::Polyseed(_) => f.debug_struct("Seed::Polyseed").finish_non_exhaustive(),
    }
  }
}

impl Seed {
  /// Creates a new `Seed`.
  pub fn new<R: RngCore + CryptoRng>(rng: &mut R, seed_type: SeedType) -> Seed {
    match seed_type {
      SeedType::Classic(lang) => Seed::Classic(ClassicSeed::new(rng, lang)),
      SeedType::Polyseed(lang) => Seed::Polyseed(Polyseed::new(rng, lang)),
    }
  }

  /// Parse a seed from a `String`.
  pub fn from_string(seed_type: SeedType, words: Zeroizing<String>) -> Result<Seed, SeedError> {
    let word_count = words.split_whitespace().count();
    match seed_type {
      SeedType::Classic(lang) => {
        if word_count != CLASSIC_SEED_LENGTH && word_count != CLASSIC_SEED_LENGTH_WITH_CHECKSUM {
          Err(SeedError::InvalidSeedLength)?
        } else {
          ClassicSeed::from_string(lang, words).map(Seed::Classic)
        }
      }
      SeedType::Polyseed(lang) => {
        if word_count != POLYSEED_LENGTH {
          Err(SeedError::InvalidSeedLength)?
        } else {
          Polyseed::from_string(lang, words).map(Seed::Polyseed)
        }
      }
    }
  }

  /// Creates a `Seed` from entropy and an optional birthday (denoted in seconds since the
  /// epoch).
  ///
  /// For `SeedType::Classic`, the birthday is ignored.
  ///
  /// For `SeedType::Polyseed`, the last 13 bytes of `entropy` must be `0`.
  // TODO: Return Result, not Option
  pub fn from_entropy(
    seed_type: SeedType,
    entropy: Zeroizing<[u8; 32]>,
    birthday: Option<u64>,
  ) -> Option<Seed> {
    match seed_type {
      SeedType::Classic(lang) => ClassicSeed::from_entropy(lang, entropy).map(Seed::Classic),
      SeedType::Polyseed(lang) => {
        Polyseed::from(lang, 0, birthday.unwrap_or(0), entropy).map(Seed::Polyseed).ok()
      }
    }
  }

  /// Returns the seed as a `String`.
  pub fn to_string(&self) -> Zeroizing<String> {
    match self {
      Seed::Classic(seed) => seed.to_string(),
      Seed::Polyseed(seed) => seed.to_string(),
    }
  }

  /// Returns the entropy for this seed.
  pub fn entropy(&self) -> Zeroizing<[u8; 32]> {
    match self {
      Seed::Classic(seed) => seed.entropy(),
      Seed::Polyseed(seed) => seed.entropy().clone(),
    }
  }

  /// Returns the key derived from this seed.
  pub fn key(&self) -> Zeroizing<[u8; 32]> {
    match self {
      // Classic does not differentiate between its entropy and its key
      Seed::Classic(seed) => seed.entropy(),
      Seed::Polyseed(seed) => seed.key(),
    }
  }

  /// Returns the birthday of this seed.
  pub fn birthday(&self) -> u64 {
    match self {
      Seed::Classic(_) => 0,
      Seed::Polyseed(seed) => seed.birthday(),
    }
  }
}
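// A round-trip sketch for the seed API above, assuming `polyseed::Language` exposes an
// `English` variant (the exact language list lives in the polyseed module).
fn seed_round_trip() -> Result<(), SeedError> {
  use rand_core::OsRng;
  let lang = polyseed::Language::English;
  let seed = Seed::new(&mut OsRng, SeedType::Polyseed(lang));
  // `to_string` and `from_string` are inverses for a well-formed seed.
  let recovered = Seed::from_string(SeedType::Polyseed(lang), seed.to_string())?;
  assert_eq!(*seed.entropy(), *recovered.entropy());
  Ok(())
}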
@@ -1,144 +0,0 @@
use std::sync::{Arc, RwLock};

use zeroize::{Zeroize, ZeroizeOnDrop, Zeroizing};

use crate::{
  Protocol,
  wallet::{
    address::MoneroAddress, Fee, SpendableOutput, Change, Decoys, SignableTransaction,
    TransactionError, extra::MAX_ARBITRARY_DATA_SIZE,
  },
};

#[derive(Clone, PartialEq, Eq, Debug, Zeroize, ZeroizeOnDrop)]
struct SignableTransactionBuilderInternal {
  protocol: Protocol,
  fee_rate: Fee,

  r_seed: Option<Zeroizing<[u8; 32]>>,
  inputs: Vec<(SpendableOutput, Decoys)>,
  payments: Vec<(MoneroAddress, u64)>,
  change_address: Change,
  data: Vec<Vec<u8>>,
}

impl SignableTransactionBuilderInternal {
  // Takes in the change address so users don't miss that they have to manually set one.
  // If they don't, all leftover funds will become part of the fee.
  fn new(protocol: Protocol, fee_rate: Fee, change_address: Change) -> Self {
    Self {
      protocol,
      fee_rate,
      r_seed: None,
      inputs: vec![],
      payments: vec![],
      change_address,
      data: vec![],
    }
  }

  fn set_r_seed(&mut self, r_seed: Zeroizing<[u8; 32]>) {
    self.r_seed = Some(r_seed);
  }

  fn add_input(&mut self, input: (SpendableOutput, Decoys)) {
    self.inputs.push(input);
  }
  fn add_inputs(&mut self, inputs: &[(SpendableOutput, Decoys)]) {
    self.inputs.extend(inputs.iter().cloned());
  }

  fn add_payment(&mut self, dest: MoneroAddress, amount: u64) {
    self.payments.push((dest, amount));
  }
  fn add_payments(&mut self, payments: &[(MoneroAddress, u64)]) {
    self.payments.extend(payments);
  }

  fn add_data(&mut self, data: Vec<u8>) {
    self.data.push(data);
  }
}

/// A Transaction Builder for Monero transactions.
/// All methods provided will modify self while also returning a shallow copy, enabling efficient
/// chaining with a clean API.
/// In order to fork the builder at some point, clone will still return a deep copy.
#[derive(Debug)]
pub struct SignableTransactionBuilder(Arc<RwLock<SignableTransactionBuilderInternal>>);
impl Clone for SignableTransactionBuilder {
  fn clone(&self) -> Self {
    Self(Arc::new(RwLock::new((*self.0.read().unwrap()).clone())))
  }
}

impl PartialEq for SignableTransactionBuilder {
  fn eq(&self, other: &Self) -> bool {
    *self.0.read().unwrap() == *other.0.read().unwrap()
  }
}
impl Eq for SignableTransactionBuilder {}

impl Zeroize for SignableTransactionBuilder {
  fn zeroize(&mut self) {
    self.0.write().unwrap().zeroize()
  }
}

impl SignableTransactionBuilder {
  fn shallow_copy(&self) -> Self {
    Self(self.0.clone())
  }

  pub fn new(protocol: Protocol, fee_rate: Fee, change_address: Change) -> Self {
    Self(Arc::new(RwLock::new(SignableTransactionBuilderInternal::new(
      protocol,
      fee_rate,
      change_address,
    ))))
  }

  pub fn set_r_seed(&mut self, r_seed: Zeroizing<[u8; 32]>) -> Self {
    self.0.write().unwrap().set_r_seed(r_seed);
    self.shallow_copy()
  }

  pub fn add_input(&mut self, input: (SpendableOutput, Decoys)) -> Self {
    self.0.write().unwrap().add_input(input);
    self.shallow_copy()
  }
  pub fn add_inputs(&mut self, inputs: &[(SpendableOutput, Decoys)]) -> Self {
    self.0.write().unwrap().add_inputs(inputs);
    self.shallow_copy()
  }

  pub fn add_payment(&mut self, dest: MoneroAddress, amount: u64) -> Self {
    self.0.write().unwrap().add_payment(dest, amount);
    self.shallow_copy()
  }
  pub fn add_payments(&mut self, payments: &[(MoneroAddress, u64)]) -> Self {
    self.0.write().unwrap().add_payments(payments);
    self.shallow_copy()
  }

  pub fn add_data(&mut self, data: Vec<u8>) -> Result<Self, TransactionError> {
    if data.len() > MAX_ARBITRARY_DATA_SIZE {
      Err(TransactionError::TooMuchData)?;
    }
    self.0.write().unwrap().add_data(data);
    Ok(self.shallow_copy())
  }

  pub fn build(self) -> Result<SignableTransaction, TransactionError> {
    let read = self.0.read().unwrap();
    SignableTransaction::new(
      read.protocol,
      read.r_seed.clone(),
      read.inputs.clone(),
      read.payments.clone(),
      &read.change_address,
      read.data.clone(),
      read.fee_rate,
    )
  }
}
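// A chaining sketch for the builder above; `protocol`, `fee_rate`, `change`, `input`, and
// `dest` are hypothetical bindings, and the payment amount is illustrative.
fn build_sketch(
  protocol: Protocol,
  fee_rate: Fee,
  change: Change,
  input: (SpendableOutput, Decoys),
  dest: MoneroAddress,
) -> Result<SignableTransaction, TransactionError> {
  // Each call mutates the shared internal state and returns a shallow copy for chaining.
  SignableTransactionBuilder::new(protocol, fee_rate, change)
    .add_input(input)
    .add_payment(dest, 1_000_000_000) // 0.001 XMR in atomic units
    .build()
}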
File diff suppressed because it is too large
@@ -1,425 +0,0 @@
use std_shims::{
  vec::Vec,
  io::{self, Read},
  collections::HashMap,
};
use std::sync::{Arc, RwLock};

use zeroize::Zeroizing;

use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha20Rng;

use group::ff::Field;
use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::EdwardsPoint};
use dalek_ff_group as dfg;

use transcript::{Transcript, RecommendedTranscript};
use frost::{
  curve::Ed25519,
  Participant, FrostError, ThresholdKeys,
  sign::{
    Writable, Preprocess, CachedPreprocess, SignatureShare, PreprocessMachine, SignMachine,
    SignatureMachine, AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine,
  },
};

use crate::{
  random_scalar,
  ringct::{
    clsag::{ClsagInput, ClsagDetails, ClsagAddendum, ClsagMultisig, add_key_image_share},
    RctPrunable,
  },
  transaction::{Input, Transaction},
  wallet::{TransactionError, InternalPayment, SignableTransaction, key_image_sort, uniqueness},
};

/// FROST signing machine to produce a signed transaction.
pub struct TransactionMachine {
  signable: SignableTransaction,

  i: Participant,
  transcript: RecommendedTranscript,

  // Hashed key and scalar offset
  key_images: Vec<(EdwardsPoint, Scalar)>,
  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  clsags: Vec<AlgorithmMachine<Ed25519, ClsagMultisig>>,
}

pub struct TransactionSignMachine {
  signable: SignableTransaction,

  i: Participant,
  transcript: RecommendedTranscript,

  key_images: Vec<(EdwardsPoint, Scalar)>,
  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  clsags: Vec<AlgorithmSignMachine<Ed25519, ClsagMultisig>>,

  our_preprocess: Vec<Preprocess<Ed25519, ClsagAddendum>>,
}

pub struct TransactionSignatureMachine {
  tx: Transaction,
  clsags: Vec<AlgorithmSignatureMachine<Ed25519, ClsagMultisig>>,
}

impl SignableTransaction {
  /// Create a FROST signing machine out of this signable transaction.
  /// The height is the Monero blockchain height to synchronize around.
  pub fn multisig(
    self,
    keys: &ThresholdKeys<Ed25519>,
    mut transcript: RecommendedTranscript,
  ) -> Result<TransactionMachine, TransactionError> {
    let mut inputs = vec![];
    for _ in 0 .. self.inputs.len() {
      // Doesn't use resize, as that would use a single Arc for the entire Vec
      inputs.push(Arc::new(RwLock::new(None)));
    }
    let mut clsags = vec![];

    // Create an RNG out of the input shared keys, which either requires the view key or being
    // every sender, and the payments (address and amount), which a passive adversary may be
    // able to know depending on how these transactions are coordinated.
    // Being every sender would already let you note rings which happen to use your transactions
    // multiple times, already breaking privacy there.

    transcript.domain_separate(b"monero_transaction");

    // Also include the spend_key, as below only the key offset is included, so this transcripts
    // the sum product.
    // Useful as transcripting the sum product effectively transcripts the key image, further
    // guaranteeing the one-time properties noted below.
    transcript.append_message(b"spend_key", keys.group_key().0.compress().to_bytes());

    if let Some(r_seed) = &self.r_seed {
      transcript.append_message(b"r_seed", r_seed);
    }

    for (input, decoys) in &self.inputs {
      // These outputs can only be spent once. Therefore, it forces all RNGs derived from this
      // transcript (such as the one used to create one-time keys) to be unique
      transcript.append_message(b"input_hash", input.output.absolute.tx);
      transcript.append_message(b"input_output_index", [input.output.absolute.o]);
      // Not including this, with a doxxed list of payments, would allow brute forcing the inputs
      // to determine RNG seeds and therefore the true spends
      transcript.append_message(b"input_shared_key", input.key_offset().to_bytes());

      // Ensure all signers are signing the same rings
      transcript.append_message(b"real_spend", [decoys.i]);
      for (i, ring_member) in decoys.ring.iter().enumerate() {
        transcript
          .append_message(b"ring_member", [u8::try_from(i).expect("ring size exceeded 255")]);
        transcript.append_message(b"ring_member_offset", decoys.offsets[i].to_le_bytes());
        transcript.append_message(b"ring_member_key", ring_member[0].compress().to_bytes());
        transcript.append_message(b"ring_member_commitment", ring_member[1].compress().to_bytes());
      }
    }

    for payment in &self.payments {
      match payment {
        InternalPayment::Payment(payment, need_dummy_payment_id) => {
          transcript.append_message(b"payment_address", payment.0.to_string().as_bytes());
          transcript.append_message(b"payment_amount", payment.1.to_le_bytes());
          transcript.append_message(
            b"need_dummy_payment_id",
            [if *need_dummy_payment_id { 1u8 } else { 0u8 }],
          );
        }
        InternalPayment::Change(change, change_view) => {
          transcript.append_message(b"change_address", change.0.to_string().as_bytes());
          transcript.append_message(b"change_amount", change.1.to_le_bytes());
          if let Some(view) = change_view.as_ref() {
            transcript.append_message(b"change_view_key", Zeroizing::new(view.to_bytes()));
          }
        }
      }
    }

    let mut key_images = vec![];
    for (i, (input, _)) in self.inputs.iter().enumerate() {
      // Check this is the right set of keys
      let offset = keys.offset(dfg::Scalar(input.key_offset()));
      if offset.group_key().0 != input.key() {
        Err(TransactionError::WrongPrivateKey)?;
      }

      let clsag = ClsagMultisig::new(transcript.clone(), input.key(), inputs[i].clone());
      key_images.push((
        clsag.H,
        keys.current_offset().unwrap_or(dfg::Scalar::ZERO).0 + self.inputs[i].0.key_offset(),
      ));
      clsags.push(AlgorithmMachine::new(clsag, offset));
    }

    Ok(TransactionMachine {
      signable: self,

      i: keys.params().i(),
      transcript,

      key_images,
      inputs,
      clsags,
    })
  }
}

impl PreprocessMachine for TransactionMachine {
  type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
  type Signature = Transaction;
  type SignMachine = TransactionSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    mut self,
    rng: &mut R,
  ) -> (TransactionSignMachine, Self::Preprocess) {
    // Iterate over each CLSAG calling preprocess
    let mut preprocesses = Vec::with_capacity(self.clsags.len());
    let clsags = self
      .clsags
      .drain(..)
      .map(|clsag| {
        let (clsag, preprocess) = clsag.preprocess(rng);
        preprocesses.push(preprocess);
        clsag
      })
      .collect();
    let our_preprocess = preprocesses.clone();

    // We could add further entropy here, and previous versions of this library did so.
    // As of right now, the multisig's key, the inputs being spent, and the FROST data itself
    // will be used for RNG seeds. In order to recreate these RNG seeds, breaking privacy,
    // counterparties must have knowledge of the multisig, either the view key or access to the
    // coordination layer, and then access to the actual FROST signing process.
    // If the commitments are sent in plain text, then entropy here would also be, so it
    // wouldn't increase privacy. If they're not sent in plain text, or are otherwise
    // inaccessible, they already offer sufficient entropy. That's why further entropy is not
    // included.

    (
      TransactionSignMachine {
        signable: self.signable,

        i: self.i,
        transcript: self.transcript,

        key_images: self.key_images,
        inputs: self.inputs,
        clsags,

        our_preprocess,
      },
      preprocesses,
    )
  }
}

impl SignMachine<Transaction> for TransactionSignMachine {
  type Params = ();
  type Keys = ThresholdKeys<Ed25519>;
  type Preprocess = Vec<Preprocess<Ed25519, ClsagAddendum>>;
  type SignatureShare = Vec<SignatureShare<Ed25519>>;
  type SignatureMachine = TransactionSignatureMachine;

  fn cache(self) -> CachedPreprocess {
    unimplemented!(
      "Monero transactions don't support caching their preprocesses due to {}",
      "being already bound to a specific transaction"
    );
  }

  fn from_cache(
    (): (),
    _: ThresholdKeys<Ed25519>,
    _: CachedPreprocess,
  ) -> (Self, Self::Preprocess) {
    unimplemented!(
      "Monero transactions don't support caching their preprocesses due to {}",
      "being already bound to a specific transaction"
    );
  }

  fn read_preprocess<R: Read>(&self, reader: &mut R) -> io::Result<Self::Preprocess> {
    self.clsags.iter().map(|clsag| clsag.read_preprocess(reader)).collect()
  }

  fn sign(
    mut self,
    mut commitments: HashMap<Participant, Self::Preprocess>,
    msg: &[u8],
  ) -> Result<(TransactionSignatureMachine, Self::SignatureShare), FrostError> {
    if !msg.is_empty() {
      panic!("message was passed to the TransactionMachine when it generates its own");
    }

    // Find out who's included
    // This may not be a valid set of signers, yet the algorithm machine will error if it's not
    commitments.remove(&self.i); // Remove ourself, if we were included for some reason
    let mut included = commitments.keys().copied().collect::<Vec<_>>();
    included.push(self.i);
    included.sort_unstable();

    // Convert the unified commitments to a Vec of the individual commitments
    let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
    let mut commitments = (0 .. self.clsags.len())
      .map(|c| {
        included
          .iter()
          .map(|l| {
            // Add all commitments to the transcript for their entropy
            // While each CLSAG will do this as they need to for security, they have their own
            // transcripts cloned from this TX's initial premise's transcript. For our TX
            // transcript to have the CLSAG data for entropy, we have to add it ourselves here
            self.transcript.append_message(b"participant", (*l).to_bytes());

            let preprocess = if *l == self.i {
              self.our_preprocess[c].clone()
            } else {
              commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?[c].clone()
            };

            {
              let mut buf = vec![];
              preprocess.write(&mut buf).unwrap();
              self.transcript.append_message(b"preprocess", buf);
            }

            // While here, calculate the key image
            // Clsag will parse/calculate/validate this as needed, yet doing so here as well
            // provides the easiest API overall, as this is where the TX is (which needs the key
            // images in its message), along with where the outputs are determined (where our
            // outputs may need these in order to guarantee uniqueness)
            add_key_image_share(
              &mut images[c],
              self.key_images[c].0,
              self.key_images[c].1,
              &included,
              *l,
              preprocess.addendum.key_image.0,
            );

            Ok((*l, preprocess))
          })
          .collect::<Result<HashMap<_, _>, _>>()
      })
      .collect::<Result<Vec<_>, _>>()?;

    // Remove our preprocess, which shouldn't be here. It was just the easiest way to implement
    // the above
    for map in &mut commitments {
      map.remove(&self.i);
    }

    // Create the actual transaction
    let (mut tx, output_masks) = {
      let mut sorted_images = images.clone();
      sorted_images.sort_by(key_image_sort);

      self.signable.prepare_transaction(
        // Technically, r_seed is used for the transaction keys if it's provided
        &mut ChaCha20Rng::from_seed(self.transcript.rng_seed(b"transaction_keys_bulletproofs")),
        uniqueness(
          &sorted_images
            .iter()
            .map(|image| Input::ToKey { amount: None, key_offsets: vec![], key_image: *image })
            .collect::<Vec<_>>(),
        ),
      )
    };

    // Sort the inputs, as expected
    let mut sorted = Vec::with_capacity(self.clsags.len());
    while !self.clsags.is_empty() {
      let (inputs, decoys) = self.signable.inputs.swap_remove(0);
      sorted.push((
        images.swap_remove(0),
        inputs,
        decoys,
        self.inputs.swap_remove(0),
        self.clsags.swap_remove(0),
        commitments.swap_remove(0),
      ));
    }
    sorted.sort_by(|x, y| key_image_sort(&x.0, &y.0));

    let mut rng = ChaCha20Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks"));
    let mut sum_pseudo_outs = Scalar::ZERO;
    while !sorted.is_empty() {
      let value = sorted.remove(0);

      let mut mask = random_scalar(&mut rng);
      if sorted.is_empty() {
        mask = output_masks - sum_pseudo_outs;
      } else {
        sum_pseudo_outs += mask;
      }

      tx.prefix.inputs.push(Input::ToKey {
        amount: None,
        key_offsets: value.2.offsets.clone(),
        key_image: value.0,
      });

      *value.3.write().unwrap() = Some(ClsagDetails::new(
        ClsagInput::new(value.1.commitment().clone(), value.2).map_err(|_| {
          panic!("Signing an input which isn't present in the ring we created for it")
        })?,
        mask,
      ));

      self.clsags.push(value.4);
      commitments.push(value.5);
    }

    let msg = tx.signature_hash();

    // Iterate over each CLSAG calling sign
    let mut shares = Vec::with_capacity(self.clsags.len());
    let clsags = self
      .clsags
      .drain(..)
      .map(|clsag| {
        let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?;
        shares.push(share);
        Ok(clsag)
      })
      .collect::<Result<_, _>>()?;

    Ok((TransactionSignatureMachine { tx, clsags }, shares))
  }
}

impl SignatureMachine<Transaction> for TransactionSignatureMachine {
  type SignatureShare = Vec<SignatureShare<Ed25519>>;

  fn read_share<R: Read>(&self, reader: &mut R) -> io::Result<Self::SignatureShare> {
    self.clsags.iter().map(|clsag| clsag.read_share(reader)).collect()
  }

  fn complete(
    mut self,
    shares: HashMap<Participant, Self::SignatureShare>,
  ) -> Result<Transaction, FrostError> {
    let mut tx = self.tx;
    match tx.rct_signatures.prunable {
      RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
      RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
        for (c, clsag) in self.clsags.drain(..).enumerate() {
          let (clsag, pseudo_out) = clsag.complete(
            shares.iter().map(|(l, shares)| (*l, shares[c].clone())).collect::<HashMap<_, _>>(),
          )?;
          clsags.push(clsag);
          pseudo_outs.push(pseudo_out);
        }
      }
      RctPrunable::AggregateMlsagBorromean { .. } |
      RctPrunable::MlsagBorromean { .. } |
      RctPrunable::MlsagBulletproofs { .. } => {
        unreachable!("attempted to sign a multisig TX which wasn't CLSAG")
      }
    }
    Ok(tx)
  }
}
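// A single-participant sketch of the signing flow above, assuming the other participants'
// preprocesses and shares arrive via some hypothetical transport (passed in as arguments
// here so the end-to-end control flow is visible). `rand_core::OsRng` assumes rand_core's
// getrandom feature.
fn multisig_flow_sketch(
  signable: SignableTransaction,
  keys: &ThresholdKeys<Ed25519>,
  their_preprocesses: HashMap<Participant, Vec<Preprocess<Ed25519, ClsagAddendum>>>,
  their_shares: HashMap<Participant, Vec<SignatureShare<Ed25519>>>,
) -> Result<Transaction, FrostError> {
  use rand_core::OsRng;
  let machine = signable
    .multisig(keys, RecommendedTranscript::new(b"Example Transaction"))
    .expect("invalid signable transaction");
  // Round 1: generate this signer's preprocess and distribute it to the other participants.
  let (machine, _our_preprocess) = machine.preprocess(&mut OsRng);
  // Round 2: the message is empty, as the machine generates the TX's signature hash itself.
  let (machine, _our_share) = machine.sign(their_preprocesses, &[])?;
  // Completion: aggregate everyone's shares into the signed transaction.
  machine.complete(their_shares)
}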
@@ -1,326 +0,0 @@
use core::ops::Deref;
use std_shims::{sync::OnceLock, collections::HashSet};

use zeroize::Zeroizing;
use rand_core::OsRng;

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};

use tokio::sync::Mutex;

use monero_serai::{
  random_scalar,
  rpc::{HttpRpc, Rpc},
  wallet::{
    ViewPair, Scanner,
    address::{Network, AddressType, AddressSpec, AddressMeta, MoneroAddress},
    SpendableOutput, Fee,
  },
  transaction::Transaction,
  DEFAULT_LOCK_WINDOW,
};

pub fn random_address() -> (Scalar, ViewPair, MoneroAddress) {
  let spend = random_scalar(&mut OsRng);
  let spend_pub = &spend * ED25519_BASEPOINT_TABLE;
  let view = Zeroizing::new(random_scalar(&mut OsRng));
  (
    spend,
    ViewPair::new(spend_pub, view.clone()),
    MoneroAddress {
      meta: AddressMeta::new(Network::Mainnet, AddressType::Standard),
      spend: spend_pub,
      view: view.deref() * ED25519_BASEPOINT_TABLE,
    },
  )
}

// TODO: Support transactions already on-chain
// TODO: Don't have a side effect of mining more blocks than needed under race conditions
pub async fn mine_until_unlocked(rpc: &Rpc<HttpRpc>, addr: &str, tx_hash: [u8; 32]) {
  // Mine until the TX is in a block
  let mut height = rpc.get_height().await.unwrap();
  let mut found = false;
  while !found {
    let block = rpc.get_block_by_number(height - 1).await.unwrap();
    found = match block.txs.iter().find(|&&x| x == tx_hash) {
      Some(_) => true,
      None => {
        height = rpc.generate_blocks(addr, 1).await.unwrap().1 + 1;
        false
      }
    };
  }

  // Mine until the TX's outputs are unlocked
  let o_indexes: Vec<u64> = rpc.get_o_indexes(tx_hash).await.unwrap();
  while rpc
    .get_outs(&o_indexes)
    .await
    .unwrap()
    .into_iter()
    .all(|o| (!(o.unlocked && height >= (o.height + DEFAULT_LOCK_WINDOW))))
  {
    height = rpc.generate_blocks(addr, 1).await.unwrap().1 + 1;
  }
}

// Mines 60 blocks and returns an unlocked miner TX output.
#[allow(dead_code)]
pub async fn get_miner_tx_output(rpc: &Rpc<HttpRpc>, view: &ViewPair) -> SpendableOutput {
  let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));

  // Mine 60 blocks to unlock a miner TX
  let start = rpc.get_height().await.unwrap();
  rpc
    .generate_blocks(&view.address(Network::Mainnet, AddressSpec::Standard).to_string(), 60)
    .await
    .unwrap();

  let block = rpc.get_block_by_number(start).await.unwrap();
  scanner.scan(rpc, &block).await.unwrap().swap_remove(0).ignore_timelock().swap_remove(0)
}

/// Make sure the weight and fee match the expected calculation.
pub fn check_weight_and_fee(tx: &Transaction, fee_rate: Fee) {
  let fee = tx.rct_signatures.base.fee;

  let weight = tx.weight();
  let expected_weight = fee_rate.calculate_weight_from_fee(fee);
  assert_eq!(weight, expected_weight);

  let expected_fee = fee_rate.calculate_fee_from_weight(weight);
  assert_eq!(fee, expected_fee);
}

pub async fn rpc() -> Rpc<HttpRpc> {
  let rpc = HttpRpc::new("http://serai:seraidex@127.0.0.1:18081".to_string()).await.unwrap();

  // Only run the following setup once
  if rpc.get_height().await.unwrap() != 1 {
    return rpc;
  }

  let addr = MoneroAddress {
    meta: AddressMeta::new(Network::Mainnet, AddressType::Standard),
    spend: &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
    view: &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
  }
  .to_string();

  // Mine 40 blocks to ensure decoy availability
  rpc.generate_blocks(&addr, 40).await.unwrap();

  // Make sure we recognize the protocol
  rpc.get_protocol().await.unwrap();

  rpc
}

pub static SEQUENTIAL: OnceLock<Mutex<()>> = OnceLock::new();

#[macro_export]
macro_rules! async_sequential {
  ($(async fn $name: ident() $body: block)*) => {
    $(
      #[tokio::test]
      async fn $name() {
        let guard = runner::SEQUENTIAL.get_or_init(|| tokio::sync::Mutex::new(())).lock().await;
        let local = tokio::task::LocalSet::new();
        local.run_until(async move {
          if let Err(err) = tokio::task::spawn_local(async move { $body }).await {
            drop(guard);
            Err(err).unwrap()
          }
        }).await;
      }
    )*
  }
}

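// A usage sketch for the macro above, as a test file with `mod runner;` would invoke it
// (the test name and body are hypothetical). The expansion is a #[tokio::test] which
// serializes against SEQUENTIAL, so tests sharing the single regtest daemon can't
// interleave.
async_sequential! {
  async fn example_sequential_test() {
    let rpc = rpc().await;
    assert!(rpc.get_height().await.unwrap() >= 1);
  }
}
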
#[macro_export]
|
|
||||||
macro_rules! test {
|
|
||||||
(
|
|
||||||
$name: ident,
|
|
||||||
(
|
|
||||||
$first_tx: expr,
|
|
||||||
$first_checks: expr,
|
|
||||||
),
|
|
||||||
$((
|
|
||||||
$tx: expr,
|
|
||||||
$checks: expr,
|
|
||||||
)$(,)?),*
|
|
||||||
) => {
|
|
||||||
async_sequential! {
|
|
||||||
async fn $name() {
|
|
||||||
use core::{ops::Deref, any::Any};
|
|
||||||
use std::collections::HashSet;
|
|
||||||
#[cfg(feature = "multisig")]
|
|
||||||
use std::collections::HashMap;
|
|
||||||
|
|
||||||
use zeroize::Zeroizing;
|
|
||||||
use rand_core::OsRng;
|
|
||||||
|
|
||||||
use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;
|
|
||||||
|
|
||||||
#[cfg(feature = "multisig")]
|
|
||||||
use transcript::{Transcript, RecommendedTranscript};
|
|
||||||
#[cfg(feature = "multisig")]
|
|
||||||
use frost::{
|
|
||||||
curve::Ed25519,
|
|
||||||
Participant,
|
|
||||||
tests::{THRESHOLD, key_gen},
|
|
||||||
};
|
|
||||||
|
|
||||||
use monero_serai::{
|
|
||||||
random_scalar,
|
|
||||||
wallet::{
|
|
||||||
address::{Network, AddressSpec}, ViewPair, Scanner, Change, Decoys, FeePriority,
|
|
||||||
SignableTransaction, SignableTransactionBuilder,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
use runner::{
|
|
||||||
random_address, rpc, mine_until_unlocked, get_miner_tx_output,
|
|
||||||
check_weight_and_fee,
|
|
||||||
};
|
|
||||||
|
|
||||||
type Builder = SignableTransactionBuilder;
|
|
||||||
|
|
||||||
// Run each function as both a single signer and as a multisig
|
|
||||||
#[allow(clippy::redundant_closure_call)]
|
|
||||||
for multisig in [false, true] {
|
|
||||||
// Only run the multisig variant if multisig is enabled
|
|
||||||
if multisig {
|
|
||||||
#[cfg(not(feature = "multisig"))]
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let spend = Zeroizing::new(random_scalar(&mut OsRng));
|
|
||||||
#[cfg(feature = "multisig")]
|
|
||||||
let keys = key_gen::<_, Ed25519>(&mut OsRng);
|
|
||||||
|
|
||||||
let spend_pub = if !multisig {
|
|
||||||
spend.deref() * ED25519_BASEPOINT_TABLE
|
|
||||||
} else {
|
|
||||||
#[cfg(not(feature = "multisig"))]
|
|
||||||
panic!("Multisig branch called without the multisig feature");
|
|
||||||
#[cfg(feature = "multisig")]
|
|
||||||
keys[&Participant::new(1).unwrap()].group_key().0
|
|
||||||
};
|
|
||||||
|
|
||||||
let rpc = rpc().await;
|
|
          let view = ViewPair::new(spend_pub, Zeroizing::new(random_scalar(&mut OsRng)));
          let addr = view.address(Network::Mainnet, AddressSpec::Standard);

          let miner_tx = get_miner_tx_output(&rpc, &view).await;

          let protocol = rpc.get_protocol().await.unwrap();

          let builder = SignableTransactionBuilder::new(
            protocol,
            rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
            Change::new(
              &ViewPair::new(
                &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
                Zeroizing::new(random_scalar(&mut OsRng))
              ),
              false
            ),
          );

          let sign = |tx: SignableTransaction| {
            let spend = spend.clone();
            #[cfg(feature = "multisig")]
            let keys = keys.clone();
            async move {
              if !multisig {
                tx.sign(&mut OsRng, &spend).unwrap()
              } else {
                #[cfg(not(feature = "multisig"))]
                panic!("Multisig branch called without the multisig feature");
                #[cfg(feature = "multisig")]
                {
                  let mut machines = HashMap::new();
                  for i in (1 ..= THRESHOLD).map(|i| Participant::new(i).unwrap()) {
                    machines.insert(
                      i,
                      tx
                        .clone()
                        .multisig(
                          &keys[&i],
                          RecommendedTranscript::new(b"Monero Serai Test Transaction"),
                        )
                        .unwrap(),
                    );
                  }

                  frost::tests::sign_without_caching(&mut OsRng, machines, &[])
                }
              }
            }
          };

          // TODO: Generate a distinct wallet for each transaction to prevent overlap
          let next_addr = addr;

          let temp = Box::new({
            let mut builder = builder.clone();

            let decoys = Decoys::fingerprintable_canonical_select(
              &mut OsRng,
              &rpc,
              protocol.ring_len(),
              rpc.get_height().await.unwrap(),
              &[miner_tx.clone()],
            )
            .await
            .unwrap();
            builder.add_input((miner_tx, decoys.first().unwrap().clone()));

            let (tx, state) = ($first_tx)(rpc.clone(), builder, next_addr).await;
            let fee_rate = tx.fee_rate().clone();
            let signed = sign(tx).await;
            rpc.publish_transaction(&signed).await.unwrap();
            mine_until_unlocked(&rpc, &random_address().2.to_string(), signed.hash()).await;
            let tx = rpc.get_transaction(signed.hash()).await.unwrap();
            check_weight_and_fee(&tx, fee_rate);
            let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
            ($first_checks)(rpc.clone(), tx, scanner, state).await
          });
          #[allow(unused_variables, unused_mut, unused_assignments)]
          let mut carried_state: Box<dyn Any> = temp;

          $(
            let (tx, state) = ($tx)(
              protocol,
              rpc.clone(),
              builder.clone(),
              next_addr,
              *carried_state.downcast().unwrap()
            ).await;
            let fee_rate = tx.fee_rate().clone();
            let signed = sign(tx).await;
            rpc.publish_transaction(&signed).await.unwrap();
            mine_until_unlocked(&rpc, &random_address().2.to_string(), signed.hash()).await;
            let tx = rpc.get_transaction(signed.hash()).await.unwrap();
            if stringify!($name) != "spend_one_input_to_two_outputs_no_change" {
              // Skip weight and fee check for the above test because when there is no change,
              // the change is added to the fee
              check_weight_and_fee(&tx, fee_rate);
            }
            #[allow(unused_assignments)]
            {
              let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
              carried_state = Box::new(($checks)(rpc.clone(), tx, scanner, state).await);
            }
          )*
        }
      }
    }
  }
}
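For orientation: the runner above threads per-stage test state between the supplied closures as a type-erased `Box<dyn Any>`, downcast back to its concrete type at each step. A self-contained sketch of that pattern (illustrative values, independent of the macro):

use std::any::Any;

fn main() {
  // An earlier stage stores its state type-erased, as the runner does with `carried_state`
  let carried: Box<dyn Any> = Box::new((5u64, String::from("scanner state")));

  // A later stage recovers the concrete type; a mismatched type here is a test bug,
  // so unwrapping (and panicking on failure) matches the runner's behavior
  let (amount, tag): (u64, String) = *carried.downcast().unwrap();
  assert_eq!(amount, 5);
  assert_eq!(tag, "scanner state");
}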
@@ -1,305 +0,0 @@
use rand::RngCore;

use monero_serai::{
  transaction::Transaction,
  wallet::{address::SubaddressIndex, extra::PaymentId},
};

mod runner;

test!(
  scan_standard_address,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      builder.add_payment(view.address(Network::Mainnet, AddressSpec::Standard), 5);
      (builder.build().unwrap(), scanner)
    },
    |_, tx: Transaction, _, mut state: Scanner| async move {
      let output = state.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      let dummy_payment_id = PaymentId::Encrypted([0u8; 8]);
      assert_eq!(output.metadata.payment_id, Some(dummy_payment_id));
    },
  ),
);

test!(
  scan_subaddress,
  (
    |_, mut builder: Builder, _| async move {
      let subaddress = SubaddressIndex::new(0, 1).unwrap();

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      scanner.register_subaddress(subaddress);

      builder.add_payment(view.address(Network::Mainnet, AddressSpec::Subaddress(subaddress)), 5);
      (builder.build().unwrap(), (scanner, subaddress))
    },
    |_, tx: Transaction, _, mut state: (Scanner, SubaddressIndex)| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.subaddress, Some(state.1));
    },
  ),
);

test!(
  scan_integrated_address,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));

      let mut payment_id = [0u8; 8];
      OsRng.fill_bytes(&mut payment_id);

      builder.add_payment(view.address(Network::Mainnet, AddressSpec::Integrated(payment_id)), 5);
      (builder.build().unwrap(), (scanner, payment_id))
    },
    |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
    },
  ),
);

test!(
  scan_featured_standard,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured { subaddress: None, payment_id: None, guaranteed: false },
        ),
        5,
      );
      (builder.build().unwrap(), scanner)
    },
    |_, tx: Transaction, _, mut state: Scanner| async move {
      let output = state.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
    },
  ),
);

test!(
  scan_featured_subaddress,
  (
    |_, mut builder: Builder, _| async move {
      let subaddress = SubaddressIndex::new(0, 2).unwrap();

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      scanner.register_subaddress(subaddress);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: Some(subaddress),
            payment_id: None,
            guaranteed: false,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, subaddress))
    },
    |_, tx: Transaction, _, mut state: (Scanner, SubaddressIndex)| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.subaddress, Some(state.1));
    },
  ),
);

test!(
  scan_featured_integrated,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      let mut payment_id = [0u8; 8];
      OsRng.fill_bytes(&mut payment_id);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: None,
            payment_id: Some(payment_id),
            guaranteed: false,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, payment_id))
    },
    |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
    },
  ),
);

test!(
  scan_featured_integrated_subaddress,
  (
    |_, mut builder: Builder, _| async move {
      let subaddress = SubaddressIndex::new(0, 3).unwrap();

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));
      scanner.register_subaddress(subaddress);

      let mut payment_id = [0u8; 8];
      OsRng.fill_bytes(&mut payment_id);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: Some(subaddress),
            payment_id: Some(payment_id),
            guaranteed: false,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, payment_id, subaddress))
    },
    |_, tx: Transaction, _, mut state: (Scanner, [u8; 8], SubaddressIndex)| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
      assert_eq!(output.metadata.subaddress, Some(state.2));
    },
  ),
);

test!(
  scan_guaranteed_standard,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), None);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured { subaddress: None, payment_id: None, guaranteed: true },
        ),
        5,
      );
      (builder.build().unwrap(), scanner)
    },
    |_, tx: Transaction, _, mut state: Scanner| async move {
      let output = state.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
    },
  ),
);

test!(
  scan_guaranteed_subaddress,
  (
    |_, mut builder: Builder, _| async move {
      let subaddress = SubaddressIndex::new(1, 0).unwrap();

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), None);
      scanner.register_subaddress(subaddress);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: Some(subaddress),
            payment_id: None,
            guaranteed: true,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, subaddress))
    },
    |_, tx: Transaction, _, mut state: (Scanner, SubaddressIndex)| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.subaddress, Some(state.1));
    },
  ),
);

test!(
  scan_guaranteed_integrated,
  (
    |_, mut builder: Builder, _| async move {
      let view = runner::random_address().1;
      let scanner = Scanner::from_view(view.clone(), None);
      let mut payment_id = [0u8; 8];
      OsRng.fill_bytes(&mut payment_id);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: None,
            payment_id: Some(payment_id),
            guaranteed: true,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, payment_id))
    },
    |_, tx: Transaction, _, mut state: (Scanner, [u8; 8])| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
    },
  ),
);

test!(
  scan_guaranteed_integrated_subaddress,
  (
    |_, mut builder: Builder, _| async move {
      let subaddress = SubaddressIndex::new(1, 1).unwrap();

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), None);
      scanner.register_subaddress(subaddress);

      let mut payment_id = [0u8; 8];
      OsRng.fill_bytes(&mut payment_id);

      builder.add_payment(
        view.address(
          Network::Mainnet,
          AddressSpec::Featured {
            subaddress: Some(subaddress),
            payment_id: Some(payment_id),
            guaranteed: true,
          },
        ),
        5,
      );
      (builder.build().unwrap(), (scanner, payment_id, subaddress))
    },
    |_, tx: Transaction, _, mut state: (Scanner, [u8; 8], SubaddressIndex)| async move {
      let output = state.0.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
      assert_eq!(output.metadata.payment_id, Some(PaymentId::Encrypted(state.1)));
      assert_eq!(output.metadata.subaddress, Some(state.2));
    },
  ),
);
@@ -1,316 +0,0 @@
use rand_core::OsRng;

use monero_serai::{
  transaction::Transaction,
  wallet::{
    extra::Extra, address::SubaddressIndex, ReceivedOutput, SpendableOutput, Decoys,
    SignableTransactionBuilder,
  },
  rpc::{Rpc, HttpRpc},
  Protocol,
};

mod runner;

// Set up inputs, select decoys, then add them to the TX builder
async fn add_inputs(
  protocol: Protocol,
  rpc: &Rpc<HttpRpc>,
  outputs: Vec<ReceivedOutput>,
  builder: &mut SignableTransactionBuilder,
) {
  let mut spendable_outputs = Vec::with_capacity(outputs.len());
  for output in outputs {
    spendable_outputs.push(SpendableOutput::from(rpc, output).await.unwrap());
  }

  let decoys = Decoys::fingerprintable_canonical_select(
    &mut OsRng,
    rpc,
    protocol.ring_len(),
    rpc.get_height().await.unwrap(),
    &spendable_outputs,
  )
  .await
  .unwrap();

  let inputs = spendable_outputs.into_iter().zip(decoys).collect::<Vec<_>>();

  builder.add_inputs(&inputs);
}

test!(
  spend_miner_output,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 5);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 5);
    },
  ),
);

test!(
  spend_multiple_outputs,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 1000000000000);
      builder.add_payment(addr, 2000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 1000000000000);
      assert_eq!(outputs[1].commitment().amount, 2000000000000);
      outputs
    },
  ),
  (
    |protocol: Protocol, rpc, mut builder: Builder, addr, outputs: Vec<ReceivedOutput>| async move {
      add_inputs(protocol, &rpc, outputs, &mut builder).await;
      builder.add_payment(addr, 6);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 6);
    },
  ),
);

test!(
  // Ideally, this would be single_R, yet it isn't feasible to apply allow(non_snake_case) here
  single_r_subaddress_send,
  (
    // Consume this builder for an output we can use in the future
    // This is needed because we can't get the input from the passed in builder
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 1000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 1000000000000);
      outputs
    },
  ),
  (
    |protocol, rpc: Rpc<_>, _, _, outputs: Vec<ReceivedOutput>| async move {
      use monero_serai::wallet::FeePriority;

      let change_view = ViewPair::new(
        &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
        Zeroizing::new(random_scalar(&mut OsRng)),
      );

      let mut builder = SignableTransactionBuilder::new(
        protocol,
        rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
        Change::new(&change_view, false),
      );
      add_inputs(protocol, &rpc, vec![outputs.first().unwrap().clone()], &mut builder).await;

      // Send to a subaddress
      let sub_view = ViewPair::new(
        &random_scalar(&mut OsRng) * ED25519_BASEPOINT_TABLE,
        Zeroizing::new(random_scalar(&mut OsRng)),
      );
      builder.add_payment(
        sub_view
          .address(Network::Mainnet, AddressSpec::Subaddress(SubaddressIndex::new(0, 1).unwrap())),
        1,
      );
      (builder.build().unwrap(), (change_view, sub_view))
    },
    |_, tx: Transaction, _, views: (ViewPair, ViewPair)| async move {
      // Make sure the change can pick up its output
      let mut change_scanner = Scanner::from_view(views.0, Some(HashSet::new()));
      assert!(change_scanner.scan_transaction(&tx).not_locked().len() == 1);

      // Make sure the subaddress can pick up its output
      let mut sub_scanner = Scanner::from_view(views.1, Some(HashSet::new()));
      sub_scanner.register_subaddress(SubaddressIndex::new(0, 1).unwrap());
      let sub_outputs = sub_scanner.scan_transaction(&tx).not_locked();
      assert!(sub_outputs.len() == 1);
      assert_eq!(sub_outputs[0].commitment().amount, 1);

      // Make sure only one R was included in TX extra
      assert!(Extra::read::<&[u8]>(&mut tx.prefix.extra.as_ref())
        .unwrap()
        .keys()
        .unwrap()
        .1
        .is_none());
    },
  ),
);

test!(
  spend_one_input_to_one_output_plus_change,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 2000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 2000000000000);
      outputs
    },
  ),
  (
    |protocol: Protocol, rpc, mut builder: Builder, addr, outputs: Vec<ReceivedOutput>| async move {
      add_inputs(protocol, &rpc, outputs, &mut builder).await;
      builder.add_payment(addr, 2);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let output = scanner.scan_transaction(&tx).not_locked().swap_remove(0);
      assert_eq!(output.commitment().amount, 2);
    },
  ),
);

test!(
  spend_max_outputs,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 1000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 1000000000000);
      outputs
    },
  ),
  (
    |protocol: Protocol, rpc, mut builder: Builder, addr, outputs: Vec<ReceivedOutput>| async move {
      add_inputs(protocol, &rpc, outputs, &mut builder).await;

      for i in 0 .. 15 {
        builder.add_payment(addr, i + 1);
      }
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut scanned_tx = scanner.scan_transaction(&tx).not_locked();

      let mut output_amounts = HashSet::new();
      for i in 0 .. 15 {
        output_amounts.insert(i + 1);
      }
      for _ in 0 .. 15 {
        let output = scanned_tx.swap_remove(0);
        let amount = output.commitment().amount;
        assert!(output_amounts.contains(&amount));
        output_amounts.remove(&amount);
      }
    },
  ),
);

test!(
  spend_max_outputs_to_subaddresses,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 1000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 1000000000000);
      outputs
    },
  ),
  (
    |protocol: Protocol, rpc, mut builder: Builder, _, outputs: Vec<ReceivedOutput>| async move {
      add_inputs(protocol, &rpc, outputs, &mut builder).await;

      let view = runner::random_address().1;
      let mut scanner = Scanner::from_view(view.clone(), Some(HashSet::new()));

      let mut subaddresses = vec![];
      for i in 0 .. 15 {
        let subaddress = SubaddressIndex::new(0, i + 1).unwrap();
        scanner.register_subaddress(subaddress);

        builder.add_payment(
          view.address(Network::Mainnet, AddressSpec::Subaddress(subaddress)),
          u64::from(i + 1),
        );
        subaddresses.push(subaddress);
      }

      (builder.build().unwrap(), (scanner, subaddresses))
    },
    |_, tx: Transaction, _, mut state: (Scanner, Vec<SubaddressIndex>)| async move {
      use std::collections::HashMap;

      let mut scanned_tx = state.0.scan_transaction(&tx).not_locked();

      let mut output_amounts_by_subaddress = HashMap::new();
      for i in 0 .. 15 {
        output_amounts_by_subaddress.insert(u64::try_from(i + 1).unwrap(), state.1[i]);
      }
      for _ in 0 .. 15 {
        let output = scanned_tx.swap_remove(0);
        let amount = output.commitment().amount;

        assert!(output_amounts_by_subaddress.contains_key(&amount));
        assert_eq!(output.metadata.subaddress, Some(output_amounts_by_subaddress[&amount]));

        output_amounts_by_subaddress.remove(&amount);
      }
    },
  ),
);

test!(
  spend_one_input_to_two_outputs_no_change,
  (
    |_, mut builder: Builder, addr| async move {
      builder.add_payment(addr, 1000000000000);
      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 1000000000000);
      outputs
    },
  ),
  (
    |protocol, rpc: Rpc<_>, _, addr, outputs: Vec<ReceivedOutput>| async move {
      use monero_serai::wallet::FeePriority;

      let mut builder = SignableTransactionBuilder::new(
        protocol,
        rpc.get_fee(protocol, FeePriority::Unimportant).await.unwrap(),
        Change::fingerprintable(None),
      );
      add_inputs(protocol, &rpc, vec![outputs.first().unwrap().clone()], &mut builder).await;
      builder.add_payment(addr, 10000);
      builder.add_payment(addr, 50000);

      (builder.build().unwrap(), ())
    },
    |_, tx: Transaction, mut scanner: Scanner, ()| async move {
      let mut outputs = scanner.scan_transaction(&tx).not_locked();
      outputs.sort_by(|x, y| x.commitment().amount.cmp(&y.commitment().amount));
      assert_eq!(outputs[0].commitment().amount, 10000);
      assert_eq!(outputs[1].commitment().amount, 50000);

      // The remainder should get shunted to fee, which is fingerprintable
      assert_eq!(tx.rct_signatures.base.fee, 1000000000000 - 10000 - 50000);
    },
  ),
);
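A side note on the last test above: with `Change::fingerprintable(None)` there's no change output, so whatever the explicit payments don't consume becomes the fee, which is exactly what the final assertion checks. The arithmetic, as a standalone check:

fn main() {
  let input: u64 = 1_000_000_000_000; // the single input being spent
  let payments: u64 = 10_000 + 50_000; // the two explicit payments
  // With no change output, the remainder is shunted to the fee
  assert_eq!(input - payments, 999_999_940_000);
}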
@@ -4,6 +4,7 @@ pub use ::parity_db::{Options, Db as ParityDb};
 
 use crate::*;
 
+#[must_use]
 pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);
 
 impl Get for Transaction<'_> {
@@ -11,7 +12,7 @@ impl Get for Transaction<'_> {
     let mut res = self.0.get(&key);
     for change in &self.1 {
       if change.1 == key.as_ref() {
-        res = change.2.clone();
+        res.clone_from(&change.2);
       }
     }
     res
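The `clone_from` swap in the hunk above is the standard Clippy-style refactor: `a.clone_from(&b)` may reuse `a`'s existing allocation, while `a = b.clone()` always constructs a fresh value before dropping the old one. A minimal sketch of the difference:

fn main() {
  let src = vec![1u8, 2, 3];

  // `clone()` plus assignment always builds a new Vec, then drops the old buffer
  let mut dst = src.clone();

  // `clone_from` may instead reuse dst's existing buffer, avoiding a reallocation
  dst.clone_from(&src);
  assert_eq!(dst, src);
}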
@@ -7,6 +7,7 @@ use rocksdb::{
 
 use crate::*;
 
+#[must_use]
 pub struct Transaction<'a, T: ThreadMode>(
   RocksTransaction<'a, OptimisticTransactionDB<T>>,
   &'a OptimisticTransactionDB<T>,
@@ -55,6 +55,10 @@ impl Client {
   fn connector() -> Connector {
     let mut res = HttpConnector::new();
     res.set_keepalive(Some(core::time::Duration::from_secs(60)));
+    res.set_nodelay(true);
+    res.set_reuse_address(true);
+    #[cfg(feature = "tls")]
+    res.enforce_http(false);
     #[cfg(feature = "tls")]
     let res = HttpsConnectorBuilder::new()
       .with_native_roots()
@@ -68,7 +72,9 @@ impl Client {
   pub fn with_connection_pool() -> Client {
     Client {
       connection: Connection::ConnectionPool(
-        HyperClient::builder(TokioExecutor::new()).build(Self::connector()),
+        HyperClient::builder(TokioExecutor::new())
+          .pool_idle_timeout(core::time::Duration::from_secs(60))
+          .build(Self::connector()),
       ),
     }
   }
@@ -17,7 +17,7 @@ rustdoc-args = ["--cfg", "docsrs"]
 workspace = true
 
 [dependencies]
-spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "once"] }
+spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "lazy"] }
 hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }
 
 [features]
@@ -26,27 +26,6 @@ mod mutex_shim {
 pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};
 
 #[cfg(feature = "std")]
-pub use std::sync::OnceLock;
+pub use std::sync::LazyLock;
 #[cfg(not(feature = "std"))]
-mod oncelock_shim {
-  use spin::Once;
-
-  pub struct OnceLock<T>(Once<T>);
-  impl<T> OnceLock<T> {
-    pub const fn new() -> OnceLock<T> {
-      OnceLock(Once::new())
-    }
-    pub fn get(&self) -> Option<&T> {
-      self.0.poll()
-    }
-    pub fn get_mut(&mut self) -> Option<&mut T> {
-      self.0.get_mut()
-    }
-
-    pub fn get_or_init<F: FnOnce() -> T>(&self, f: F) -> &T {
-      self.0.call_once(f)
-    }
-  }
-}
-#[cfg(not(feature = "std"))]
-pub use oncelock_shim::*;
+pub use spin::Lazy as LazyLock;
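This removal works because `std::sync::LazyLock` (stable since Rust 1.80) and `spin::Lazy` expose the same deref-on-first-use API, so one re-export per configuration replaces the hand-rolled `OnceLock` shim; that's also why the previous hunk enables the `lazy` feature on `spin`. An illustrative use of the re-exported type, assuming a 1.80+ toolchain:

use std::sync::LazyLock;

// Initialized thread-safely on first access; no explicit get_or_init call needed
static SQUARES: LazyLock<Vec<u64>> = LazyLock::new(|| (0..10).map(|i| i * i).collect());

fn main() {
  // Dereferencing runs the initializer exactly once, then reuses the value
  assert_eq!(SQUARES[3], 9);
}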
@@ -7,7 +7,7 @@ repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
 authors = ["Luke Parker <lukeparker5132@gmail.com>"]
 keywords = []
 edition = "2021"
-rust-version = "1.60"
+rust-version = "1.77.0"
 
 [package.metadata.docs.rs]
 all-features = true
@@ -19,8 +19,10 @@ workspace = true
 [dependencies]
 zeroize = { version = "^1.5", default-features = false }
 
+[build-dependencies]
+rustversion = { version = "1", default-features = false }
+
 [features]
 std = ["zeroize/std"]
 default = ["std"]
-# Commented for now as it requires nightly and we don't use nightly
-# allocator = []
+allocator = []
common/zalloc/build.rs (new file, 10 lines)
@@ -0,0 +1,10 @@
+#[rustversion::nightly]
+fn main() {
+  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
+  println!("cargo::rustc-cfg=zalloc_rustc_nightly");
+}
+
+#[rustversion::not(nightly)]
+fn main() {
+  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
+}
@@ -1,6 +1,6 @@
 #![cfg_attr(docsrs, feature(doc_cfg))]
 #![cfg_attr(docsrs, feature(doc_auto_cfg))]
-#![cfg_attr(feature = "allocator", feature(allocator_api))]
+#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]
 
 //! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
 //! This can either be used with Box (requires nightly and the "allocator" feature) to provide the
@@ -17,12 +17,12 @@ use zeroize::Zeroize;
 /// An allocator wrapper which zeroizes its memory on dealloc.
 pub struct ZeroizingAlloc<T>(pub T);
 
-#[cfg(feature = "allocator")]
+#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
 use core::{
   ptr::NonNull,
   alloc::{AllocError, Allocator},
 };
-#[cfg(feature = "allocator")]
+#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
 unsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {
   fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
     self.0.allocate(layout)
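The `build.rs` added above and the `lib.rs` hunks after it together implement a common nightly-gating pattern: `rustversion` picks a `main` based on the compiler channel, emitting the custom `zalloc_rustc_nightly` cfg only on nightly, so `feature(allocator_api)` is only requested where it can compile and `cargo build --features allocator` no longer breaks on stable. A toy sketch of the consumer side (the cfg name here is a stand-in; it would be declared via `cargo::rustc-check-cfg` exactly as in the build script to keep lints quiet):

// Compiled only when the build script detected nightly and set the cfg
#[cfg(my_nightly_cfg)]
fn which_channel() -> &'static str {
  "nightly: the cfg was set, unstable items may be enabled"
}

// Fallback used on stable/beta, where the cfg stays unset
#[cfg(not(my_nightly_cfg))]
fn which_channel() -> &'static str {
  "stable: the cfg is absent, unstable gates compile out"
}

fn main() {
  println!("{}", which_channel());
}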
@@ -20,6 +20,7 @@ workspace = true
 async-trait = { version = "0.1", default-features = false }
 
 zeroize = { version = "^1.5", default-features = false, features = ["std"] }
+bitvec = { version = "1", default-features = false, features = ["std"] }
 rand_core = { version = "0.6", default-features = false, features = ["std"] }
 
 blake2 = { version = "0.10", default-features = false, features = ["std"] }
@@ -122,7 +122,7 @@ impl QueuedBatchesDb {
 
   pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
     let batches_vec = Self::get(txn, set).unwrap_or_default();
-    txn.del(&Self::key(set));
+    txn.del(Self::key(set));
 
     let mut batches: &[u8] = &batches_vec;
     let mut res = vec![];
@@ -16,7 +16,6 @@ use ciphersuite::{
   Ciphersuite, Ristretto,
 };
 use schnorr::SchnorrSignature;
-use frost::Participant;
 
 use serai_db::{DbTxn, Db};
 
@@ -114,16 +113,17 @@ async fn add_tributary<D: Db, Pro: Processors, P: P2p>(
   // If we're rebooting, we'll re-fire this message
   // This is safe due to the message-queue deduplicating based off the intent system
   let set = spec.set();
-  let our_i = spec
-    .i(&[], Ristretto::generator() * key.deref())
-    .expect("adding a tributary for a set we aren't in set for");
   processors
     .send(
      set.network,
       processor_messages::key_gen::CoordinatorMessage::GenerateKey {
-        id: processor_messages::key_gen::KeyGenId { session: set.session, attempt: 0 },
-        params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
-        shares: u16::from(our_i.end) - u16::from(our_i.start),
+        session: set.session,
+        threshold: spec.t(),
+        evrf_public_keys: spec.evrf_public_keys(),
+        // TODO
+        // params: frost::ThresholdParams::new(spec.t(), spec.n(&[]), our_i.start).unwrap(),
+        // shares: u16::from(our_i.end) - u16::from(our_i.start),
       },
     )
     .await;
@@ -166,12 +166,9 @@ async fn handle_processor_message<D: Db, P: P2p>(
       // We'll only receive these if we fired GenerateKey, which we'll only do if if we're
       // in-set, making the Tributary relevant
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-        key_gen::ProcessorMessage::Commitments { id, .. } |
-        key_gen::ProcessorMessage::InvalidCommitments { id, .. } |
-        key_gen::ProcessorMessage::Shares { id, .. } |
-        key_gen::ProcessorMessage::InvalidShare { id, .. } |
-        key_gen::ProcessorMessage::GeneratedKeyPair { id, .. } |
-        key_gen::ProcessorMessage::Blame { id, .. } => Some(id.session),
+        key_gen::ProcessorMessage::Participation { session, .. } |
+        key_gen::ProcessorMessage::GeneratedKeyPair { session, .. } |
+        key_gen::ProcessorMessage::Blame { session, .. } => Some(*session),
       },
       ProcessorMessage::Sign(inner_msg) => match inner_msg {
         // We'll only receive InvalidParticipant/Preprocess/Share if we're actively signing
@@ -421,125 +418,33 @@ async fn handle_processor_message<D: Db, P: P2p>(
 
     let txs = match msg.msg.clone() {
       ProcessorMessage::KeyGen(inner_msg) => match inner_msg {
-        key_gen::ProcessorMessage::Commitments { id, commitments } => {
-          vec![Transaction::DkgCommitments {
-            attempt: id.attempt,
-            commitments,
-            signed: Transaction::empty_signed(),
-          }]
+        key_gen::ProcessorMessage::Participation { session, participation } => {
+          assert_eq!(session, spec.set().session);
+          vec![Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() }]
         }
-        key_gen::ProcessorMessage::InvalidCommitments { id, faulty } => {
-          // This doesn't have guaranteed timing
-          //
-          // While the party *should* be fatally slashed and not included in future attempts,
-          // they'll actually be fatally slashed (assuming liveness before the Tributary retires)
-          // and not included in future attempts *which begin after the latency window completes*
-          let participant = spec
-            .reverse_lookup_i(
-              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                .expect("participating in DKG attempt yet we didn't save who was removed"),
-              faulty,
-            )
-            .unwrap();
-          vec![Transaction::RemoveParticipantDueToDkg {
-            participant,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::Shares { id, mut shares } => {
-          // Create a MuSig-based machine to inform Substrate of this key generation
-          let nonces = crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, id.attempt);
-
-          let removed = crate::tributary::removed_as_of_dkg_attempt(&txn, genesis, id.attempt)
-            .expect("participating in a DKG attempt yet we didn't track who was removed yet?");
-          let our_i = spec
-            .i(&removed, pub_key)
-            .expect("processor message to DKG for an attempt we aren't a validator in");
-
-          // `tx_shares` needs to be done here as while it can be serialized from the HashMap
-          // without further context, it can't be deserialized without context
-          let mut tx_shares = Vec::with_capacity(shares.len());
-          for shares in &mut shares {
-            tx_shares.push(vec![]);
-            for i in 1 ..= spec.n(&removed) {
-              let i = Participant::new(i).unwrap();
-              if our_i.contains(&i) {
-                if shares.contains_key(&i) {
-                  panic!("processor sent us our own shares");
-                }
-                continue;
-              }
-              tx_shares.last_mut().unwrap().push(
-                shares.remove(&i).expect("processor didn't send share for another validator"),
-              );
-            }
-          }
-
-          vec![Transaction::DkgShares {
-            attempt: id.attempt,
-            shares: tx_shares,
-            confirmation_nonces: nonces,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::InvalidShare { id, accuser, faulty, blame } => {
-          vec![Transaction::InvalidDkgShare {
-            attempt: id.attempt,
-            accuser,
-            faulty,
-            blame,
-            signed: Transaction::empty_signed(),
-          }]
-        }
-        key_gen::ProcessorMessage::GeneratedKeyPair { id, substrate_key, network_key } => {
-          // TODO2: Check the KeyGenId fields
-
-          // Tell the Tributary the key pair, get back the share for the MuSig signature
-          let share = crate::tributary::generated_key_pair::<D>(
+        key_gen::ProcessorMessage::GeneratedKeyPair { session, substrate_key, network_key } => {
+          assert_eq!(session, spec.set().session);
+          crate::tributary::generated_key_pair::<D>(
             &mut txn,
-            key,
-            spec,
+            genesis,
             &KeyPair(Public(substrate_key), network_key.try_into().unwrap()),
-            id.attempt,
           );
 
-          // TODO: Move this into generated_key_pair?
-          match share {
-            Ok(share) => {
-              vec![Transaction::DkgConfirmed {
-                attempt: id.attempt,
-                confirmation_share: share,
-                signed: Transaction::empty_signed(),
-              }]
-            }
-            Err(p) => {
-              let participant = spec
-                .reverse_lookup_i(
-                  &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                    .expect("participating in DKG attempt yet we didn't save who was removed"),
-                  p,
-                )
-                .unwrap();
-              vec![Transaction::RemoveParticipantDueToDkg {
-                participant,
-                signed: Transaction::empty_signed(),
-              }]
-            }
-          }
-        }
-        key_gen::ProcessorMessage::Blame { id, participant } => {
-          let participant = spec
-            .reverse_lookup_i(
-              &crate::tributary::removed_as_of_dkg_attempt(&txn, spec.genesis(), id.attempt)
-                .expect("participating in DKG attempt yet we didn't save who was removed"),
-              participant,
-            )
-            .unwrap();
-          vec![Transaction::RemoveParticipantDueToDkg {
-            participant,
+          // Create a MuSig-based machine to inform Substrate of this key generation
+          let confirmation_nonces =
+            crate::tributary::dkg_confirmation_nonces(key, spec, &mut txn, 0);
+          vec![Transaction::DkgConfirmationNonces {
+            attempt: 0,
+            confirmation_nonces,
             signed: Transaction::empty_signed(),
           }]
         }
+        key_gen::ProcessorMessage::Blame { session, participant } => {
+          assert_eq!(session, spec.set().session);
+          let participant = spec.reverse_lookup_i(participant).unwrap();
+          vec![Transaction::RemoveParticipant { participant, signed: Transaction::empty_signed() }]
+        }
       },
       ProcessorMessage::Sign(msg) => match msg {
         sign::ProcessorMessage::InvalidParticipant { .. } => {
@@ -9,7 +9,7 @@ use std::{
 use async_trait::async_trait;
 use rand_core::{RngCore, OsRng};
 
-use scale::Encode;
+use scale::{Decode, Encode};
 use borsh::{BorshSerialize, BorshDeserialize};
 use serai_client::{primitives::NetworkId, validator_sets::primitives::ValidatorSet, Serai};
 
@@ -29,7 +29,7 @@ use libp2p::{
   noise, yamux,
   request_response::{
     Codec as RrCodecTrait, Message as RrMessage, Event as RrEvent, Config as RrConfig,
-    Behaviour as RrBehavior,
+    Behaviour as RrBehavior, ProtocolSupport,
  },
   gossipsub::{
     IdentTopic, FastMessageId, MessageId, MessageAuthenticity, ValidationMode, ConfigBuilder,
@@ -45,9 +45,28 @@ pub(crate) use tributary::{ReadWrite, P2p as TributaryP2p};
 use crate::{Transaction, Block, Tributary, ActiveTributary, TributaryEvent};
 
 // Block size limit + 1 KB of space for signatures/metadata
-const MAX_LIBP2P_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
+const MAX_LIBP2P_GOSSIP_MESSAGE_SIZE: usize = tributary::BLOCK_SIZE_LIMIT + 1024;
+
+const MAX_LIBP2P_REQRES_MESSAGE_SIZE: usize =
+  (tributary::BLOCK_SIZE_LIMIT * BLOCKS_PER_BATCH) + 1024;
+
+const MAX_LIBP2P_MESSAGE_SIZE: usize = {
+  // Manual `max` since `max` isn't a const fn
+  if MAX_LIBP2P_GOSSIP_MESSAGE_SIZE > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
+    MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
+  } else {
+    MAX_LIBP2P_REQRES_MESSAGE_SIZE
+  }
+};
 
 const LIBP2P_TOPIC: &str = "serai-coordinator";
 
+// Amount of blocks in a minute
+const BLOCKS_PER_MINUTE: usize = (60 / (tributary::tendermint::TARGET_BLOCK_TIME / 1000)) as usize;
+
+// Maximum amount of blocks to send in a batch
+const BLOCKS_PER_BATCH: usize = BLOCKS_PER_MINUTE + 1;
+
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, BorshSerialize, BorshDeserialize)]
 pub struct CosignedBlock {
   pub network: NetworkId,
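The `MAX_LIBP2P_MESSAGE_SIZE` block in the hunk above hand-rolls the comparison because `Ord::max` and `cmp::max` aren't `const fn`. An equivalent `const fn` helper, should more sizes ever need combining (the values below are stand-ins, not the real limits):

// `if` in const fn has been stable since Rust 1.46, so this evaluates at compile time
const fn const_max(a: usize, b: usize) -> usize {
  if a > b { a } else { b }
}

const GOSSIP: usize = 65_536; // stand-in for MAX_LIBP2P_GOSSIP_MESSAGE_SIZE
const REQRES: usize = 1 << 20; // stand-in for MAX_LIBP2P_REQRES_MESSAGE_SIZE
const MAX: usize = const_max(GOSSIP, REQRES);

fn main() {
  assert_eq!(MAX, GOSSIP.max(REQRES));
}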
@@ -173,6 +192,18 @@ pub struct Message<P: P2p> {
   pub msg: Vec<u8>,
 }
 
+#[derive(Clone, Debug, Encode, Decode)]
+pub struct BlockCommit {
+  pub block: Vec<u8>,
+  pub commit: Vec<u8>,
+}
+
+#[derive(Clone, Debug, Encode, Decode)]
+pub struct HeartbeatBatch {
+  pub blocks: Vec<BlockCommit>,
+  pub timestamp: u64,
+}
+
 #[async_trait]
 pub trait P2p: Send + Sync + Clone + fmt::Debug + TributaryP2p {
   type Id: Send + Sync + Clone + Copy + fmt::Debug;
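`BlockCommit` and `HeartbeatBatch` derive `Encode`/`Decode` from `parity-scale-codec` (renamed to `scale` in this workspace, matching the import hunk earlier in this diff), so a heartbeat batch round-trips through a compact, non-self-describing binary form. A sketch of that round-trip (struct bodies copied from the hunk; the values are illustrative):

use scale::{Decode, Encode};

#[derive(Clone, Debug, Encode, Decode)]
pub struct BlockCommit {
  pub block: Vec<u8>,
  pub commit: Vec<u8>,
}

#[derive(Clone, Debug, Encode, Decode)]
pub struct HeartbeatBatch {
  pub blocks: Vec<BlockCommit>,
  pub timestamp: u64,
}

fn main() {
  let batch = HeartbeatBatch {
    blocks: vec![BlockCommit { block: vec![1, 2, 3], commit: vec![4, 5] }],
    timestamp: 1_700_000_000,
  };
  // SCALE isn't self-describing: decoding requires knowing the exact type upfront
  let bytes = batch.encode();
  let decoded = HeartbeatBatch::decode(&mut bytes.as_slice()).unwrap();
  assert_eq!(decoded.timestamp, batch.timestamp);
  assert_eq!(decoded.blocks.len(), 1);
}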
@@ -227,9 +258,9 @@ impl RrCodecTrait for RrCodec {
|
|||||||
) -> io::Result<Vec<u8>> {
|
) -> io::Result<Vec<u8>> {
|
||||||
let mut len = [0; 4];
|
let mut len = [0; 4];
|
||||||
io.read_exact(&mut len).await?;
|
io.read_exact(&mut len).await?;
|
||||||
let len = usize::try_from(u32::from_le_bytes(len)).expect("not a 32-bit platform?");
|
let len = usize::try_from(u32::from_le_bytes(len)).expect("not at least a 32-bit platform?");
|
||||||
if len > MAX_LIBP2P_MESSAGE_SIZE {
|
if len > MAX_LIBP2P_REQRES_MESSAGE_SIZE {
|
||||||
Err(io::Error::other("request length exceeded MAX_LIBP2P_MESSAGE_SIZE"))?;
|
Err(io::Error::other("request length exceeded MAX_LIBP2P_REQRES_MESSAGE_SIZE"))?;
|
||||||
}
|
}
|
||||||
// This may be a non-trivial allocation easily causable
|
// This may be a non-trivial allocation easily causable
|
||||||
// While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
|
// While we could chunk the read, meaning we only perform the allocation as bandwidth is used,
|
||||||
@@ -297,7 +328,7 @@ impl LibP2p {
|
|||||||
let throwaway_key_pair = Keypair::generate_ed25519();
|
let throwaway_key_pair = Keypair::generate_ed25519();
|
||||||
|
|
||||||
let behavior = Behavior {
|
let behavior = Behavior {
|
||||||
reqres: { RrBehavior::new([], RrConfig::default()) },
|
reqres: { RrBehavior::new([("/coordinator", ProtocolSupport::Full)], RrConfig::default()) },
|
||||||
gossipsub: {
|
gossipsub: {
|
||||||
let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2;
|
let heartbeat_interval = tributary::tendermint::LATENCY_TIME / 2;
|
||||||
let heartbeats_per_block =
|
let heartbeats_per_block =
|
||||||
@@ -308,7 +339,7 @@ impl LibP2p {
|
|||||||
.heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
|
.heartbeat_interval(Duration::from_millis(heartbeat_interval.into()))
|
||||||
.history_length(heartbeats_per_block * 2)
|
.history_length(heartbeats_per_block * 2)
|
||||||
.history_gossip(heartbeats_per_block)
|
.history_gossip(heartbeats_per_block)
|
||||||
.max_transmit_size(MAX_LIBP2P_MESSAGE_SIZE)
|
.max_transmit_size(MAX_LIBP2P_GOSSIP_MESSAGE_SIZE)
|
||||||
// We send KeepAlive after 80s
|
// We send KeepAlive after 80s
|
||||||
.idle_timeout(Duration::from_secs(85))
|
.idle_timeout(Duration::from_secs(85))
|
||||||
.validation_mode(ValidationMode::Strict)
|
.validation_mode(ValidationMode::Strict)
|
||||||
@@ -358,7 +389,7 @@ impl LibP2p {
|
|||||||
.with_behaviour(|_| behavior)
|
.with_behaviour(|_| behavior)
|
||||||
.unwrap()
|
.unwrap()
|
||||||
.build();
|
.build();
|
||||||
const PORT: u16 = 30564; // 5132 ^ (('c' << 8) | 'o')
|
const PORT: u16 = 30563; // 5132 ^ (('c' << 8) | 'o')
|
||||||
swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
|
swarm.listen_on(format!("/ip4/0.0.0.0/tcp/{PORT}").parse().unwrap()).unwrap();
|
||||||
|
|
||||||
let (send_send, mut send_recv) = mpsc::unbounded_channel();
|
let (send_send, mut send_recv) = mpsc::unbounded_channel();
|
||||||
@@ -868,7 +899,7 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
|
|||||||
let p2p = p2p.clone();
|
let p2p = p2p.clone();
|
||||||
async move {
|
async move {
|
||||||
loop {
|
loop {
|
||||||
let Some(mut msg) = recv.recv().await else {
|
let Some(msg) = recv.recv().await else {
|
||||||
// Channel closure happens when the tributary retires
|
// Channel closure happens when the tributary retires
|
||||||
break;
|
break;
|
||||||
};
|
};
|
||||||
@@ -913,34 +944,53 @@ pub async fn handle_p2p_task<D: Db, P: P2p>(
|
|||||||
latest = next;
|
latest = next;
|
||||||
}
|
}
|
||||||
if to_send.len() > 3 {
|
if to_send.len() > 3 {
|
||||||
for next in to_send {
|
// prepare the batch to sends
|
||||||
let mut res = reader.block(&next).unwrap().serialize();
|
let mut blocks = vec![];
|
||||||
res.extend(reader.commit(&next).unwrap());
|
for (i, next) in to_send.iter().enumerate() {
|
||||||
// Also include the timestamp used within the Heartbeat
|
if i >= BLOCKS_PER_BATCH {
|
||||||
res.extend(&msg.msg[32 .. 40]);
|
break;
|
||||||
p2p.send(msg.sender, ReqResMessageKind::Block(genesis), res).await;
|
}
|
||||||
|
|
||||||
|
blocks.push(BlockCommit {
|
||||||
|
block: reader.block(next).unwrap().serialize(),
|
||||||
|
commit: reader.commit(next).unwrap(),
|
||||||
|
});
|
||||||
}
|
}
|
||||||
|
let batch = HeartbeatBatch { blocks, timestamp: msg_time };
|
||||||
|
|
||||||
|
p2p
|
||||||
|
.send(msg.sender, ReqResMessageKind::Block(genesis), batch.encode())
|
||||||
|
.await;
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => {
|
P2pMessageKind::ReqRes(ReqResMessageKind::Block(msg_genesis)) => {
|
||||||
assert_eq!(msg_genesis, genesis);
|
assert_eq!(msg_genesis, genesis);
|
||||||
let mut msg_ref: &[u8] = msg.msg.as_ref();
|
// decode the batch
|
||||||
let Ok(block) = Block::<Transaction>::read(&mut msg_ref) else {
|
let Ok(batch) = HeartbeatBatch::decode(&mut msg.msg.as_ref()) else {
|
||||||
log::error!("received block message with an invalidly serialized block");
|
log::error!(
|
||||||
|
"received HeartBeatBatch message with an invalidly serialized batch"
|
||||||
|
);
|
||||||
continue;
|
continue;
|
||||||
};
|
};
|
||||||
// Get just the commit
|
|
||||||
msg.msg.drain(.. (msg.msg.len() - msg_ref.len()));
|
|
||||||
msg.msg.drain((msg.msg.len() - 8) ..);
|
|
||||||
|
|
||||||
let res = tributary.tributary.sync_block(block, msg.msg).await;
|
// sync blocks
|
||||||
log::debug!(
|
for bc in batch.blocks {
|
||||||
"received block from {:?}, sync_block returned {}",
|
// TODO: why do we use ReadWrite instead of Encode/Decode for blocks?
|
||||||
msg.sender,
|
// Should we use the same for batches so we can read both at the same time?
|
||||||
res
|
let Ok(block) = Block::<Transaction>::read(&mut bc.block.as_slice()) else {
|
||||||
);
|
log::error!("received block message with an invalidly serialized block");
|
||||||
|
continue;
|
||||||
|
};
|
||||||
|
|
||||||
|
let res = tributary.tributary.sync_block(block, bc.commit).await;
|
||||||
|
log::debug!(
|
||||||
|
"received block from {:?}, sync_block returned {}",
|
||||||
|
msg.sender,
|
||||||
|
res
|
||||||
|
);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => {
|
P2pMessageKind::Gossip(GossipMessageKind::Tributary(msg_genesis)) => {
|
||||||
|
|||||||
@@ -10,7 +10,7 @@ use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
|
|||||||
|
|
||||||
use serai_client::{
|
use serai_client::{
|
||||||
SeraiError, Block, Serai, TemporalSerai,
|
SeraiError, Block, Serai, TemporalSerai,
|
||||||
primitives::{BlockHash, NetworkId},
|
primitives::{BlockHash, EmbeddedEllipticCurve, NetworkId},
|
||||||
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
|
validator_sets::{primitives::ValidatorSet, ValidatorSetsEvent},
|
||||||
in_instructions::InInstructionsEvent,
|
in_instructions::InInstructionsEvent,
|
||||||
coins::CoinsEvent,
|
coins::CoinsEvent,
|
||||||
@@ -60,13 +60,46 @@ async fn handle_new_set<D: Db>(
   {
     log::info!("present in set {:?}", set);

-    let set_data = {
+    let validators;
+    let mut evrf_public_keys = vec![];
+    {
       let serai = serai.as_of(block.hash());
       let serai = serai.validator_sets();
       let set_participants =
         serai.participants(set.network).await?.expect("NewSet for set which doesn't exist");

-      set_participants.into_iter().map(|(k, w)| (k, u16::try_from(w).unwrap())).collect::<Vec<_>>()
+      validators = set_participants
+        .iter()
+        .map(|(k, w)| {
+          (
+            <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut k.0.as_ref())
+              .expect("invalid key registered as participant"),
+            u16::try_from(*w).unwrap(),
+          )
+        })
+        .collect::<Vec<_>>();
+
+      for (validator, _) in set_participants {
+        // This is only run for external networks which always do a DKG for Serai
+        let substrate = serai
+          .embedded_elliptic_curve_key(validator, EmbeddedEllipticCurve::Embedwards25519)
+          .await?
+          .expect("Serai called NewSet on a validator without an Embedwards25519 key");
+        // `embedded_elliptic_curves` is documented to have the second entry be the
+        // network-specific curve (if it exists and is distinct from Embedwards25519)
+        let network =
+          if let Some(embedded_elliptic_curve) = set.network.embedded_elliptic_curves().get(1) {
+            serai.embedded_elliptic_curve_key(validator, *embedded_elliptic_curve).await?.expect(
+              "Serai called NewSet on a validator without the embedded key required for the network",
+            )
+          } else {
+            substrate.clone()
+          };
+        evrf_public_keys.push((
+          <[u8; 32]>::try_from(substrate)
+            .expect("validator-sets pallet accepted a key of an invalid length"),
+          network,
+        ));
+      }
     };

     let time = if let Ok(time) = block.time() {

@@ -90,7 +123,7 @@ async fn handle_new_set<D: Db>(
     const SUBSTRATE_TO_TRIBUTARY_TIME_DELAY: u64 = 120;
     let time = time + SUBSTRATE_TO_TRIBUTARY_TIME_DELAY;

-    let spec = TributarySpec::new(block.hash(), time, set, set_data);
+    let spec = TributarySpec::new(block.hash(), time, set, validators, evrf_public_keys);

     log::info!("creating new tributary for {:?}", spec.set());

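The hunk above has handle_new_set resolve two eVRF keys per validator: an Embedwards25519 key for Serai itself, plus a network-specific key, falling back to the former when the network defines no distinct second curve. A self-contained sketch of just that fallback rule follows; the Curve enum, network names, and lookup closure are hypothetical stand-ins, not the serai-client API.

// Minimal sketch of the fallback rule above; hypothetical types, std only.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Curve {
  Embedwards25519,
  Secp256k1,
}

// Curves a network's DKG uses; index 1, when present, is the network-specific curve.
fn embedded_elliptic_curves(network: &str) -> Vec<Curve> {
  match network {
    "bitcoin" | "ethereum" => vec![Curve::Embedwards25519, Curve::Secp256k1],
    // A network whose curve is compatible with Embedwards25519 has no distinct second entry
    _ => vec![Curve::Embedwards25519],
  }
}

// Pick the key used for the network's DKG: the key for the second curve if one exists,
// else the Embedwards25519 key itself.
fn network_dkg_key(network: &str, substrate_key: Vec<u8>, lookup: impl Fn(Curve) -> Vec<u8>) -> Vec<u8> {
  match embedded_elliptic_curves(network).get(1) {
    Some(curve) => lookup(*curve),
    None => substrate_key,
  }
}

fn main() {
  let substrate_key = vec![0; 32];
  // A network with a distinct curve resolves a second, curve-specific key
  assert_eq!(network_dkg_key("bitcoin", substrate_key.clone(), |_| vec![1; 33]), vec![1; 33]);
  // Otherwise the Embedwards25519 key is reused
  assert_eq!(network_dkg_key("monero", substrate_key.clone(), |_| unreachable!()), substrate_key);
}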
@@ -7,12 +7,8 @@ use zeroize::Zeroizing;
 use rand_core::{RngCore, CryptoRng, OsRng};
 use futures_util::{task::Poll, poll};

-use ciphersuite::{
-  group::{ff::Field, GroupEncoding},
-  Ciphersuite, Ristretto,
-};
+use ciphersuite::{group::ff::Field, Ciphersuite, Ristretto};

-use sp_application_crypto::sr25519;
 use borsh::BorshDeserialize;
 use serai_client::{
   primitives::NetworkId,

@@ -52,12 +48,22 @@ pub fn new_spec<R: RngCore + CryptoRng>(

   let set = ValidatorSet { session: Session(0), network: NetworkId::Bitcoin };

-  let set_participants = keys
+  let validators = keys
     .iter()
-    .map(|key| (sr25519::Public((<Ristretto as Ciphersuite>::generator() * **key).to_bytes()), 1))
+    .map(|key| ((<Ristretto as Ciphersuite>::generator() * **key), 1))
     .collect::<Vec<_>>();

-  let res = TributarySpec::new(serai_block, start_time, set, set_participants);
+  // Generate random eVRF keys as none of these test rely on them to have any structure
+  let mut evrf_keys = vec![];
+  for _ in 0 .. keys.len() {
+    let mut substrate = [0; 32];
+    OsRng.fill_bytes(&mut substrate);
+    let mut network = vec![0; 64];
+    OsRng.fill_bytes(&mut network);
+    evrf_keys.push((substrate, network));
+  }
+
+  let res = TributarySpec::new(serai_block, start_time, set, validators, evrf_keys);
   assert_eq!(
     TributarySpec::deserialize_reader(&mut borsh::to_vec(&res).unwrap().as_slice()).unwrap(),
     res,
@@ -1,5 +1,4 @@
 use core::time::Duration;
-use std::collections::HashMap;

 use zeroize::Zeroizing;
 use rand_core::{RngCore, OsRng};

@@ -9,7 +8,7 @@ use frost::Participant;

 use sp_runtime::traits::Verify;
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
+  primitives::Signature,
   validator_sets::primitives::{ValidatorSet, KeyPair},
 };

@@ -17,10 +16,7 @@ use tokio::time::sleep;

 use serai_db::{Get, DbTxn, Db, MemDb};

-use processor_messages::{
-  key_gen::{self, KeyGenId},
-  CoordinatorMessage,
-};
+use processor_messages::{key_gen, CoordinatorMessage};

 use tributary::{TransactionTrait, Tributary};

@@ -54,44 +50,41 @@ async fn dkg_test() {
   tokio::spawn(run_tributaries(tributaries.clone()));

   let mut txs = vec![];
-  // Create DKG commitments for each key
+  // Create DKG participation for each key
   for key in &keys {
-    let attempt = 0;
-    let mut commitments = vec![0; 256];
-    OsRng.fill_bytes(&mut commitments);
+    let mut participation = vec![0; 4096];
+    OsRng.fill_bytes(&mut participation);

-    let mut tx = Transaction::DkgCommitments {
-      attempt,
-      commitments: vec![commitments],
-      signed: Transaction::empty_signed(),
-    };
+    let mut tx =
+      Transaction::DkgParticipation { participation, signed: Transaction::empty_signed() };
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }

   let block_before_tx = tributaries[0].1.tip().await;

-  // Publish all commitments but one
-  for (i, tx) in txs.iter().enumerate().skip(1) {
+  // Publish t-1 participations
+  let t = ((keys.len() * 2) / 3) + 1;
+  for (i, tx) in txs.iter().take(t - 1).enumerate() {
     assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-
-  // Wait until these are included
-  for tx in txs.iter().skip(1) {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }

-  let expected_commitments: HashMap<_, _> = txs
+  let expected_participations = txs
     .iter()
     .enumerate()
     .map(|(i, tx)| {
-      if let Transaction::DkgCommitments { commitments, .. } = tx {
-        (Participant::new((i + 1).try_into().unwrap()).unwrap(), commitments[0].clone())
+      if let Transaction::DkgParticipation { participation, .. } = tx {
+        CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Participation {
+          session: spec.set().session,
+          participant: Participant::new((i + 1).try_into().unwrap()).unwrap(),
+          participation: participation.clone(),
+        })
       } else {
-        panic!("txs had non-commitments");
+        panic!("txs wasn't a DkgParticipation");
       }
     })
-    .collect();
+    .collect::<Vec<_>>();

   async fn new_processors(
     db: &mut MemDb,
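The test derives the BFT threshold with the same integer arithmetic as the coordinator: t = floor(2n / 3) + 1. A minimal check of that formula for a few set sizes (the five-key case matching this harness's usual size is an assumption):

// The threshold used above: t = floor(2n / 3) + 1, checked against sample sizes.
fn t(n: usize) -> usize {
  ((n * 2) / 3) + 1
}

fn main() {
  assert_eq!(t(3), 3); // every validator is needed
  assert_eq!(t(4), 3);
  assert_eq!(t(5), 4); // with 5 keys, t - 1 = 3 participations are published first
  assert_eq!(t(100), 67);
}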
@@ -120,28 +113,30 @@ async fn dkg_test() {
     processors
   }

-  // Instantiate a scanner and verify it has nothing to report
+  // Instantiate a scanner and verify it has the first two participations to report (and isn't
+  // waiting for `t`)
   let processors = new_processors(&mut dbs[0], &keys[0], &spec, &tributaries[0].1).await;
-  assert!(processors.0.read().await.is_empty());
+  assert_eq!(processors.0.read().await.get(&spec.set().network).unwrap().len(), t - 1);

-  // Publish the last commitment
+  // Publish the rest of the participations
   let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
+  for tx in txs.iter().skip(t - 1) {
+    assert_eq!(tributaries[0].1.add_transaction(tx.clone()).await, Ok(true));
+    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
+  }

-  // Verify the scanner emits a KeyGen::Commitments message
+  // Verify the scanner emits all KeyGen::Participations messages
   handle_new_blocks::<_, _, _, _, _, LocalP2p>(
     &mut dbs[0],
     &keys[0],
     &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after Commitments")
+      panic!("provided TX caused recognized_id to be called after DkgParticipation")
     },
     &processors,
     &(),
     &|_| async {
       panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after Commitments"
+        "test tried to publish a new Tributary TX from handle_application_tx after DkgParticipation"
       )
     },
     &spec,

@@ -150,17 +145,11 @@ async fn dkg_test() {
   .await;
   {
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }

@@ -168,149 +157,14 @@ async fn dkg_test() {
   for (i, key) in keys.iter().enumerate().skip(1) {
     let processors = new_processors(&mut dbs[i], key, &spec, &tributaries[i].1).await;
     let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
     let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
+    assert_eq!(msgs.len(), keys.len());
+    for expected in &expected_participations {
+      assert_eq!(&msgs.pop_front().unwrap(), expected);
+    }
     assert!(msgs.is_empty());
   }

-  // Now do shares
-  let mut txs = vec![];
-  for (k, key) in keys.iter().enumerate() {
-    let attempt = 0;
-
-    let mut shares = vec![vec![]];
-    for i in 0 .. keys.len() {
-      if i != k {
-        let mut share = vec![0; 256];
-        OsRng.fill_bytes(&mut share);
-        shares.last_mut().unwrap().push(share);
-      }
-    }
-
-    let mut txn = dbs[k].txn();
-    let mut tx = Transaction::DkgShares {
-      attempt,
-      shares,
-      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
-      signed: Transaction::empty_signed(),
-    };
-    txn.commit();
-    tx.sign(&mut OsRng, spec.genesis(), key);
-    txs.push(tx);
-  }
-
-  let block_before_tx = tributaries[0].1.tip().await;
-  for (i, tx) in txs.iter().enumerate().skip(1) {
-    assert_eq!(tributaries[i].1.add_transaction(tx.clone()).await, Ok(true));
-  }
-  for tx in txs.iter().skip(1) {
-    wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
-  }
-
-  // With just 4 sets of shares, nothing should happen yet
-  handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-    &mut dbs[0],
-    &keys[0],
-    &|_, _, _, _| async {
-      panic!("provided TX caused recognized_id to be called after some shares")
-    },
-    &processors,
-    &(),
-    &|_| async {
-      panic!(
-        "test tried to publish a new Tributary TX from handle_application_tx after some shares"
-      )
-    },
-    &spec,
-    &tributaries[0].1.reader(),
-  )
-  .await;
-  assert_eq!(processors.0.read().await.len(), 1);
-  assert!(processors.0.read().await[&spec.set().network].is_empty());
-
-  // Publish the final set of shares
-  let block_before_tx = tributaries[0].1.tip().await;
-  assert_eq!(tributaries[0].1.add_transaction(txs[0].clone()).await, Ok(true));
-  wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, txs[0].hash()).await;
-  sleep(Duration::from_secs(Tributary::<MemDb, Transaction, LocalP2p>::block_time().into())).await;
-
-  // Each scanner should emit a distinct shares message
-  let shares_for = |i: usize| {
-    CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Shares {
-      id: KeyGenId { session: spec.set().session, attempt: 0 },
-      shares: vec![txs
-        .iter()
-        .enumerate()
-        .filter_map(|(l, tx)| {
-          if let Transaction::DkgShares { shares, .. } = tx {
-            if i == l {
-              None
-            } else {
-              let relative_i = i - (if i > l { 1 } else { 0 });
-              Some((
-                Participant::new((l + 1).try_into().unwrap()).unwrap(),
-                shares[0][relative_i].clone(),
-              ))
-            }
-          } else {
-            panic!("txs had non-shares");
-          }
-        })
-        .collect::<HashMap<_, _>>()],
-    })
-  };
-
-  // Any scanner which has handled the prior blocks should only emit the new event
-  for (i, key) in keys.iter().enumerate() {
-    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
-      &mut dbs[i],
-      key,
-      &|_, _, _, _| async { panic!("provided TX caused recognized_id to be called after shares") },
-      &processors,
-      &(),
-      &|_| async { panic!("test tried to publish a new Tributary TX from handle_application_tx") },
-      &spec,
-      &tributaries[i].1.reader(),
-    )
-    .await;
-    {
-      let mut msgs = processors.0.write().await;
-      assert_eq!(msgs.len(), 1);
-      let msgs = msgs.get_mut(&spec.set().network).unwrap();
-      assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-      assert!(msgs.is_empty());
-    }
-  }
-
-  // Yet new scanners should emit all events
-  for (i, key) in keys.iter().enumerate() {
-    let processors = new_processors(&mut MemDb::new(), key, &spec, &tributaries[i].1).await;
-    let mut msgs = processors.0.write().await;
-    assert_eq!(msgs.len(), 1);
-    let msgs = msgs.get_mut(&spec.set().network).unwrap();
-    let mut expected_commitments = expected_commitments.clone();
-    expected_commitments.remove(&Participant::new((i + 1).try_into().unwrap()).unwrap());
-    assert_eq!(
-      msgs.pop_front().unwrap(),
-      CoordinatorMessage::KeyGen(key_gen::CoordinatorMessage::Commitments {
-        id: KeyGenId { session: spec.set().session, attempt: 0 },
-        commitments: expected_commitments
-      })
-    );
-    assert_eq!(msgs.pop_front().unwrap(), shares_for(i));
-    assert!(msgs.is_empty());
-  }
-
-  // Send DkgConfirmed
   let mut substrate_key = [0; 32];
   OsRng.fill_bytes(&mut substrate_key);
   let mut network_key = vec![0; usize::try_from((OsRng.next_u64() % 32) + 32).unwrap()];
@@ -319,17 +173,19 @@ async fn dkg_test() {

   let mut txs = vec![];
   for (i, key) in keys.iter().enumerate() {
-    let attempt = 0;
     let mut txn = dbs[i].txn();
-    let share =
-      crate::tributary::generated_key_pair::<MemDb>(&mut txn, key, &spec, &key_pair, 0).unwrap();
-    txn.commit();

-    let mut tx = Transaction::DkgConfirmed {
+    // Claim we've generated the key pair
+    crate::tributary::generated_key_pair::<MemDb>(&mut txn, spec.genesis(), &key_pair);
+
+    // Publish the nonces
+    let attempt = 0;
+    let mut tx = Transaction::DkgConfirmationNonces {
       attempt,
-      confirmation_share: share,
+      confirmation_nonces: crate::tributary::dkg_confirmation_nonces(key, &spec, &mut txn, 0),
       signed: Transaction::empty_signed(),
     };
+    txn.commit();
     tx.sign(&mut OsRng, spec.genesis(), key);
     txs.push(tx);
   }
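The rewritten loop claims a generated key pair and then publishes only DkgConfirmationNonces; confirmation shares follow in a second round once every nonce is on-chain. A toy model of that nonces-then-shares accumulation, using hypothetical types rather than the actual Transaction and DkgConfirmer machinery:

// Sketch of the two-round confirmation flow the test drives, with hypothetical types:
// round 1 collects nonce commitments, round 2 collects shares once nonces complete.
use std::collections::HashMap;

type Nonces = [u8; 64];
type Share = [u8; 32];

enum Confirmation {
  Nonces { attempt: u32, nonces: Nonces },
  Share { attempt: u32, share: Share },
}

struct Round {
  t: usize,
  nonces: HashMap<u16, Nonces>,
  shares: HashMap<u16, Share>,
}

impl Round {
  // Returns the next action, if any, this message unlocks
  fn handle(&mut self, from: u16, msg: Confirmation) -> Option<&'static str> {
    match msg {
      Confirmation::Nonces { nonces, .. } => {
        self.nonces.insert(from, nonces);
        (self.nonces.len() == self.t).then_some("publish our share")
      }
      Confirmation::Share { share, .. } => {
        self.shares.insert(from, share);
        (self.shares.len() == self.t).then_some("aggregate the signature")
      }
    }
  }
}

fn main() {
  let mut round = Round { t: 2, nonces: HashMap::new(), shares: HashMap::new() };
  assert_eq!(round.handle(1, Confirmation::Nonces { attempt: 0, nonces: [0; 64] }), None);
  assert_eq!(
    round.handle(2, Confirmation::Nonces { attempt: 0, nonces: [0; 64] }),
    Some("publish our share"),
  );
  assert_eq!(round.handle(1, Confirmation::Share { attempt: 0, share: [0; 32] }), None);
  assert_eq!(
    round.handle(2, Confirmation::Share { attempt: 0, share: [0; 32] }),
    Some("aggregate the signature"),
  );
}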
@@ -341,6 +197,35 @@ async fn dkg_test() {
     wait_for_tx_inclusion(&tributaries[0].1, block_before_tx, tx.hash()).await;
   }

+  // This should not cause any new processor event as the processor doesn't handle DKG confirming
+  for (i, key) in keys.iter().enumerate() {
+    handle_new_blocks::<_, _, _, _, _, LocalP2p>(
+      &mut dbs[i],
+      key,
+      &|_, _, _, _| async {
+        panic!("provided TX caused recognized_id to be called after DkgConfirmationNonces")
+      },
+      &processors,
+      &(),
+      // The Tributary handler should publish ConfirmationShare itself after ConfirmationNonces
+      &|tx| async { assert_eq!(tributaries[i].1.add_transaction(tx).await, Ok(true)) },
+      &spec,
+      &tributaries[i].1.reader(),
+    )
+    .await;
+    {
+      assert!(processors.0.read().await.get(&spec.set().network).unwrap().is_empty());
+    }
+  }
+
+  // Yet once these TXs are on-chain, the tributary should itself publish the confirmation shares
+  // This means in the block after the next block, the keys should be set onto Serai
+  // Sleep twice as long as two blocks, in case there's some stability issue
+  sleep(Duration::from_secs(
+    2 * 2 * u64::from(Tributary::<MemDb, Transaction, LocalP2p>::block_time()),
+  ))
+  .await;
+
   struct CheckPublishSetKeys {
     spec: TributarySpec,
     key_pair: KeyPair,
@@ -351,19 +236,24 @@ async fn dkg_test() {
      &self,
      _db: &(impl Sync + Get),
      set: ValidatorSet,
-     removed: Vec<SeraiAddress>,
      key_pair: KeyPair,
+     signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
      signature: Signature,
    ) {
      assert_eq!(set, self.spec.set());
-     assert!(removed.is_empty());
      assert_eq!(self.key_pair, key_pair);
      assert!(signature.verify(
-       &*serai_client::validator_sets::primitives::set_keys_message(&set, &[], &key_pair),
+       &*serai_client::validator_sets::primitives::set_keys_message(&set, &key_pair),
        &serai_client::Public(
          frost::dkg::musig::musig_key::<Ristretto>(
            &serai_client::validator_sets::primitives::musig_context(set),
-           &self.spec.validators().into_iter().map(|(validator, _)| validator).collect::<Vec<_>>()
+           &self
+             .spec
+             .validators()
+             .into_iter()
+             .zip(signature_participants)
+             .filter_map(|((validator, _), included)| included.then_some(validator))
+             .collect::<Vec<_>>()
          )
          .unwrap()
          .to_bytes()
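The updated assertion rebuilds the MuSig key from only the validators flagged in signature_participants, rather than from the whole set. A std-only sketch of that zip-and-filter, with Vec<bool> standing in for bitvec's BitVec:

// Sketch of the signer-set filter used in the MuSig key check above: validators are
// zipped with the participation bits and only included entries feed the key.
fn included_signers<K: Clone>(validators: &[(K, u16)], participants: &[bool]) -> Vec<K> {
  validators
    .iter()
    .zip(participants)
    .filter_map(|((validator, _weight), included)| included.then(|| validator.clone()))
    .collect()
}

fn main() {
  let validators = vec![("alice", 1u16), ("bob", 2), ("carol", 1)];
  let participants = vec![true, false, true];
  assert_eq!(included_signers(&validators, &participants), vec!["alice", "carol"]);
}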
@@ -6,7 +6,7 @@ use ciphersuite::{group::Group, Ciphersuite, Ristretto};

 use scale::{Encode, Decode};
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
+  primitives::Signature,
   validator_sets::primitives::{MAX_KEY_SHARES_PER_SET, ValidatorSet, KeyPair},
 };
 use processor_messages::coordinator::SubstrateSignableId;

@@ -32,8 +32,8 @@ impl PublishSeraiTransaction for () {
     &self,
     _db: &(impl Sync + serai_db::Get),
     _set: ValidatorSet,
-    _removed: Vec<SeraiAddress>,
     _key_pair: KeyPair,
+    _signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     _signature: Signature,
   ) {
     panic!("publish_set_keys was called in test")
@@ -143,84 +143,34 @@ fn serialize_sign_data() {

 #[test]
 fn serialize_transaction() {
-  test_read_write(&Transaction::RemoveParticipantDueToDkg {
+  test_read_write(&Transaction::RemoveParticipant {
     participant: <Ristretto as Ciphersuite>::G::random(&mut OsRng),
     signed: random_signed_with_nonce(&mut OsRng, 0),
   });

-  {
-    let mut commitments = vec![random_vec(&mut OsRng, 512)];
-    for _ in 0 .. (OsRng.next_u64() % 100) {
-      let mut temp = commitments[0].clone();
-      OsRng.fill_bytes(&mut temp);
-      commitments.push(temp);
-    }
-    test_read_write(&Transaction::DkgCommitments {
-      attempt: random_u32(&mut OsRng),
-      commitments,
-      signed: random_signed_with_nonce(&mut OsRng, 0),
-    });
-  }
+  test_read_write(&Transaction::DkgParticipation {
+    participation: random_vec(&mut OsRng, 4096),
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });

-  {
-    // This supports a variable share length, and variable amount of sent shares, yet share length
-    // and sent shares is expected to be constant among recipients
-    let share_len = usize::try_from((OsRng.next_u64() % 512) + 1).unwrap();
-    let amount_of_shares = usize::try_from((OsRng.next_u64() % 3) + 1).unwrap();
-    // Create a valid vec of shares
-    let mut shares = vec![];
-    // Create up to 150 participants
-    for _ in 0 ..= (OsRng.next_u64() % 150) {
-      // Give each sender multiple shares
-      let mut sender_shares = vec![];
-      for _ in 0 .. amount_of_shares {
-        let mut share = vec![0; share_len];
-        OsRng.fill_bytes(&mut share);
-        sender_shares.push(share);
-      }
-      shares.push(sender_shares);
-    }
+  test_read_write(&Transaction::DkgConfirmationNonces {
+    attempt: random_u32(&mut OsRng),
+    confirmation_nonces: {
+      let mut nonces = [0; 64];
+      OsRng.fill_bytes(&mut nonces);
+      nonces
+    },
+    signed: random_signed_with_nonce(&mut OsRng, 0),
+  });

-    test_read_write(&Transaction::DkgShares {
-      attempt: random_u32(&mut OsRng),
-      shares,
-      confirmation_nonces: {
-        let mut nonces = [0; 64];
-        OsRng.fill_bytes(&mut nonces);
-        nonces
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 1),
-    });
-  }
-
-  for i in 0 .. 2 {
-    test_read_write(&Transaction::InvalidDkgShare {
-      attempt: random_u32(&mut OsRng),
-      accuser: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      faulty: frost::Participant::new(
-        u16::try_from(OsRng.next_u64() >> 48).unwrap().saturating_add(1),
-      )
-      .unwrap(),
-      blame: if i == 0 {
-        None
-      } else {
-        Some(random_vec(&mut OsRng, 500)).filter(|blame| !blame.is_empty())
-      },
-      signed: random_signed_with_nonce(&mut OsRng, 2),
-    });
-  }
-
-  test_read_write(&Transaction::DkgConfirmed {
+  test_read_write(&Transaction::DkgConfirmationShare {
     attempt: random_u32(&mut OsRng),
     confirmation_share: {
       let mut share = [0; 32];
       OsRng.fill_bytes(&mut share);
       share
     },
-    signed: random_signed_with_nonce(&mut OsRng, 2),
+    signed: random_signed_with_nonce(&mut OsRng, 1),
   });

   {
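test_read_write round-trips each transaction variant through serialization and asserts equality. A minimal analogue of the pattern, assuming the borsh crate with its derive feature and a hypothetical struct (the real Transaction may use the tributary's own hand-written codec, but the shape of the check is the same):

// A sketch of the round-trip check: serialize, deserialize, compare.
use borsh::{BorshDeserialize, BorshSerialize};

#[derive(Debug, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
struct DkgParticipation {
  participation: Vec<u8>,
}

fn test_read_write<T: BorshSerialize + BorshDeserialize + PartialEq + core::fmt::Debug>(value: &T) {
  let bytes = borsh::to_vec(value).unwrap();
  assert_eq!(&T::deserialize_reader(&mut bytes.as_slice()).unwrap(), value);
}

fn main() {
  test_read_write(&DkgParticipation { participation: vec![0xfe; 4096] });
}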
@@ -29,7 +29,7 @@ async fn sync_test() {
   let mut keys = new_keys(&mut OsRng);
   let spec = new_spec(&mut OsRng, &keys);
   // Ensure this can have a node fail
-  assert!(spec.n(&[]) > spec.t());
+  assert!(spec.n() > spec.t());

   let mut tributaries = new_tributaries(&keys, &spec)
     .await

@@ -142,7 +142,7 @@ async fn sync_test() {
   // Because only `t` validators are used in a commit, take n - t nodes offline
   // leaving only `t` nodes. Which should force it to participate in the consensus
   // of next blocks.
-  let spares = usize::from(spec.n(&[]) - spec.t());
+  let spares = usize::from(spec.n() - spec.t());
   for thread in p2p_threads.iter().take(spares) {
     thread.abort();
   }
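The arithmetic the comment relies on, spelled out: with t of n validators required for a commit, n - t nodes may go offline while the remainder still make progress.

// Fault-tolerance arithmetic behind the sync test above.
fn main() {
  let n: u16 = 5;
  let t: u16 = ((n * 2) / 3) + 1; // 4
  let spares = n - t; // 1 node may fail
  assert!(n > t);
  assert_eq!(spares, 1);
}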
@@ -37,15 +37,14 @@ async fn tx_test() {
     usize::try_from(OsRng.next_u64() % u64::try_from(tributaries.len()).unwrap()).unwrap();
   let key = keys[sender].clone();

-  let attempt = 0;
-  let mut commitments = vec![0; 256];
-  OsRng.fill_bytes(&mut commitments);
-
-  // Create the TX with a null signature so we can get its sig hash
   let block_before_tx = tributaries[sender].1.tip().await;
-  let mut tx = Transaction::DkgCommitments {
-    attempt,
-    commitments: vec![commitments.clone()],
+  // Create the TX with a null signature so we can get its sig hash
+  let mut tx = Transaction::DkgParticipation {
+    participation: {
+      let mut participation = vec![0; 4096];
+      OsRng.fill_bytes(&mut participation);
+      participation
+    },
     signed: Transaction::empty_signed(),
   };
   tx.sign(&mut OsRng, spec.genesis(), &key);
@@ -18,7 +18,6 @@ use crate::tributary::{Label, Transaction};

 #[derive(Clone, Copy, PartialEq, Eq, Debug, Encode, BorshSerialize, BorshDeserialize)]
 pub enum Topic {
-  Dkg,
   DkgConfirmation,
   SubstrateSign(SubstrateSignableId),
   Sign([u8; 32]),

@@ -46,15 +45,13 @@ pub enum Accumulation {
 create_db!(
   Tributary {
     SeraiBlockNumber: (hash: [u8; 32]) -> u64,
-    SeraiDkgCompleted: (spec: ValidatorSet) -> [u8; 32],
+    SeraiDkgCompleted: (set: ValidatorSet) -> [u8; 32],

     TributaryBlockNumber: (block: [u8; 32]) -> u32,
     LastHandledBlock: (genesis: [u8; 32]) -> [u8; 32],

     // TODO: Revisit the point of this
     FatalSlashes: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
-    RemovedAsOfDkgAttempt: (genesis: [u8; 32], attempt: u32) -> Vec<[u8; 32]>,
-    OfflineDuringDkg: (genesis: [u8; 32]) -> Vec<[u8; 32]>,
     // TODO: Combine these two
     FatallySlashed: (genesis: [u8; 32], account: [u8; 32]) -> (),
     SlashPoints: (genesis: [u8; 32], account: [u8; 32]) -> u32,
@@ -67,11 +64,9 @@ create_db!(
     DataReceived: (genesis: [u8; 32], data_spec: &DataSpecification) -> u16,
     DataDb: (genesis: [u8; 32], data_spec: &DataSpecification, signer_bytes: &[u8; 32]) -> Vec<u8>,

-    DkgShare: (genesis: [u8; 32], from: u16, to: u16) -> Vec<u8>,
+    DkgParticipation: (genesis: [u8; 32], from: u16) -> Vec<u8>,
     ConfirmationNonces: (genesis: [u8; 32], attempt: u32) -> HashMap<Participant, Vec<u8>>,
-    DkgKeyPair: (genesis: [u8; 32], attempt: u32) -> KeyPair,
-    KeyToDkgAttempt: (key: [u8; 32]) -> u32,
-    DkgLocallyCompleted: (genesis: [u8; 32]) -> (),
+    DkgKeyPair: (genesis: [u8; 32]) -> KeyPair,

     PlanIds: (genesis: &[u8], block: u64) -> Vec<[u8; 32]>,

@@ -123,12 +118,12 @@ impl AttemptDb {

   pub fn attempt(getter: &impl Get, genesis: [u8; 32], topic: Topic) -> Option<u32> {
     let attempt = Self::get(getter, genesis, &topic);
-    // Don't require explicit recognition of the Dkg topic as it starts when the chain does
+    // Don't require explicit recognition of the DkgConfirmation topic as it starts when the chain
+    // does
     // Don't require explicit recognition of the SlashReport topic as it isn't a DoS risk and it
     // should always happen (eventually)
     if attempt.is_none() &&
-      ((topic == Topic::Dkg) ||
-        (topic == Topic::DkgConfirmation) ||
+      ((topic == Topic::DkgConfirmation) ||
         (topic == Topic::SubstrateSign(SubstrateSignableId::SlashReport)))
     {
       return Some(0);
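The rule encoded above: topics alive from the chain's genesis report attempt 0 even when never explicitly recognized. A reduced sketch with a simplified Topic enum, where a plain SlashReport variant stands in for the SubstrateSign(SlashReport) case:

// Sketch of the default-attempt rule for always-live topics.
enum Topic {
  DkgConfirmation,
  SlashReport,
  Sign([u8; 32]),
}

fn attempt(stored: Option<u32>, topic: Topic) -> Option<u32> {
  if stored.is_none() && matches!(topic, Topic::DkgConfirmation | Topic::SlashReport) {
    return Some(0);
  }
  stored
}

fn main() {
  assert_eq!(attempt(None, Topic::DkgConfirmation), Some(0));
  assert_eq!(attempt(None, Topic::Sign([0; 32])), None); // needs explicit recognition
  assert_eq!(attempt(Some(2), Topic::Sign([0; 32])), Some(2));
}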
@@ -155,16 +150,12 @@ impl ReattemptDb {
     // 5 minutes for attempts 0 ..= 2, 10 minutes for attempts 3 ..= 5, 15 minutes for attempts > 5
     // Assumes no event will take longer than 15 minutes, yet grows the time in case there are
     // network bandwidth issues
-    let mut reattempt_delay = BASE_REATTEMPT_DELAY *
+    let reattempt_delay = BASE_REATTEMPT_DELAY *
       ((AttemptDb::attempt(txn, genesis, topic)
         .expect("scheduling re-attempt for unknown topic") /
         3) +
         1)
       .min(3);
-    // Allow more time for DKGs since they have an extra round and much more data
-    if matches!(topic, Topic::Dkg) {
-      reattempt_delay *= 4;
-    }
     let upon_block = current_block_number + reattempt_delay;

     let mut reattempts = Self::get(txn, genesis, upon_block).unwrap_or(vec![]);
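The retained delay schedule, worked through: BASE_REATTEMPT_DELAY * min((attempt / 3) + 1, 3), so attempts 0 ..= 2 wait one base unit, 3 ..= 5 two, and everything later three. BASE_REATTEMPT_DELAY is measured in blocks in the source; the 5-minute figure below is just the comment's framing.

// The re-attempt delay formula above, checked for a few attempt numbers.
fn reattempt_delay(base: u32, attempt: u32) -> u32 {
  base * ((attempt / 3) + 1).min(3)
}

fn main() {
  let base = 5; // "minutes", for illustration
  assert_eq!(reattempt_delay(base, 0), 5);
  assert_eq!(reattempt_delay(base, 2), 5);
  assert_eq!(reattempt_delay(base, 3), 10);
  assert_eq!(reattempt_delay(base, 7), 15);
  assert_eq!(reattempt_delay(base, 42), 15); // capped at 3x
}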
@@ -13,7 +13,7 @@ use serai_client::{Signature, validator_sets::primitives::KeyPair};
 use tributary::{Signed, TransactionKind, TransactionTrait};

 use processor_messages::{
-  key_gen::{self, KeyGenId},
+  key_gen::{self},
   coordinator::{self, SubstrateSignableId, SubstrateSignId},
   sign::{self, SignId},
 };
@@ -38,33 +38,20 @@ pub fn dkg_confirmation_nonces(
   txn: &mut impl DbTxn,
   attempt: u32,
 ) -> [u8; 64] {
-  DkgConfirmer::new(key, spec, txn, attempt)
-    .expect("getting DKG confirmation nonces for unknown attempt")
-    .preprocess()
+  DkgConfirmer::new(key, spec, txn, attempt).preprocess()
 }

 pub fn generated_key_pair<D: Db>(
   txn: &mut D::Transaction<'_>,
-  key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
-  spec: &TributarySpec,
+  genesis: [u8; 32],
   key_pair: &KeyPair,
-  attempt: u32,
-) -> Result<[u8; 32], Participant> {
-  DkgKeyPair::set(txn, spec.genesis(), attempt, key_pair);
-  KeyToDkgAttempt::set(txn, key_pair.0 .0, &attempt);
-  let preprocesses = ConfirmationNonces::get(txn, spec.genesis(), attempt).unwrap();
-  DkgConfirmer::new(key, spec, txn, attempt)
-    .expect("claiming to have generated a key pair for an unrecognized attempt")
-    .share(preprocesses, key_pair)
+) {
+  DkgKeyPair::set(txn, genesis, key_pair);
 }

-fn unflatten(
-  spec: &TributarySpec,
-  removed: &[<Ristretto as Ciphersuite>::G],
-  data: &mut HashMap<Participant, Vec<u8>>,
-) {
+fn unflatten(spec: &TributarySpec, data: &mut HashMap<Participant, Vec<u8>>) {
   for (validator, _) in spec.validators() {
-    let Some(range) = spec.i(removed, validator) else { continue };
+    let Some(range) = spec.i(validator) else { continue };
     let Some(all_segments) = data.remove(&range.start) else {
       continue;
     };
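unflatten and spec.i() above depend on each validator owning a contiguous, weight-sized range of 1-indexed participant slots. A sketch of that layout with hypothetical names:

// Sketch of the weighted-index layout: each validator gets `weight` consecutive
// participant indexes, starting from 1.
use std::collections::HashMap;
use std::ops::Range;

fn index_ranges(validators: &[(&'static str, u16)]) -> HashMap<&'static str, Range<u16>> {
  let mut ranges = HashMap::new();
  let mut next = 1; // Participant indexes are 1-indexed
  for (validator, weight) in validators {
    ranges.insert(*validator, next .. (next + weight));
    next += weight;
  }
  ranges
}

fn main() {
  let ranges = index_ranges(&[("alice", 1), ("bob", 2), ("carol", 1)]);
  assert_eq!(ranges["alice"], 1 .. 2);
  assert_eq!(ranges["bob"], 2 .. 4); // two key shares, two participant indexes
  assert_eq!(ranges["carol"], 4 .. 5);
}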
@@ -88,7 +75,6 @@ impl<
 {
   fn accumulate(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     signer: <Ristretto as Ciphersuite>::G,
     data: &Vec<u8>,

@@ -99,10 +85,7 @@ impl<
       panic!("accumulating data for a participant multiple times");
     }
     let signer_shares = {
-      let Some(signer_i) = self.spec.i(removed, signer) else {
-        log::warn!("accumulating data from {} who was removed", hex::encode(signer.to_bytes()));
-        return Accumulation::NotReady;
-      };
+      let signer_i = self.spec.i(signer).expect("transaction signer wasn't a member of the set");
       u16::from(signer_i.end) - u16::from(signer_i.start)
     };

@@ -115,11 +98,7 @@ impl<

     // If 2/3rds of the network participated in this preprocess, queue it for an automatic
     // re-attempt
-    // DkgConfirmation doesn't have a re-attempt as it's just an extension for Dkg
-    if (data_spec.label == Label::Preprocess) &&
-      received_range.contains(&self.spec.t()) &&
-      (data_spec.topic != Topic::DkgConfirmation)
-    {
+    if (data_spec.label == Label::Preprocess) && received_range.contains(&self.spec.t()) {
       // Double check the attempt on this entry, as we don't want to schedule a re-attempt if this
       // is an old entry
       // This is an assert, not part of the if check, as old data shouldn't be here in the first

@@ -129,10 +108,7 @@ impl<
     }

     // If we have all the needed commitments/preprocesses/shares, tell the processor
-    let needs_everyone =
-      (data_spec.topic == Topic::Dkg) || (data_spec.topic == Topic::DkgConfirmation);
-    let needed = if needs_everyone { self.spec.n(removed) } else { self.spec.t() };
-    if received_range.contains(&needed) {
+    if received_range.contains(&self.spec.t()) {
       log::debug!(
         "accumulation for entry {:?} attempt #{} is ready",
         &data_spec.topic,
@@ -141,7 +117,7 @@ impl<

       let mut data = HashMap::new();
       for validator in self.spec.validators().iter().map(|validator| validator.0) {
-        let Some(i) = self.spec.i(removed, validator) else { continue };
+        let Some(i) = self.spec.i(validator) else { continue };
         data.insert(
           i.start,
           if let Some(data) = DataDb::get(self.txn, genesis, data_spec, &validator.to_bytes()) {

@@ -152,10 +128,10 @@ impl<
         );
       }

-      assert_eq!(data.len(), usize::from(needed));
+      assert_eq!(data.len(), usize::from(self.spec.t()));

       // Remove our own piece of data, if we were involved
-      if let Some(i) = self.spec.i(removed, Ristretto::generator() * self.our_key.deref()) {
+      if let Some(i) = self.spec.i(Ristretto::generator() * self.our_key.deref()) {
         if data.remove(&i.start).is_some() {
           return Accumulation::Ready(DataSet::Participating(data));
         }
@@ -167,7 +143,6 @@ impl<

   fn handle_data(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     data_spec: &DataSpecification,
     bytes: &Vec<u8>,
     signed: &Signed,

@@ -213,21 +188,15 @@ impl<
     // TODO: If this is shares, we need to check they are part of the selected signing set

     // Accumulate this data
-    self.accumulate(removed, data_spec, signed.signer, bytes)
+    self.accumulate(data_spec, signed.signer, bytes)
   }

   fn check_sign_data_len(
     &mut self,
-    removed: &[<Ristretto as Ciphersuite>::G],
     signer: <Ristretto as Ciphersuite>::G,
     len: usize,
   ) -> Result<(), ()> {
-    let Some(signer_i) = self.spec.i(removed, signer) else {
-      // TODO: Ensure processor doesn't so participate/check how it handles removals for being
-      // offline
-      self.fatal_slash(signer.to_bytes(), "signer participated despite being removed");
-      Err(())?
-    };
+    let signer_i = self.spec.i(signer).expect("signer wasn't a member of the set");
     if len != usize::from(u16::from(signer_i.end) - u16::from(signer_i.start)) {
       self.fatal_slash(
         signer.to_bytes(),
@@ -254,12 +223,9 @@ impl<
     }

     match tx {
-      Transaction::RemoveParticipantDueToDkg { participant, signed } => {
-        if self.spec.i(&[], participant).is_none() {
-          self.fatal_slash(
-            participant.to_bytes(),
-            "RemoveParticipantDueToDkg vote for non-validator",
-          );
+      Transaction::RemoveParticipant { participant, signed } => {
+        if self.spec.i(participant).is_none() {
+          self.fatal_slash(participant.to_bytes(), "RemoveParticipant vote for non-validator");
           return;
         }

@@ -274,268 +240,106 @@ impl<

        let prior_votes = VotesToRemove::get(self.txn, genesis, participant).unwrap_or(0);
        let signer_votes =
-         self.spec.i(&[], signed.signer).expect("signer wasn't a validator for this network?");
+         self.spec.i(signed.signer).expect("signer wasn't a validator for this network?");
        let new_votes = prior_votes + u16::from(signer_votes.end) - u16::from(signer_votes.start);
        VotesToRemove::set(self.txn, genesis, participant, &new_votes);
        if ((prior_votes + 1) ..= new_votes).contains(&self.spec.t()) {
-         self.fatal_slash(participant, "RemoveParticipantDueToDkg vote")
+         self.fatal_slash(participant, "RemoveParticipant vote")
        }
      }

-     Transaction::DkgCommitments { attempt, commitments, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgCommitments with an unrecognized attempt");
-         return;
-       };
-       let Ok(()) = self.check_sign_data_len(&removed, signed.signer, commitments.len()) else {
-         return;
-       };
-       let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Preprocess, attempt };
-       match self.handle_data(&removed, &data_spec, &commitments.encode(), &signed) {
-         Accumulation::Ready(DataSet::Participating(mut commitments)) => {
-           log::info!("got all DkgCommitments for {}", hex::encode(genesis));
-           unflatten(self.spec, &removed, &mut commitments);
-           self
-             .processors
-             .send(
-               self.spec.set().network,
-               key_gen::CoordinatorMessage::Commitments {
-                 id: KeyGenId { session: self.spec.set().session, attempt },
-                 commitments,
-               },
-             )
-             .await;
-         }
-         Accumulation::Ready(DataSet::NotParticipating) => {
-           assert!(
-             removed.contains(&(Ristretto::generator() * self.our_key.deref())),
-             "NotParticipating in a DkgCommitments we weren't removed for"
-           );
-         }
-         Accumulation::NotReady => {}
-       }
-     }
-
-     Transaction::DkgShares { attempt, mut shares, confirmation_nonces, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgShares with an unrecognized attempt");
-         return;
-       };
-       let not_participating = removed.contains(&(Ristretto::generator() * self.our_key.deref()));
-
-       let Ok(()) = self.check_sign_data_len(&removed, signed.signer, shares.len()) else {
-         return;
-       };
-
-       let Some(sender_i) = self.spec.i(&removed, signed.signer) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "DkgShares for a DKG they aren't participating in",
-         );
-         return;
-       };
-       let sender_is_len = u16::from(sender_i.end) - u16::from(sender_i.start);
-       for shares in &shares {
-         if shares.len() != (usize::from(self.spec.n(&removed) - sender_is_len)) {
-           self.fatal_slash(signed.signer.to_bytes(), "invalid amount of DKG shares");
-           return;
-         }
-       }
-
-       // Save each share as needed for blame
-       for (from_offset, shares) in shares.iter().enumerate() {
-         let from =
-           Participant::new(u16::from(sender_i.start) + u16::try_from(from_offset).unwrap())
-             .unwrap();
-
-         for (to_offset, share) in shares.iter().enumerate() {
-           // 0-indexed (the enumeration) to 1-indexed (Participant)
-           let mut to = u16::try_from(to_offset).unwrap() + 1;
-           // Adjust for the omission of the sender's own shares
-           if to >= u16::from(sender_i.start) {
-             to += u16::from(sender_i.end) - u16::from(sender_i.start);
-           }
-           let to = Participant::new(to).unwrap();
-
-           DkgShare::set(self.txn, genesis, from.into(), to.into(), share);
-         }
-       }
-
-       // Filter down to only our share's bytes for handle
-       let our_shares = if let Some(our_i) =
-         self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-       {
-         if sender_i == our_i {
-           vec![]
-         } else {
-           // 1-indexed to 0-indexed
-           let mut our_i_pos = u16::from(our_i.start) - 1;
-           // Handle the omission of the sender's own data
-           if u16::from(our_i.start) > u16::from(sender_i.start) {
-             our_i_pos -= sender_is_len;
-           }
-           let our_i_pos = usize::from(our_i_pos);
-           shares
-             .iter_mut()
-             .map(|shares| {
-               shares
-                 .drain(
-                   our_i_pos ..
-                     (our_i_pos + usize::from(u16::from(our_i.end) - u16::from(our_i.start))),
-                 )
-                 .collect::<Vec<_>>()
-             })
-             .collect()
-         }
-       } else {
-         assert!(
-           not_participating,
-           "we didn't have an i while handling DkgShares we weren't removed for"
-         );
-         // Since we're not participating, simply save vec![] for our shares
-         vec![]
-       };
-       // Drop shares as it's presumably been mutated into invalidity
-       drop(shares);
-
-       let data_spec = DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt };
-       let encoded_data = (confirmation_nonces.to_vec(), our_shares.encode()).encode();
-       match self.handle_data(&removed, &data_spec, &encoded_data, &signed) {
-         Accumulation::Ready(DataSet::Participating(confirmation_nonces_and_shares)) => {
-           log::info!("got all DkgShares for {}", hex::encode(genesis));
-
-           let mut confirmation_nonces = HashMap::new();
-           let mut shares = HashMap::new();
-           for (participant, confirmation_nonces_and_shares) in confirmation_nonces_and_shares {
-             let (these_confirmation_nonces, these_shares) =
-               <(Vec<u8>, Vec<u8>)>::decode(&mut confirmation_nonces_and_shares.as_slice())
-                 .unwrap();
-             confirmation_nonces.insert(participant, these_confirmation_nonces);
-             shares.insert(participant, these_shares);
-           }
-           ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
-
-           // shares is a HashMap<Participant, Vec<Vec<Vec<u8>>>>, with the values representing:
-           // - Each of the sender's shares
-           // - Each of the our shares
-           // - Each share
-           // We need a Vec<HashMap<Participant, Vec<u8>>>, with the outer being each of ours
-           let mut expanded_shares = vec![];
-           for (sender_start_i, shares) in shares {
-             let shares: Vec<Vec<Vec<u8>>> = Vec::<_>::decode(&mut shares.as_slice()).unwrap();
-             for (sender_i_offset, our_shares) in shares.into_iter().enumerate() {
-               for (our_share_i, our_share) in our_shares.into_iter().enumerate() {
-                 if expanded_shares.len() <= our_share_i {
-                   expanded_shares.push(HashMap::new());
-                 }
-                 expanded_shares[our_share_i].insert(
-                   Participant::new(
-                     u16::from(sender_start_i) + u16::try_from(sender_i_offset).unwrap(),
-                   )
-                   .unwrap(),
-                   our_share,
-                 );
-               }
-             }
-           }
-
-           self
-             .processors
-             .send(
-               self.spec.set().network,
-               key_gen::CoordinatorMessage::Shares {
-                 id: KeyGenId { session: self.spec.set().session, attempt },
-                 shares: expanded_shares,
-               },
-             )
-             .await;
-         }
-         Accumulation::Ready(DataSet::NotParticipating) => {
-           assert!(not_participating, "NotParticipating in a DkgShares we weren't removed for");
-         }
-         Accumulation::NotReady => {}
-       }
-     }
-
-     Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self
-           .fatal_slash(signed.signer.to_bytes(), "InvalidDkgShare with an unrecognized attempt");
-         return;
-       };
-       let Some(range) = self.spec.i(&removed, signed.signer) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "InvalidDkgShare for a DKG they aren't participating in",
-         );
-         return;
-       };
-       if !range.contains(&accuser) {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "accused with a Participant index which wasn't theirs",
-         );
-         return;
-       }
-       if range.contains(&faulty) {
-         self.fatal_slash(signed.signer.to_bytes(), "accused self of having an InvalidDkgShare");
-         return;
-       }
-
-       let Some(share) = DkgShare::get(self.txn, genesis, accuser.into(), faulty.into()) else {
-         self.fatal_slash(
-           signed.signer.to_bytes(),
-           "InvalidDkgShare had a non-existent faulty participant",
-         );
-         return;
-       };
+     Transaction::DkgParticipation { participation, signed } => {
+       // Send the participation to the processor
        self
          .processors
          .send(
            self.spec.set().network,
-           key_gen::CoordinatorMessage::VerifyBlame {
-             id: KeyGenId { session: self.spec.set().session, attempt },
-             accuser,
-             accused: faulty,
-             share,
-             blame,
+           key_gen::CoordinatorMessage::Participation {
+             session: self.spec.set().session,
+             participant: self
+               .spec
+               .i(signed.signer)
+               .expect("signer wasn't a validator for this network?")
+               .start,
+             participation,
            },
          )
          .await;
      }

-     Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
-       let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-         self.fatal_slash(signed.signer.to_bytes(), "DkgConfirmed with an unrecognized attempt");
-         return;
-       };
+     Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
+       let data_spec =
+         DataSpecification { topic: Topic::DkgConfirmation, label: Label::Preprocess, attempt };
+       match self.handle_data(&data_spec, &confirmation_nonces.to_vec(), &signed) {
+         Accumulation::Ready(DataSet::Participating(confirmation_nonces)) => {
+           log::info!(
+             "got all DkgConfirmationNonces for {}, attempt {attempt}",
+             hex::encode(genesis)
+           );
+
+           ConfirmationNonces::set(self.txn, genesis, attempt, &confirmation_nonces);
+
+           // Send the expected DkgConfirmationShare
+           // TODO: Slight race condition here due to set, publish tx, then commit txn
+           let key_pair = DkgKeyPair::get(self.txn, genesis)
+             .expect("participating in confirming key we don't have");
+           let mut tx = match DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
+             .share(confirmation_nonces, &key_pair)
+           {
+             Ok(confirmation_share) => Transaction::DkgConfirmationShare {
+               attempt,
+               confirmation_share,
+               signed: Transaction::empty_signed(),
+             },
+             Err(participant) => Transaction::RemoveParticipant {
+               participant: self.spec.reverse_lookup_i(participant).unwrap(),
+               signed: Transaction::empty_signed(),
+             },
+           };
+           tx.sign(&mut OsRng, genesis, self.our_key);
+           self.publish_tributary_tx.publish_tributary_tx(tx).await;
+         }
+         Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
+       }
+     }
+
+     Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
        let data_spec =
          DataSpecification { topic: Topic::DkgConfirmation, label: Label::Share, attempt };
-       match self.handle_data(&removed, &data_spec, &confirmation_share.to_vec(), &signed) {
+       match self.handle_data(&data_spec, &confirmation_share.to_vec(), &signed) {
          Accumulation::Ready(DataSet::Participating(shares)) => {
-           log::info!("got all DkgConfirmed for {}", hex::encode(genesis));
-
-           let Some(removed) = removed_as_of_dkg_attempt(self.txn, genesis, attempt) else {
-             panic!(
-               "DkgConfirmed for everyone yet didn't have the removed parties for this attempt",
-             );
-           };
+           log::info!(
+             "got all DkgConfirmationShare for {}, attempt {attempt}",
+             hex::encode(genesis)
+           );

            let preprocesses = ConfirmationNonces::get(self.txn, genesis, attempt).unwrap();

            // TODO: This can technically happen under very very very specific timing as the txn
-           // put happens before DkgConfirmed, yet the txn commit isn't guaranteed to
-           let key_pair = DkgKeyPair::get(self.txn, genesis, attempt).expect(
-             "in DkgConfirmed handling, which happens after everyone \
-             (including us) fires DkgConfirmed, yet no confirming key pair",
+           // put happens before DkgConfirmationShare, yet the txn isn't guaranteed to be
+           // committed
+           let key_pair = DkgKeyPair::get(self.txn, genesis).expect(
+             "in DkgConfirmationShare handling, which happens after everyone \
+             (including us) fires DkgConfirmationShare, yet no confirming key pair",
            );
-           let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt)
-             .expect("confirming DKG for unrecognized attempt");
+
+           // Determine the bitstring representing who participated before we move `shares`
+           let validators = self.spec.validators();
+           let mut signature_participants = bitvec::vec::BitVec::with_capacity(validators.len());
+           for (participant, _) in validators {
+             signature_participants.push(
+               (participant == (<Ristretto as Ciphersuite>::generator() * self.our_key.deref())) ||
+                 shares.contains_key(&self.spec.i(participant).unwrap().start),
+             );
+           }
+
+           // Produce the final signature
+           let mut confirmer = DkgConfirmer::new(self.our_key, self.spec, self.txn, attempt);
            let sig = match confirmer.complete(preprocesses, &key_pair, shares) {
              Ok(sig) => sig,
              Err(p) => {
-               let mut tx = Transaction::RemoveParticipantDueToDkg {
-                 participant: self.spec.reverse_lookup_i(&removed, p).unwrap(),
+               let mut tx = Transaction::RemoveParticipant {
+                 participant: self.spec.reverse_lookup_i(p).unwrap(),
                  signed: Transaction::empty_signed(),
                };
                tx.sign(&mut OsRng, genesis, self.our_key);
@@ -544,23 +348,18 @@ impl<
|
|||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
|
||||||
DkgLocallyCompleted::set(self.txn, genesis, &());
|
|
||||||
|
|
||||||
self
|
self
|
||||||
.publish_serai_tx
|
.publish_serai_tx
|
||||||
.publish_set_keys(
|
.publish_set_keys(
|
||||||
self.db,
|
self.db,
|
||||||
self.spec.set(),
|
self.spec.set(),
|
||||||
removed.into_iter().map(|key| key.to_bytes().into()).collect(),
|
|
||||||
key_pair,
|
key_pair,
|
||||||
|
signature_participants,
|
||||||
Signature(sig),
|
Signature(sig),
|
||||||
)
|
)
|
||||||
.await;
|
.await;
|
||||||
}
|
}
|
||||||
Accumulation::Ready(DataSet::NotParticipating) => {
|
Accumulation::Ready(DataSet::NotParticipating) | Accumulation::NotReady => {}
|
||||||
panic!("wasn't a participant in DKG confirmination shares")
|
|
||||||
}
|
|
||||||
Accumulation::NotReady => {}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -618,19 +417,8 @@ impl<
|
|||||||
}
|
}
|
||||||
|
|
||||||
Transaction::SubstrateSign(data) => {
|
Transaction::SubstrateSign(data) => {
|
||||||
// Provided transactions ensure synchrony on any signing protocol, and we won't start
|
|
||||||
// signing with threshold keys before we've confirmed them on-chain
|
|
||||||
let Some(removed) =
|
|
||||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
|
||||||
else {
|
|
||||||
self.fatal_slash(
|
|
||||||
data.signed.signer.to_bytes(),
|
|
||||||
"signing despite not having set keys on substrate",
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
let signer = data.signed.signer;
|
let signer = data.signed.signer;
|
||||||
let Ok(()) = self.check_sign_data_len(&removed, signer, data.data.len()) else {
|
let Ok(()) = self.check_sign_data_len(signer, data.data.len()) else {
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
let expected_len = match data.label {
|
let expected_len = match data.label {
|
||||||
@@ -653,11 +441,11 @@ impl<
|
|||||||
attempt: data.attempt,
|
attempt: data.attempt,
|
||||||
};
|
};
|
||||||
let Accumulation::Ready(DataSet::Participating(mut results)) =
|
let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
self.handle_data(&data_spec, &data.data.encode(), &data.signed)
|
||||||
else {
|
else {
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
unflatten(self.spec, &removed, &mut results);
|
unflatten(self.spec, &mut results);
|
||||||
|
|
||||||
let id = SubstrateSignId {
|
let id = SubstrateSignId {
|
||||||
session: self.spec.set().session,
|
session: self.spec.set().session,
|
||||||
@@ -678,16 +466,7 @@ impl<
|
|||||||
}
|
}
|
||||||
|
|
||||||
Transaction::Sign(data) => {
|
Transaction::Sign(data) => {
|
||||||
let Some(removed) =
|
let Ok(()) = self.check_sign_data_len(data.signed.signer, data.data.len()) else {
|
||||||
crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
|
|
||||||
else {
|
|
||||||
self.fatal_slash(
|
|
||||||
data.signed.signer.to_bytes(),
|
|
||||||
"signing despite not having set keys on substrate",
|
|
||||||
);
|
|
||||||
return;
|
|
||||||
};
|
|
||||||
let Ok(()) = self.check_sign_data_len(&removed, data.signed.signer, data.data.len()) else {
|
|
||||||
return;
|
return;
|
||||||
};
|
};
|
||||||
|
|
||||||
@@ -697,9 +476,9 @@ impl<
|
|||||||
attempt: data.attempt,
|
attempt: data.attempt,
|
||||||
};
|
};
|
||||||
if let Accumulation::Ready(DataSet::Participating(mut results)) =
|
if let Accumulation::Ready(DataSet::Participating(mut results)) =
|
||||||
self.handle_data(&removed, &data_spec, &data.data.encode(), &data.signed)
|
self.handle_data(&data_spec, &data.data.encode(), &data.signed)
|
||||||
{
|
{
|
||||||
unflatten(self.spec, &removed, &mut results);
|
unflatten(self.spec, &mut results);
|
||||||
let id =
|
let id =
|
||||||
SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
|
SignId { session: self.spec.set().session, id: data.plan, attempt: data.attempt };
|
||||||
self
|
self
|
||||||
@@ -740,8 +519,7 @@ impl<
|
|||||||
}
|
}
|
||||||
|
|
||||||
Transaction::SlashReport(points, signed) => {
|
Transaction::SlashReport(points, signed) => {
|
||||||
// Uses &[] as we only need the length which is independent to who else was removed
|
let signer_range = self.spec.i(signed.signer).unwrap();
|
||||||
let signer_range = self.spec.i(&[], signed.signer).unwrap();
|
|
||||||
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
|
let signer_len = u16::from(signer_range.end) - u16::from(signer_range.start);
|
||||||
if points.len() != (self.spec.validators().len() - 1) {
|
if points.len() != (self.spec.validators().len() - 1) {
|
||||||
self.fatal_slash(
|
self.fatal_slash(
|
||||||
|
|||||||
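The DkgConfirmationShare handler above derives a participation bitvec with one bit per validator, in the set's fixed ordering, marking ourselves plus everyone whose share was accumulated. A minimal, self-contained sketch of that derivation (not the repository's code; the key type and lookups are stand-ins):

use std::collections::HashSet;

use bitvec::{order::Lsb0, vec::BitVec};

// Hypothetical stand-in for a validator's serialized public key.
type Key = [u8; 32];

// One bit per validator: true if the validator is ourselves (we always
// contribute a share) or if we accumulated a share from them.
fn participation_bits(validators: &[Key], us: &Key, shares_from: &HashSet<Key>) -> BitVec<u8, Lsb0> {
  let mut bits = BitVec::with_capacity(validators.len());
  for validator in validators {
    bits.push((validator == us) || shares_from.contains(validator));
  }
  bits
}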
@@ -1,7 +1,3 @@
-use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};
-
-use serai_client::validator_sets::primitives::ValidatorSet;
-
 use tributary::{
   ReadWrite,
   transaction::{TransactionError, TransactionKind, Transaction as TransactionTrait},
@@ -24,39 +20,6 @@ pub use handle::*;

 pub mod scanner;

-pub fn removed_as_of_dkg_attempt(
-  getter: &impl Get,
-  genesis: [u8; 32],
-  attempt: u32,
-) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
-  if attempt == 0 {
-    Some(vec![])
-  } else {
-    RemovedAsOfDkgAttempt::get(getter, genesis, attempt).map(|keys| {
-      keys.iter().map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap()).collect()
-    })
-  }
-}
-
-pub fn removed_as_of_set_keys(
-  getter: &impl Get,
-  set: ValidatorSet,
-  genesis: [u8; 32],
-) -> Option<Vec<<Ristretto as Ciphersuite>::G>> {
-  // SeraiDkgCompleted has the key placed on-chain.
-  // This key can be uniquely mapped to an attempt so long as one participant was honest, which we
-  // assume as a presumably honest participant.
-  // Resolve from generated key to attempt to fatally slashed as of attempt.
-
-  // This expect will trigger if this is prematurely called and Substrate has tracked the keys yet
-  // we haven't locally synced and handled the Tributary
-  // All callers of this, at the time of writing, ensure the Tributary has sufficiently synced
-  // making the panic with context more desirable than the None
-  let attempt = KeyToDkgAttempt::get(getter, SeraiDkgCompleted::get(getter, set)?)
-    .expect("key completed on-chain didn't have an attempt related");
-  removed_as_of_dkg_attempt(getter, genesis, attempt)
-}
-
 pub async fn publish_signed_transaction<D: Db, P: crate::P2p>(
   txn: &mut D::Transaction<'_>,
   tributary: &Tributary<D, Transaction, P>,
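The helpers deleted above encoded one convention worth keeping in mind while reading the surrounding hunks: attempt 0, the initial DKG run, always has an empty removal set, while any later attempt only resolves if its removal set was persisted. A sketch of that shape, with the storage and key types as stand-ins:

// `persisted` stands in for the database lookup for this attempt's removal set.
fn removed_as_of_attempt(persisted: Option<Vec<[u8; 32]>>, attempt: u32) -> Option<Vec<[u8; 32]>> {
  if attempt == 0 {
    // The initial attempt, by definition, removed nobody
    Some(vec![])
  } else {
    // Later attempts resolve only if their removal set reached disk
    persisted
  }
}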
@@ -1,15 +1,17 @@
-use core::{marker::PhantomData, ops::Deref, future::Future, time::Duration};
-use std::{sync::Arc, collections::HashSet};
+use core::{marker::PhantomData, future::Future, time::Duration};
+use std::sync::Arc;

 use zeroize::Zeroizing;

+use rand_core::OsRng;
+
 use ciphersuite::{group::GroupEncoding, Ciphersuite, Ristretto};

 use tokio::sync::broadcast;

 use scale::{Encode, Decode};
 use serai_client::{
-  primitives::{SeraiAddress, Signature},
+  primitives::Signature,
   validator_sets::primitives::{KeyPair, ValidatorSet},
   Serai,
 };
@@ -67,8 +69,8 @@ pub trait PublishSeraiTransaction {
     &self,
     db: &(impl Sync + Get),
     set: ValidatorSet,
-    removed: Vec<SeraiAddress>,
     key_pair: KeyPair,
+    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     signature: Signature,
   );
 }
@@ -129,11 +131,12 @@ mod impl_pst_for_serai {
     &self,
     db: &(impl Sync + Get),
     set: ValidatorSet,
-    removed: Vec<SeraiAddress>,
     key_pair: KeyPair,
+    signature_participants: bitvec::vec::BitVec<u8, bitvec::order::Lsb0>,
     signature: Signature,
   ) {
-    let tx = SeraiValidatorSets::set_keys(set.network, removed, key_pair, signature);
+    let tx =
+      SeraiValidatorSets::set_keys(set.network, key_pair, signature_participants, signature);
     async fn check(serai: SeraiValidatorSets<'_>, set: ValidatorSet, (): ()) -> bool {
       if matches!(serai.keys(set).await, Ok(Some(_))) {
         log::info!("another coordinator set key pair for {:?}", set);
@@ -243,18 +246,15 @@ impl<

     let genesis = self.spec.genesis();

-    let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
-
     // Calculate the shares still present, spinning if not enough are
-    // still_present_shares is used by a below branch, yet it's a natural byproduct of checking if
-    // we should spin, hence storing it in a variable here
-    let still_present_shares = {
+    {
       // Start with the original n value
-      let mut present_shares = self.spec.n(&[]);
+      let mut present_shares = self.spec.n();
       // Remove everyone fatally slashed
+      let current_fatal_slashes = FatalSlashes::get_as_keys(self.txn, genesis);
       for removed in &current_fatal_slashes {
         let original_i_for_removed =
-          self.spec.i(&[], *removed).expect("removed party was never present");
+          self.spec.i(*removed).expect("removed party was never present");
         let removed_shares =
           u16::from(original_i_for_removed.end) - u16::from(original_i_for_removed.start);
         present_shares -= removed_shares;
@@ -270,79 +270,17 @@ impl<
         tokio::time::sleep(core::time::Duration::from_secs(60)).await;
       }
     }
-      present_shares
-    };
+    }

     for topic in ReattemptDb::take(self.txn, genesis, self.block_number) {
       let attempt = AttemptDb::start_next_attempt(self.txn, genesis, topic);
-      log::info!("re-attempting {topic:?} with attempt {attempt}");
+      log::info!("potentially re-attempting {topic:?} with attempt {attempt}");

       // Slash people who failed to participate as expected in the prior attempt
       {
         let prior_attempt = attempt - 1;
-        let (removed, expected_participants) = match topic {
-          Topic::Dkg => {
-            // Every validator who wasn't removed is expected to have participated
-            let removed =
-              crate::tributary::removed_as_of_dkg_attempt(self.txn, genesis, prior_attempt)
-                .expect("prior attempt didn't have its removed saved to disk");
-            let removed_set = removed.iter().copied().collect::<HashSet<_>>();
-            (
-              removed,
-              self
-                .spec
-                .validators()
-                .into_iter()
-                .filter_map(|(validator, _)| {
-                  Some(validator).filter(|validator| !removed_set.contains(validator))
-                })
-                .collect(),
-            )
-          }
-          Topic::DkgConfirmation => {
-            panic!("TODO: re-attempting DkgConfirmation when we should be re-attempting the Dkg")
-          }
-          Topic::SubstrateSign(_) | Topic::Sign(_) => {
-            let removed =
-              crate::tributary::removed_as_of_set_keys(self.txn, self.spec.set(), genesis)
-                .expect("SubstrateSign/Sign yet keys have yet to be set");
-            // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
-            let expected_participants = vec![];
-            (removed, expected_participants)
-          }
-        };
-
-        let (expected_topic, expected_label) = match topic {
-          Topic::Dkg => {
-            let n = self.spec.n(&removed);
-            // If we got all the DKG shares, we should be on DKG confirmation
-            let share_spec =
-              DataSpecification { topic: Topic::Dkg, label: Label::Share, attempt: prior_attempt };
-            if DataReceived::get(self.txn, genesis, &share_spec).unwrap_or(0) == n {
-              // Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-              // preprocess is part of Topic::Dkg Label::Share
-              (Topic::DkgConfirmation, Label::Share)
-            } else {
-              let preprocess_spec = DataSpecification {
-                topic: Topic::Dkg,
-                label: Label::Preprocess,
-                attempt: prior_attempt,
-              };
-              // If we got all the DKG preprocesses, DKG shares
-              if DataReceived::get(self.txn, genesis, &preprocess_spec).unwrap_or(0) == n {
-                // Label::Share since there is no Label::Preprocess for DkgConfirmation since the
-                // preprocess is part of Topic::Dkg Label::Share
-                (Topic::Dkg, Label::Share)
-              } else {
-                (Topic::Dkg, Label::Preprocess)
-              }
-            }
-          }
-          Topic::DkgConfirmation => unreachable!(),
-          // If we got enough participants to move forward, then we expect shares from them all
-          Topic::SubstrateSign(_) | Topic::Sign(_) => (topic, Label::Share),
-        };
+        // TODO: If 67% sent preprocesses, this should be them. Else, this should be vec![]
+        let expected_participants: Vec<<Ristretto as Ciphersuite>::G> = vec![];

         let mut did_not_participate = vec![];
         for expected_participant in expected_participants {
@@ -350,8 +288,9 @@ impl<
             self.txn,
             genesis,
             &DataSpecification {
-              topic: expected_topic,
-              label: expected_label,
+              topic,
+              // Since we got the preprocesses, we were supposed to get the shares
+              label: Label::Share,
               attempt: prior_attempt,
             },
             &expected_participant.to_bytes(),
@@ -367,15 +306,8 @@ impl<
         // Accordingly, clear did_not_participate
         // TODO

-        // If during the DKG, explicitly mark these people as having been offline
-        // TODO: If they were offline sufficiently long ago, don't strike them off
-        if topic == Topic::Dkg {
-          let mut existing = OfflineDuringDkg::get(self.txn, genesis).unwrap_or(vec![]);
-          for did_not_participate in did_not_participate {
-            existing.push(did_not_participate.to_bytes());
-          }
-          OfflineDuringDkg::set(self.txn, genesis, &existing);
-        }
+        // TODO: Increment the slash points of people who didn't preprocess in some expected window
+        // of time

         // Slash everyone who didn't participate as expected
         // This may be overzealous as if a minority detects a completion, they'll abort yet the
@@ -405,75 +337,22 @@ impl<
           then preprocesses. This only sends preprocesses).
         */
         match topic {
-          Topic::Dkg => {
-            let mut removed = current_fatal_slashes.clone();
+          Topic::DkgConfirmation => {
+            if SeraiDkgCompleted::get(self.txn, self.spec.set()).is_none() {
+              log::info!("re-attempting DKG confirmation with attempt {attempt}");

-            let t = self.spec.t();
-            {
-              let mut present_shares = still_present_shares;
-
-              // Load the parties marked as offline across the various attempts
-              let mut offline = OfflineDuringDkg::get(self.txn, genesis)
-                .unwrap_or(vec![])
-                .iter()
-                .map(|key| <Ristretto as Ciphersuite>::G::from_bytes(key).unwrap())
-                .collect::<Vec<_>>();
-              // Pop from the list to prioritize the removal of those recently offline
-              while let Some(offline) = offline.pop() {
-                // Make sure they weren't removed already (such as due to being fatally slashed)
-                // This also may trigger if they were offline across multiple attempts
-                if removed.contains(&offline) {
-                  continue;
-                }
-
-                // If we can remove them and still meet the threshold, do so
-                let original_i_for_offline =
-                  self.spec.i(&[], offline).expect("offline was never present?");
-                let offline_shares =
-                  u16::from(original_i_for_offline.end) - u16::from(original_i_for_offline.start);
-                if (present_shares - offline_shares) >= t {
-                  present_shares -= offline_shares;
-                  removed.push(offline);
-                }
-
-                // If we've removed as many people as we can, break
-                if present_shares == t {
-                  break;
-                }
-              }
-            }
-
-            RemovedAsOfDkgAttempt::set(
-              self.txn,
-              genesis,
-              attempt,
-              &removed.iter().map(<Ristretto as Ciphersuite>::G::to_bytes).collect(),
-            );
-
-            if DkgLocallyCompleted::get(self.txn, genesis).is_none() {
-              let Some(our_i) = self.spec.i(&removed, Ristretto::generator() * self.our_key.deref())
-              else {
-                continue;
+              // Since it wasn't completed, publish our nonces for the next attempt
+              let confirmation_nonces =
+                crate::tributary::dkg_confirmation_nonces(self.our_key, self.spec, self.txn, attempt);
+              let mut tx = Transaction::DkgConfirmationNonces {
+                attempt,
+                confirmation_nonces,
+                signed: Transaction::empty_signed(),
               };
+              tx.sign(&mut OsRng, genesis, self.our_key);
+              self.publish_tributary_tx.publish_tributary_tx(tx).await;
-
-              // Since it wasn't completed, instruct the processor to start the next attempt
-              let id =
-                processor_messages::key_gen::KeyGenId { session: self.spec.set().session, attempt };
-
-              let params =
-                frost::ThresholdParams::new(t, self.spec.n(&removed), our_i.start).unwrap();
-              let shares = u16::from(our_i.end) - u16::from(our_i.start);
-
-              self
-                .processors
-                .send(
-                  self.spec.set().network,
-                  processor_messages::key_gen::CoordinatorMessage::GenerateKey { id, params, shares },
-                )
-                .await;
             }
           }
-          Topic::DkgConfirmation => unreachable!(),
           Topic::SubstrateSign(inner_id) => {
             let id = processor_messages::coordinator::SubstrateSignId {
               session: self.spec.set().session,
@@ -490,6 +369,8 @@ impl<
             crate::cosign_evaluator::LatestCosign::get(self.txn, self.spec.set().network)
               .map_or(0, |cosign| cosign.block_number);
             if latest_cosign < block_number {
+              log::info!("re-attempting cosigning {block_number:?} with attempt {attempt}");
+
               // Instruct the processor to start the next attempt
               self
                 .processors
@@ -506,6 +387,8 @@ impl<
               SubstrateSignableId::Batch(batch) => {
                 // If the Batch hasn't appeared on-chain...
                 if BatchInstructionsHashDb::get(self.txn, self.spec.set().network, batch).is_none() {
+                  log::info!("re-attempting signing batch {batch:?} with attempt {attempt}");
+
                   // Instruct the processor to start the next attempt
                   // The processor won't continue if it's already signed a Batch
                   // Prior checking if the Batch is on-chain just may reduce the non-participating
@@ -523,6 +406,11 @@ impl<
           // If this Tributary hasn't been retired...
           // (published SlashReport/took too long to do so)
           if crate::RetiredTributaryDb::get(self.txn, self.spec.set()).is_none() {
+            log::info!(
+              "re-attempting signing slash report for {:?} with attempt {attempt}",
+              self.spec.set()
+            );
+
             let report = SlashReport::get(self.txn, self.spec.set())
               .expect("re-attempting signing a SlashReport we don't have?");
             self
@@ -569,8 +457,7 @@ impl<
       };
       // Assign them 0 points for themselves
       report.insert(i, 0);
-      // Uses &[] as we only need the length which is independent to who else was removed
-      let signer_i = self.spec.i(&[], validator).unwrap();
+      let signer_i = self.spec.i(validator).unwrap();
       let signer_len = u16::from(signer_i.end) - u16::from(signer_i.start);
       // Push `n` copies, one for each of their shares
       for _ in 0 .. signer_len {
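The spin check above is plain threshold arithmetic: with n total key shares and t = ((2 * n) / 3) + 1, signing stays possible only while the unslashed validators still hold at least t shares. A sketch under those assumptions, with per-validator share weights standing in for the spec lookups:

// Returns true if the remaining shares can still reach the signing threshold.
fn can_still_sign(weights: &[u16], slashed: &[usize]) -> bool {
  let n: u16 = weights.iter().copied().sum();
  let t = ((2 * n) / 3) + 1;
  let removed: u16 = slashed.iter().map(|i| weights[*i]).sum();
  (n - removed) >= t
}

// e.g. weights [1, 1, 1, 1] give n = 4, t = 3: one slash is tolerable, two are not.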
@@ -55,7 +55,7 @@
 */

 use core::ops::Deref;
-use std::collections::HashMap;
+use std::collections::{HashSet, HashMap};

 use zeroize::{Zeroize, Zeroizing};

@@ -63,10 +63,7 @@ use rand_core::OsRng;

 use blake2::{Digest, Blake2s256};

-use ciphersuite::{
-  group::{ff::PrimeField, GroupEncoding},
-  Ciphersuite, Ristretto,
-};
+use ciphersuite::{group::ff::PrimeField, Ciphersuite, Ristretto};
 use frost::{
   FrostError,
   dkg::{Participant, musig::musig},
@@ -77,10 +74,7 @@ use frost_schnorrkel::Schnorrkel;

 use scale::Encode;

-use serai_client::{
-  Public,
-  validator_sets::primitives::{KeyPair, musig_context, set_keys_message},
-};
+use serai_client::validator_sets::primitives::{KeyPair, musig_context, set_keys_message};

 use serai_db::*;

@@ -89,6 +83,7 @@ use crate::tributary::TributarySpec;
 create_db!(
   SigningProtocolDb {
     CachedPreprocesses: (context: &impl Encode) -> [u8; 32]
+    DataSignedWith: (context: &impl Encode) -> (Vec<u8>, HashMap<Participant, Vec<u8>>),
   }
 );

@@ -117,16 +112,22 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
     };
     let encryption_key_slice: &mut [u8] = encryption_key.as_mut();

-    let algorithm = Schnorrkel::new(b"substrate");
+    // Create the MuSig keys
     let keys: ThresholdKeys<Ristretto> =
       musig(&musig_context(self.spec.set()), self.key, participants)
         .expect("signing for a set we aren't in/validator present multiple times")
         .into();

+    // Define the algorithm
+    let algorithm = Schnorrkel::new(b"substrate");
+
+    // Check if we've prior preprocessed
     if CachedPreprocesses::get(self.txn, &self.context).is_none() {
+      // If we haven't, we create a machine solely to obtain the preprocess with
       let (machine, _) =
         AlgorithmMachine::new(algorithm.clone(), keys.clone()).preprocess(&mut OsRng);

+      // Cache and save the preprocess to disk
       let mut cache = machine.cache();
       assert_eq!(cache.0.len(), 32);
       #[allow(clippy::needless_range_loop)]
@@ -137,13 +138,15 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
       CachedPreprocesses::set(self.txn, &self.context, &cache.0);
     }

+    // We're now guaranteed to have the preprocess, hence why this `unwrap` is safe
     let cached = CachedPreprocesses::get(self.txn, &self.context).unwrap();
-    let mut cached: Zeroizing<[u8; 32]> = Zeroizing::new(cached);
+    let mut cached = Zeroizing::new(cached);
     #[allow(clippy::needless_range_loop)]
     for b in 0 .. 32 {
       cached[b] ^= encryption_key_slice[b];
     }
     encryption_key_slice.zeroize();
+    // Create the machine from the cached preprocess
     let (machine, preprocess) =
       AlgorithmSignMachine::from_cache(algorithm, keys, CachedPreprocess(cached));

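The cached-preprocess handling above persists the 32-byte cache XORed against a per-context encryption key, so the same XOR both encrypts on write and decrypts on read. A minimal sketch of that masking, assuming both buffers are exactly 32 bytes (the real code also zeroizes the key slice afterwards):

use zeroize::Zeroizing;

// XOR the stored cache with the encryption key; applying this twice round-trips,
// so one routine serves as both the encryption and the decryption.
fn xor_mask(cached: [u8; 32], key: &[u8; 32]) -> Zeroizing<[u8; 32]> {
  let mut cached = Zeroizing::new(cached);
  for b in 0 .. 32 {
    cached[b] ^= key[b];
  }
  cached
}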
@@ -156,8 +159,29 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
     mut serialized_preprocesses: HashMap<Participant, Vec<u8>>,
     msg: &[u8],
   ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    let machine = self.preprocess_internal(participants).0;
+    // We can't clear the preprocess as we still need it to accumulate all of the shares
+    // We do save the message we signed so any future calls with distinct messages panic
+    // This assumes the txn deciding this data is committed before the share is broadcast
+    if let Some((existing_msg, existing_preprocesses)) =
+      DataSignedWith::get(self.txn, &self.context)
+    {
+      assert_eq!(msg, &existing_msg, "obtaining a signature share for a distinct message");
+      assert_eq!(
+        &serialized_preprocesses, &existing_preprocesses,
+        "obtaining a signature share with a distinct set of preprocesses"
+      );
+    } else {
+      DataSignedWith::set(
+        self.txn,
+        &self.context,
+        &(msg.to_vec(), serialized_preprocesses.clone()),
+      );
+    }
+
+    // Get the preprocessed machine
+    let (machine, _) = self.preprocess_internal(participants);
+
+    // Deserialize all the preprocesses
     let mut participants = serialized_preprocesses.keys().copied().collect::<Vec<_>>();
     participants.sort();
     let mut preprocesses = HashMap::new();
@@ -170,13 +194,14 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
       );
     }

+    // Sign the share
     let (machine, share) = machine.sign(preprocesses, msg).map_err(|e| match e {
       FrostError::InternalError(e) => unreachable!("FrostError::InternalError {e}"),
       FrostError::InvalidParticipant(_, _) |
       FrostError::InvalidSigningSet(_) |
       FrostError::InvalidParticipantQuantity(_, _) |
       FrostError::DuplicatedParticipant(_) |
-      FrostError::MissingParticipant(_) => unreachable!("{e:?}"),
+      FrostError::MissingParticipant(_) => panic!("unexpected error during sign: {e:?}"),
       FrostError::InvalidPreprocess(p) | FrostError::InvalidShare(p) => p,
     })?;

@@ -207,24 +232,24 @@ impl<T: DbTxn, C: Encode> SigningProtocol<'_, T, C> {
 }

 // Get the keys of the participants, noted by their threshold is, and return a new map indexed by
-// the MuSig is.
+// their MuSig is.
 fn threshold_i_map_to_keys_and_musig_i_map(
   spec: &TributarySpec,
-  removed: &[<Ristretto as Ciphersuite>::G],
   our_key: &Zeroizing<<Ristretto as Ciphersuite>::F>,
   mut map: HashMap<Participant, Vec<u8>>,
 ) -> (Vec<<Ristretto as Ciphersuite>::G>, HashMap<Participant, Vec<u8>>) {
   // Insert our own index so calculations aren't offset
   let our_threshold_i = spec
-    .i(removed, <Ristretto as Ciphersuite>::generator() * our_key.deref())
-    .expect("MuSig t-of-n signing a for a protocol we were removed from")
+    .i(<Ristretto as Ciphersuite>::generator() * our_key.deref())
+    .expect("not in a set we're signing for")
     .start;
+  // Asserts we weren't unexpectedly already present
   assert!(map.insert(our_threshold_i, vec![]).is_none());

   let spec_validators = spec.validators();
   let key_from_threshold_i = |threshold_i| {
     for (key, _) in &spec_validators {
-      if threshold_i == spec.i(removed, *key).expect("MuSig t-of-n participant was removed").start {
+      if threshold_i == spec.i(*key).expect("validator wasn't in a set they're in").start {
         return *key;
       }
     }
@@ -235,19 +260,27 @@ fn threshold_i_map_to_keys_and_musig_i_map(
   let mut threshold_is = map.keys().copied().collect::<Vec<_>>();
   threshold_is.sort();
   for threshold_i in threshold_is {
-    sorted.push((key_from_threshold_i(threshold_i), map.remove(&threshold_i).unwrap()));
+    sorted.push((
+      threshold_i,
+      key_from_threshold_i(threshold_i),
+      map.remove(&threshold_i).unwrap(),
+    ));
   }

   // Now that signers are sorted, with their shares, create a map with the is needed for MuSig
   let mut participants = vec![];
   let mut map = HashMap::new();
-  for (raw_i, (key, share)) in sorted.into_iter().enumerate() {
-    let musig_i = u16::try_from(raw_i).unwrap() + 1;
+  let mut our_musig_i = None;
+  for (raw_i, (threshold_i, key, share)) in sorted.into_iter().enumerate() {
+    let musig_i = Participant::new(u16::try_from(raw_i).unwrap() + 1).unwrap();
+    if threshold_i == our_threshold_i {
+      our_musig_i = Some(musig_i);
+    }
     participants.push(key);
-    map.insert(Participant::new(musig_i).unwrap(), share);
+    map.insert(musig_i, share);
   }

-  map.remove(&our_threshold_i).unwrap();
+  map.remove(&our_musig_i.unwrap()).unwrap();

   (participants, map)
 }
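threshold_i_map_to_keys_and_musig_i_map above compacts the sparse threshold indices into the contiguous 1..=n indices MuSig expects, remembering which compact index is ours so our placeholder entry can be removed at the end. The same re-indexing in isolation, with plain u16s standing in for Participant and raw bytes for the shares:

use std::collections::HashMap;

// Sort the sparse threshold indices, then hand out MuSig indices 1..=n in that
// order; returns the re-keyed map and the MuSig index assigned to `our_threshold_i`.
fn remap(mut by_threshold_i: HashMap<u16, Vec<u8>>, our_threshold_i: u16) -> (HashMap<u16, Vec<u8>>, u16) {
  let mut threshold_is: Vec<u16> = by_threshold_i.keys().copied().collect();
  threshold_is.sort();
  let mut by_musig_i = HashMap::new();
  let mut our_musig_i = 0;
  for (raw_i, threshold_i) in threshold_is.into_iter().enumerate() {
    let musig_i = u16::try_from(raw_i).unwrap() + 1;
    if threshold_i == our_threshold_i {
      our_musig_i = musig_i;
    }
    by_musig_i.insert(musig_i, by_threshold_i.remove(&threshold_i).unwrap());
  }
  (by_musig_i, our_musig_i)
}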
@@ -257,7 +290,6 @@ type DkgConfirmerSigningProtocol<'a, T> = SigningProtocol<'a, T, (&'static [u8;
 pub(crate) struct DkgConfirmer<'a, T: DbTxn> {
   key: &'a Zeroizing<<Ristretto as Ciphersuite>::F>,
   spec: &'a TributarySpec,
-  removed: Vec<<Ristretto as Ciphersuite>::G>,
   txn: &'a mut T,
   attempt: u32,
 }
@@ -268,19 +300,19 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     spec: &'a TributarySpec,
     txn: &'a mut T,
     attempt: u32,
-  ) -> Option<DkgConfirmer<'a, T>> {
-    // This relies on how confirmations are inlined into the DKG protocol and they accordingly
-    // share attempts
-    let removed = crate::tributary::removed_as_of_dkg_attempt(txn, spec.genesis(), attempt)?;
-    Some(DkgConfirmer { key, spec, removed, txn, attempt })
+  ) -> DkgConfirmer<'a, T> {
+    DkgConfirmer { key, spec, txn, attempt }
   }

   fn signing_protocol(&mut self) -> DkgConfirmerSigningProtocol<'_, T> {
     let context = (b"DkgConfirmer", self.attempt);
     SigningProtocol { key: self.key, spec: self.spec, txn: self.txn, context }
   }

   fn preprocess_internal(&mut self) -> (AlgorithmSignMachine<Ristretto, Schnorrkel>, [u8; 64]) {
-    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
+    // This preprocesses with just us as we only decide the participants after obtaining
+    // preprocesses
+    let participants = vec![<Ristretto as Ciphersuite>::generator() * self.key.deref()];
     self.signing_protocol().preprocess_internal(&participants)
   }
   // Get the preprocess for this confirmation.
@@ -293,14 +325,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     preprocesses: HashMap<Participant, Vec<u8>>,
     key_pair: &KeyPair,
   ) -> Result<(AlgorithmSignatureMachine<Ristretto, Schnorrkel>, [u8; 32]), Participant> {
-    let participants = self.spec.validators().iter().map(|val| val.0).collect::<Vec<_>>();
-    let preprocesses =
-      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, preprocesses).1;
-    let msg = set_keys_message(
-      &self.spec.set(),
-      &self.removed.iter().map(|key| Public(key.to_bytes())).collect::<Vec<_>>(),
-      key_pair,
-    );
+    let (participants, preprocesses) =
+      threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, preprocesses);
+    let msg = set_keys_message(&self.spec.set(), key_pair);
     self.signing_protocol().share_internal(&participants, preprocesses, &msg)
   }
   // Get the share for this confirmation, if the preprocesses are valid.
@@ -318,8 +345,9 @@ impl<T: DbTxn> DkgConfirmer<'_, T> {
     key_pair: &KeyPair,
     shares: HashMap<Participant, Vec<u8>>,
   ) -> Result<[u8; 64], Participant> {
-    let shares =
-      threshold_i_map_to_keys_and_musig_i_map(self.spec, &self.removed, self.key, shares).1;
+    assert_eq!(preprocesses.keys().collect::<HashSet<_>>(), shares.keys().collect::<HashSet<_>>());
+
+    let shares = threshold_i_map_to_keys_and_musig_i_map(self.spec, self.key, shares).1;

     let machine = self
       .share_internal(preprocesses, key_pair)
@@ -9,7 +9,7 @@ use frost::Participant;
 use scale::Encode;
 use borsh::{BorshSerialize, BorshDeserialize};

-use serai_client::{primitives::PublicKey, validator_sets::primitives::ValidatorSet};
+use serai_client::validator_sets::primitives::ValidatorSet;

 fn borsh_serialize_validators<W: io::Write>(
   validators: &Vec<(<Ristretto as Ciphersuite>::G, u16)>,
@@ -49,6 +49,7 @@ pub struct TributarySpec {
     deserialize_with = "borsh_deserialize_validators"
   )]
   validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+  evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
 }

 impl TributarySpec {
@@ -56,16 +57,10 @@ impl TributarySpec {
     serai_block: [u8; 32],
     start_time: u64,
     set: ValidatorSet,
-    set_participants: Vec<(PublicKey, u16)>,
+    validators: Vec<(<Ristretto as Ciphersuite>::G, u16)>,
+    evrf_public_keys: Vec<([u8; 32], Vec<u8>)>,
   ) -> TributarySpec {
-    let mut validators = vec![];
-    for (participant, shares) in set_participants {
-      let participant = <Ristretto as Ciphersuite>::read_G::<&[u8]>(&mut participant.0.as_ref())
-        .expect("invalid key registered as participant");
-      validators.push((participant, shares));
-    }
-
-    Self { serai_block, start_time, set, validators }
+    Self { serai_block, start_time, set, validators, evrf_public_keys }
   }

   pub fn set(&self) -> ValidatorSet {
@@ -74,7 +69,7 @@ impl TributarySpec {

   pub fn genesis(&self) -> [u8; 32] {
     // Calculate the genesis for this Tributary
-    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis Testnet 2.1");
+    let mut genesis = RecommendedTranscript::new(b"Serai Tributary Genesis");
     // This locks it to a specific Serai chain
     genesis.append_message(b"serai_block", self.serai_block);
     genesis.append_message(b"session", self.set.session.0.to_le_bytes());
@@ -88,24 +83,15 @@ impl TributarySpec {
     self.start_time
   }

-  pub fn n(&self, removed_validators: &[<Ristretto as Ciphersuite>::G]) -> u16 {
-    self
-      .validators
-      .iter()
-      .map(|(validator, weight)| if removed_validators.contains(validator) { 0 } else { *weight })
-      .sum()
+  pub fn n(&self) -> u16 {
+    self.validators.iter().map(|(_, weight)| *weight).sum()
   }

   pub fn t(&self) -> u16 {
-    // t doesn't change with regards to the amount of removed validators
-    ((2 * self.n(&[])) / 3) + 1
+    ((2 * self.n()) / 3) + 1
   }

-  pub fn i(
-    &self,
-    removed_validators: &[<Ristretto as Ciphersuite>::G],
-    key: <Ristretto as Ciphersuite>::G,
-  ) -> Option<Range<Participant>> {
+  pub fn i(&self, key: <Ristretto as Ciphersuite>::G) -> Option<Range<Participant>> {
     let mut all_is = HashMap::new();
     let mut i = 1;
     for (validator, weight) in &self.validators {
@@ -116,34 +102,12 @@ impl TributarySpec {
       i += weight;
     }

-    let original_i = all_is.get(&key)?.clone();
-    let mut result_i = original_i.clone();
-    for removed_validator in removed_validators {
-      let removed_i = all_is
-        .get(removed_validator)
-        .expect("removed validator wasn't present in set to begin with");
-      // If the queried key was removed, return None
-      if &original_i == removed_i {
-        return None;
-      }
-
-      // If the removed was before the queried, shift the queried down accordingly
-      if removed_i.start < original_i.start {
-        let removed_shares = u16::from(removed_i.end) - u16::from(removed_i.start);
-        result_i.start = Participant::new(u16::from(original_i.start) - removed_shares).unwrap();
-        result_i.end = Participant::new(u16::from(original_i.end) - removed_shares).unwrap();
-      }
-    }
-    Some(result_i)
+    Some(all_is.get(&key)?.clone())
   }

-  pub fn reverse_lookup_i(
-    &self,
-    removed_validators: &[<Ristretto as Ciphersuite>::G],
-    i: Participant,
-  ) -> Option<<Ristretto as Ciphersuite>::G> {
+  pub fn reverse_lookup_i(&self, i: Participant) -> Option<<Ristretto as Ciphersuite>::G> {
     for (validator, _) in &self.validators {
-      if self.i(removed_validators, *validator).map_or(false, |range| range.contains(&i)) {
+      if self.i(*validator).map_or(false, |range| range.contains(&i)) {
         return Some(*validator);
       }
     }
@@ -153,4 +117,8 @@ impl TributarySpec {
   pub fn validators(&self) -> Vec<(<Ristretto as Ciphersuite>::G, u64)> {
     self.validators.iter().map(|(validator, weight)| (*validator, u64::from(*weight))).collect()
   }
+
+  pub fn evrf_public_keys(&self) -> Vec<([u8; 32], Vec<u8>)> {
+    self.evrf_public_keys.clone()
+  }
 }
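TributarySpec::i in the hunk above hands each validator a contiguous range of threshold indices sized by its weight (its number of key shares), starting from 1, which is why dropping the removed_validators parameter collapses the function to a single map lookup. The range assignment in isolation, with byte arrays standing in for the Ristretto keys:

use std::{collections::HashMap, ops::Range};

// Walk the validators in order, handing each a range of `weight` indices.
fn index_ranges(validators: &[([u8; 32], u16)]) -> HashMap<[u8; 32], Range<u16>> {
  let mut all_is = HashMap::new();
  let mut i: u16 = 1;
  for (validator, weight) in validators {
    all_is.insert(*validator, i .. (i + weight));
    i += weight;
  }
  all_is
}

// With weights [2, 1, 3], the ranges are 1..3, 3..4, and 4..7 respectively.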
@@ -12,7 +12,6 @@ use ciphersuite::{
|
|||||||
Ciphersuite, Ristretto,
|
Ciphersuite, Ristretto,
|
||||||
};
|
};
|
||||||
use schnorr::SchnorrSignature;
|
use schnorr::SchnorrSignature;
|
||||||
use frost::Participant;
|
|
||||||
|
|
||||||
use scale::{Encode, Decode};
|
use scale::{Encode, Decode};
|
||||||
use processor_messages::coordinator::SubstrateSignableId;
|
use processor_messages::coordinator::SubstrateSignableId;
|
||||||
@@ -130,32 +129,26 @@ impl<Id: Clone + PartialEq + Eq + Debug + Encode + Decode> SignData<Id> {
|
|||||||
|
|
||||||
#[derive(Clone, PartialEq, Eq)]
|
#[derive(Clone, PartialEq, Eq)]
|
||||||
pub enum Transaction {
|
pub enum Transaction {
|
||||||
RemoveParticipantDueToDkg {
|
RemoveParticipant {
|
||||||
participant: <Ristretto as Ciphersuite>::G,
|
participant: <Ristretto as Ciphersuite>::G,
|
||||||
signed: Signed,
|
signed: Signed,
|
||||||
},
|
},
|
||||||
|
|
||||||
DkgCommitments {
|
DkgParticipation {
|
||||||
attempt: u32,
|
participation: Vec<u8>,
|
||||||
commitments: Vec<Vec<u8>>,
|
|
||||||
signed: Signed,
|
signed: Signed,
|
||||||
},
|
},
|
||||||
DkgShares {
|
DkgConfirmationNonces {
|
||||||
|
// The confirmation attempt
|
||||||
attempt: u32,
|
attempt: u32,
|
||||||
// Sending Participant, Receiving Participant, Share
|
// The nonces for DKG confirmation attempt #attempt
|
||||||
shares: Vec<Vec<Vec<u8>>>,
|
|
||||||
confirmation_nonces: [u8; 64],
|
confirmation_nonces: [u8; 64],
|
||||||
signed: Signed,
|
signed: Signed,
|
||||||
},
|
},
|
||||||
InvalidDkgShare {
|
DkgConfirmationShare {
|
||||||
attempt: u32,
|
// The confirmation attempt
|
||||||
accuser: Participant,
|
|
||||||
faulty: Participant,
|
|
||||||
blame: Option<Vec<u8>>,
|
|
||||||
signed: Signed,
|
|
||||||
},
|
|
||||||
DkgConfirmed {
|
|
||||||
attempt: u32,
|
attempt: u32,
|
||||||
|
// The share for DKG confirmation attempt #attempt
|
||||||
confirmation_share: [u8; 32],
|
confirmation_share: [u8; 32],
|
||||||
signed: Signed,
|
signed: Signed,
|
||||||
},
|
},
|
||||||
@@ -197,29 +190,22 @@ pub enum Transaction {
|
|||||||
impl Debug for Transaction {
|
impl Debug for Transaction {
|
||||||
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
fn fmt(&self, fmt: &mut core::fmt::Formatter<'_>) -> Result<(), core::fmt::Error> {
|
||||||
match self {
|
match self {
|
||||||
Transaction::RemoveParticipantDueToDkg { participant, signed } => fmt
|
Transaction::RemoveParticipant { participant, signed } => fmt
|
||||||
.debug_struct("Transaction::RemoveParticipantDueToDkg")
|
.debug_struct("Transaction::RemoveParticipant")
|
||||||
.field("participant", &hex::encode(participant.to_bytes()))
|
.field("participant", &hex::encode(participant.to_bytes()))
|
||||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
.finish_non_exhaustive(),
|
.finish_non_exhaustive(),
|
||||||
Transaction::DkgCommitments { attempt, commitments: _, signed } => fmt
|
Transaction::DkgParticipation { signed, .. } => fmt
|
||||||
.debug_struct("Transaction::DkgCommitments")
|
.debug_struct("Transaction::DkgParticipation")
|
||||||
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
|
.finish_non_exhaustive(),
|
||||||
|
Transaction::DkgConfirmationNonces { attempt, signed, .. } => fmt
|
||||||
|
.debug_struct("Transaction::DkgConfirmationNonces")
|
||||||
.field("attempt", attempt)
|
.field("attempt", attempt)
|
||||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
.finish_non_exhaustive(),
|
.finish_non_exhaustive(),
|
||||||
Transaction::DkgShares { attempt, signed, .. } => fmt
|
Transaction::DkgConfirmationShare { attempt, signed, .. } => fmt
|
||||||
.debug_struct("Transaction::DkgShares")
|
.debug_struct("Transaction::DkgConfirmationShare")
|
||||||
.field("attempt", attempt)
|
|
||||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
|
||||||
.finish_non_exhaustive(),
|
|
||||||
Transaction::InvalidDkgShare { attempt, accuser, faulty, .. } => fmt
|
|
||||||
.debug_struct("Transaction::InvalidDkgShare")
|
|
||||||
.field("attempt", attempt)
|
|
||||||
.field("accuser", accuser)
|
|
||||||
.field("faulty", faulty)
|
|
||||||
.finish_non_exhaustive(),
|
|
||||||
Transaction::DkgConfirmed { attempt, confirmation_share: _, signed } => fmt
|
|
||||||
.debug_struct("Transaction::DkgConfirmed")
|
|
||||||
.field("attempt", attempt)
|
.field("attempt", attempt)
|
||||||
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
.field("signer", &hex::encode(signed.signer.to_bytes()))
|
||||||
.finish_non_exhaustive(),
|
.finish_non_exhaustive(),
|
||||||
@@ -261,43 +247,32 @@ impl ReadWrite for Transaction {
|
|||||||
reader.read_exact(&mut kind)?;
|
reader.read_exact(&mut kind)?;
|
||||||
|
|
||||||
match kind[0] {
|
match kind[0] {
|
||||||
0 => Ok(Transaction::RemoveParticipantDueToDkg {
|
0 => Ok(Transaction::RemoveParticipant {
|
||||||
participant: Ristretto::read_G(reader)?,
|
participant: Ristretto::read_G(reader)?,
|
||||||
signed: Signed::read_without_nonce(reader, 0)?,
|
signed: Signed::read_without_nonce(reader, 0)?,
|
||||||
}),
|
}),
|
||||||
|
|
||||||
1 => {
|
1 => {
|
||||||
let mut attempt = [0; 4];
|
let participation = {
|
||||||
reader.read_exact(&mut attempt)?;
|
let mut participation_len = [0; 4];
|
||||||
let attempt = u32::from_le_bytes(attempt);
|
reader.read_exact(&mut participation_len)?;
|
||||||
|
let participation_len = u32::from_le_bytes(participation_len);
|
||||||
|
|
||||||
let commitments = {
|
if participation_len > u32::try_from(TRANSACTION_SIZE_LIMIT).unwrap() {
|
||||||
let mut commitments_len = [0; 1];
|
|
||||||
reader.read_exact(&mut commitments_len)?;
|
|
||||||
let commitments_len = usize::from(commitments_len[0]);
|
|
||||||
if commitments_len == 0 {
|
|
||||||
Err(io::Error::other("zero commitments in DkgCommitments"))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut each_commitments_len = [0; 2];
|
|
||||||
reader.read_exact(&mut each_commitments_len)?;
|
|
||||||
let each_commitments_len = usize::from(u16::from_le_bytes(each_commitments_len));
|
|
||||||
if (commitments_len * each_commitments_len) > TRANSACTION_SIZE_LIMIT {
|
|
||||||
Err(io::Error::other(
|
Err(io::Error::other(
|
||||||
"commitments present in transaction exceeded transaction size limit",
|
"participation present in transaction exceeded transaction size limit",
|
||||||
))?;
|
))?;
|
||||||
}
|
}
|
||||||
let mut commitments = vec![vec![]; commitments_len];
|
let participation_len = usize::try_from(participation_len).unwrap();
|
||||||
for commitments in &mut commitments {
|
|
||||||
*commitments = vec![0; each_commitments_len];
|
let mut participation = vec![0; participation_len];
|
||||||
reader.read_exact(commitments)?;
|
reader.read_exact(&mut participation)?;
|
||||||
}
|
participation
|
||||||
commitments
|
|
||||||
};
|
};
|
||||||
|
|
||||||
let signed = Signed::read_without_nonce(reader, 0)?;
|
let signed = Signed::read_without_nonce(reader, 0)?;
|
||||||
|
|
||||||
Ok(Transaction::DkgCommitments { attempt, commitments, signed })
|
Ok(Transaction::DkgParticipation { participation, signed })
|
||||||
}
|
}
|
||||||
|
|
||||||
2 => {
|
2 => {
|
||||||
@@ -305,36 +280,12 @@ impl ReadWrite for Transaction {
         reader.read_exact(&mut attempt)?;
         let attempt = u32::from_le_bytes(attempt);

-        let shares = {
-          let mut share_quantity = [0; 1];
-          reader.read_exact(&mut share_quantity)?;
-
-          let mut key_share_quantity = [0; 1];
-          reader.read_exact(&mut key_share_quantity)?;
-
-          let mut share_len = [0; 2];
-          reader.read_exact(&mut share_len)?;
-          let share_len = usize::from(u16::from_le_bytes(share_len));
-
-          let mut all_shares = vec![];
-          for _ in 0 .. share_quantity[0] {
-            let mut shares = vec![];
-            for _ in 0 .. key_share_quantity[0] {
-              let mut share = vec![0; share_len];
-              reader.read_exact(&mut share)?;
-              shares.push(share);
-            }
-            all_shares.push(shares);
-          }
-          all_shares
-        };
-
         let mut confirmation_nonces = [0; 64];
         reader.read_exact(&mut confirmation_nonces)?;

-        let signed = Signed::read_without_nonce(reader, 1)?;
+        let signed = Signed::read_without_nonce(reader, 0)?;

-        Ok(Transaction::DkgShares { attempt, shares, confirmation_nonces, signed })
+        Ok(Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed })
       }

       3 => {
@@ -342,53 +293,21 @@ impl ReadWrite for Transaction {
         reader.read_exact(&mut attempt)?;
         let attempt = u32::from_le_bytes(attempt);

-        let mut accuser = [0; 2];
-        reader.read_exact(&mut accuser)?;
-        let accuser = Participant::new(u16::from_le_bytes(accuser))
-          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
-
-        let mut faulty = [0; 2];
-        reader.read_exact(&mut faulty)?;
-        let faulty = Participant::new(u16::from_le_bytes(faulty))
-          .ok_or_else(|| io::Error::other("invalid participant in InvalidDkgShare"))?;
-
-        let mut blame_len = [0; 2];
-        reader.read_exact(&mut blame_len)?;
-        let mut blame = vec![0; u16::from_le_bytes(blame_len).into()];
-        reader.read_exact(&mut blame)?;
-
-        // This shares a nonce with DkgConfirmed as only one is expected
-        let signed = Signed::read_without_nonce(reader, 2)?;
-
-        Ok(Transaction::InvalidDkgShare {
-          attempt,
-          accuser,
-          faulty,
-          blame: Some(blame).filter(|blame| !blame.is_empty()),
-          signed,
-        })
-      }
-
-      4 => {
-        let mut attempt = [0; 4];
-        reader.read_exact(&mut attempt)?;
-        let attempt = u32::from_le_bytes(attempt);
-
         let mut confirmation_share = [0; 32];
         reader.read_exact(&mut confirmation_share)?;

-        let signed = Signed::read_without_nonce(reader, 2)?;
+        let signed = Signed::read_without_nonce(reader, 1)?;

-        Ok(Transaction::DkgConfirmed { attempt, confirmation_share, signed })
+        Ok(Transaction::DkgConfirmationShare { attempt, confirmation_share, signed })
       }

-      5 => {
+      4 => {
         let mut block = [0; 32];
         reader.read_exact(&mut block)?;
         Ok(Transaction::CosignSubstrateBlock(block))
       }

-      6 => {
+      5 => {
         let mut block = [0; 32];
         reader.read_exact(&mut block)?;
         let mut batch = [0; 4];
@@ -396,16 +315,16 @@ impl ReadWrite for Transaction {
         Ok(Transaction::Batch { block, batch: u32::from_le_bytes(batch) })
       }

-      7 => {
+      6 => {
         let mut block = [0; 8];
         reader.read_exact(&mut block)?;
         Ok(Transaction::SubstrateBlock(u64::from_le_bytes(block)))
       }

-      8 => SignData::read(reader).map(Transaction::SubstrateSign),
-      9 => SignData::read(reader).map(Transaction::Sign),
+      7 => SignData::read(reader).map(Transaction::SubstrateSign),
+      8 => SignData::read(reader).map(Transaction::Sign),

-      10 => {
+      9 => {
         let mut plan = [0; 32];
         reader.read_exact(&mut plan)?;
@@ -420,7 +339,7 @@ impl ReadWrite for Transaction {
         Ok(Transaction::SignCompleted { plan, tx_hash, first_signer, signature })
       }

-      11 => {
+      10 => {
         let mut len = [0];
         reader.read_exact(&mut len)?;
         let len = len[0];
@@ -445,109 +364,59 @@ impl ReadWrite for Transaction {

   fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()> {
     match self {
-      Transaction::RemoveParticipantDueToDkg { participant, signed } => {
+      Transaction::RemoveParticipant { participant, signed } => {
         writer.write_all(&[0])?;
         writer.write_all(&participant.to_bytes())?;
         signed.write_without_nonce(writer)
       }

-      Transaction::DkgCommitments { attempt, commitments, signed } => {
+      Transaction::DkgParticipation { participation, signed } => {
         writer.write_all(&[1])?;
-        writer.write_all(&attempt.to_le_bytes())?;
-        if commitments.is_empty() {
-          Err(io::Error::other("zero commitments in DkgCommitments"))?
-        }
-        writer.write_all(&[u8::try_from(commitments.len()).unwrap()])?;
-        for commitments_i in commitments {
-          if commitments_i.len() != commitments[0].len() {
-            Err(io::Error::other("commitments of differing sizes in DkgCommitments"))?
-          }
-        }
-        writer.write_all(&u16::try_from(commitments[0].len()).unwrap().to_le_bytes())?;
-        for commitments in commitments {
-          writer.write_all(commitments)?;
-        }
+        writer.write_all(&u32::try_from(participation.len()).unwrap().to_le_bytes())?;
+        writer.write_all(participation)?;
         signed.write_without_nonce(writer)
       }

-      Transaction::DkgShares { attempt, shares, confirmation_nonces, signed } => {
+      Transaction::DkgConfirmationNonces { attempt, confirmation_nonces, signed } => {
         writer.write_all(&[2])?;
         writer.write_all(&attempt.to_le_bytes())?;
-
-        // `shares` is a Vec which is supposed to map to a HashMap<Participant, Vec<u8>>. Since we
-        // bound participants to 150, this conversion is safe if a valid in-memory transaction.
-        writer.write_all(&[u8::try_from(shares.len()).unwrap()])?;
-        // This assumes at least one share is being sent to another party
-        writer.write_all(&[u8::try_from(shares[0].len()).unwrap()])?;
-        let share_len = shares[0][0].len();
-        // For BLS12-381 G2, this would be:
-        // - A 32-byte share
-        // - A 96-byte ephemeral key
-        // - A 128-byte signature
-        // Hence why this has to be u16
-        writer.write_all(&u16::try_from(share_len).unwrap().to_le_bytes())?;
-
-        for these_shares in shares {
-          assert_eq!(these_shares.len(), shares[0].len(), "amount of sent shares was variable");
-          for share in these_shares {
-            assert_eq!(share.len(), share_len, "sent shares were of variable length");
-            writer.write_all(share)?;
-          }
-        }
-
         writer.write_all(confirmation_nonces)?;
         signed.write_without_nonce(writer)
       }

-      Transaction::InvalidDkgShare { attempt, accuser, faulty, blame, signed } => {
+      Transaction::DkgConfirmationShare { attempt, confirmation_share, signed } => {
         writer.write_all(&[3])?;
         writer.write_all(&attempt.to_le_bytes())?;
-        writer.write_all(&u16::from(*accuser).to_le_bytes())?;
-        writer.write_all(&u16::from(*faulty).to_le_bytes())?;
-
-        // Flattens Some(vec![]) to None on the expectation no actual blame will be 0-length
-        assert!(blame.as_ref().map_or(1, Vec::len) != 0);
-        let blame_len =
-          u16::try_from(blame.as_ref().unwrap_or(&vec![]).len()).expect("blame exceeded 64 KB");
-        writer.write_all(&blame_len.to_le_bytes())?;
-        writer.write_all(blame.as_ref().unwrap_or(&vec![]))?;
-
-        signed.write_without_nonce(writer)
-      }
-
-      Transaction::DkgConfirmed { attempt, confirmation_share, signed } => {
-        writer.write_all(&[4])?;
-        writer.write_all(&attempt.to_le_bytes())?;
         writer.write_all(confirmation_share)?;
         signed.write_without_nonce(writer)
       }

       Transaction::CosignSubstrateBlock(block) => {
-        writer.write_all(&[5])?;
+        writer.write_all(&[4])?;
         writer.write_all(block)
       }

       Transaction::Batch { block, batch } => {
-        writer.write_all(&[6])?;
+        writer.write_all(&[5])?;
         writer.write_all(block)?;
         writer.write_all(&batch.to_le_bytes())
       }

       Transaction::SubstrateBlock(block) => {
-        writer.write_all(&[7])?;
+        writer.write_all(&[6])?;
         writer.write_all(&block.to_le_bytes())
       }

       Transaction::SubstrateSign(data) => {
-        writer.write_all(&[8])?;
+        writer.write_all(&[7])?;
         data.write(writer)
       }
       Transaction::Sign(data) => {
-        writer.write_all(&[9])?;
+        writer.write_all(&[8])?;
         data.write(writer)
       }
       Transaction::SignCompleted { plan, tx_hash, first_signer, signature } => {
-        writer.write_all(&[10])?;
+        writer.write_all(&[9])?;
         writer.write_all(plan)?;
         writer
           .write_all(&[u8::try_from(tx_hash.len()).expect("tx hash length exceed 255 bytes")])?;
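`read` and `write` must agree on every one of these renumbered kind bytes. A round-trip check is the cheapest guard against the two halves drifting apart; a sketch, assuming a ReadWrite trait shaped like the one in this diff:

use std::io;

// Assumed shape of the trait under test, mirroring the signatures above.
trait ReadWrite: Sized {
  fn read<R: io::Read>(reader: &mut R) -> io::Result<Self>;
  fn write<W: io::Write>(&self, writer: &mut W) -> io::Result<()>;
}

// Serialize, deserialize, and demand equality; a mismatched kind byte fails here.
fn assert_round_trip<T: ReadWrite + PartialEq + core::fmt::Debug>(tx: &T) {
  let mut buf = vec![];
  tx.write(&mut buf).unwrap();
  let read = T::read(&mut buf.as_slice()).unwrap();
  assert_eq!(tx, &read, "serialization round-trip altered the transaction");
}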
@@ -556,7 +425,7 @@ impl ReadWrite for Transaction {
         signature.write(writer)
       }
       Transaction::SlashReport(points, signed) => {
-        writer.write_all(&[11])?;
+        writer.write_all(&[10])?;
         writer.write_all(&[u8::try_from(points.len()).unwrap()])?;
         for points in points {
           writer.write_all(&points.to_le_bytes())?;
@@ -570,15 +439,16 @@ impl ReadWrite for Transaction {
 impl TransactionTrait for Transaction {
   fn kind(&self) -> TransactionKind<'_> {
     match self {
-      Transaction::RemoveParticipantDueToDkg { participant, signed } => {
+      Transaction::RemoveParticipant { participant, signed } => {
         TransactionKind::Signed((b"remove", participant.to_bytes()).encode(), signed)
       }

-      Transaction::DkgCommitments { attempt, commitments: _, signed } |
-      Transaction::DkgShares { attempt, signed, .. } |
-      Transaction::InvalidDkgShare { attempt, signed, .. } |
-      Transaction::DkgConfirmed { attempt, signed, .. } => {
-        TransactionKind::Signed((b"dkg", attempt).encode(), signed)
+      Transaction::DkgParticipation { signed, .. } => {
+        TransactionKind::Signed(b"dkg".to_vec(), signed)
+      }
+      Transaction::DkgConfirmationNonces { attempt, signed, .. } |
+      Transaction::DkgConfirmationShare { attempt, signed, .. } => {
+        TransactionKind::Signed((b"dkg_confirmation", attempt).encode(), signed)
       }

       Transaction::CosignSubstrateBlock(_) => TransactionKind::Provided("cosign"),
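The order keys returned by kind() now split the DKG flow in two: participation shares a single attempt-independent key, while the confirmation transactions are domain-separated per attempt. Illustrative helpers, assuming parity-scale-codec is the `encode()` in scope, imported under the name `scale`:

use scale::Encode; // assumption: parity-scale-codec renamed to `scale`

// Attempt-independent key: all DkgParticipation transactions share one nonce space.
fn dkg_participation_key() -> Vec<u8> {
  b"dkg".to_vec()
}

// Per-attempt key: each attempt's confirmation transactions get their own nonce space.
fn dkg_confirmation_key(attempt: u32) -> Vec<u8> {
  (b"dkg_confirmation", attempt).encode()
}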
@@ -645,11 +515,14 @@ impl Transaction {
   fn signed(tx: &mut Transaction) -> (u32, &mut Signed) {
     #[allow(clippy::match_same_arms)] // Doesn't make semantic sense here
     let nonce = match tx {
-      Transaction::RemoveParticipantDueToDkg { .. } => 0,
+      Transaction::RemoveParticipant { .. } => 0,

-      Transaction::DkgCommitments { .. } => 0,
-      Transaction::DkgShares { .. } => 1,
-      Transaction::InvalidDkgShare { .. } | Transaction::DkgConfirmed { .. } => 2,
+      Transaction::DkgParticipation { .. } => 0,
+      // Uses a nonce of 0 as it has an internal attempt counter we distinguish by
+      Transaction::DkgConfirmationNonces { .. } => 0,
+      // Uses a nonce of 1 due to internal attempt counter and due to following
+      // DkgConfirmationNonces
+      Transaction::DkgConfirmationShare { .. } => 1,

       Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
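These fixed nonces impose an order per signer within an order key: the confirmation nonces (nonce 0) must land before the confirmation share (nonce 1). A hedged sketch of the sequential-nonce check this relies on, with illustrative names rather than the tributary's actual API:

use std::collections::HashMap;

// Track the next expected nonce per (order key, signer); reject anything out of sequence.
fn verify_and_bump_nonce(
  next_nonces: &mut HashMap<(Vec<u8>, [u8; 32]), u32>,
  order_key: Vec<u8>,
  signer: [u8; 32],
  nonce: u32,
) -> bool {
  let next = next_nonces.entry((order_key, signer)).or_insert(0);
  if nonce != *next {
    // Out of order: e.g. a DkgConfirmationShare published before its DkgConfirmationNonces.
    return false;
  }
  *next += 1;
  true
}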
@@ -668,11 +541,10 @@ impl Transaction {
       nonce,
       #[allow(clippy::match_same_arms)]
       match tx {
-        Transaction::RemoveParticipantDueToDkg { ref mut signed, .. } |
-        Transaction::DkgCommitments { ref mut signed, .. } |
-        Transaction::DkgShares { ref mut signed, .. } |
-        Transaction::InvalidDkgShare { ref mut signed, .. } |
-        Transaction::DkgConfirmed { ref mut signed, .. } => signed,
+        Transaction::RemoveParticipant { ref mut signed, .. } |
+        Transaction::DkgParticipation { ref mut signed, .. } |
+        Transaction::DkgConfirmationNonces { ref mut signed, .. } => signed,
+        Transaction::DkgConfirmationShare { ref mut signed, .. } => signed,

         Transaction::CosignSubstrateBlock(_) => panic!("signing CosignSubstrateBlock"),
@@ -1,5 +1,5 @@
 use core::{marker::PhantomData, fmt::Debug};
-use std::{sync::Arc, io, collections::VecDeque};
+use std::{sync::Arc, io};

 use async_trait::async_trait;
@@ -59,7 +59,7 @@ pub const ACCOUNT_MEMPOOL_LIMIT: u32 = 50;
 pub const BLOCK_SIZE_LIMIT: usize = 3_001_000;

 pub(crate) const TENDERMINT_MESSAGE: u8 = 0;
-pub(crate) const TRANSACTION_MESSAGE: u8 = 2; // TODO: Normalize to 1
+pub(crate) const TRANSACTION_MESSAGE: u8 = 1;

 #[allow(clippy::large_enum_variant)]
 #[derive(Clone, PartialEq, Eq, Debug)]
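With the TODO resolved, the two message-kind bytes are contiguous. A minimal sketch of the dispatch they imply (handler bodies elided; the routing shape is illustrative, not the tributary's exact code):

const TENDERMINT_MESSAGE: u8 = 0;
const TRANSACTION_MESSAGE: u8 = 1;

// Route an incoming P2P message by its leading kind byte.
fn handle_message(msg: &[u8]) {
  match msg.first() {
    Some(&TENDERMINT_MESSAGE) => { /* feed the remainder to the Tendermint machine */ }
    Some(&TRANSACTION_MESSAGE) => { /* deserialize the remainder and offer it to the mempool */ }
    _ => { /* empty or unknown kind; drop the message */ }
  }
}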
@@ -154,14 +154,6 @@ pub struct Tributary<D: Db, T: TransactionTrait, P: P2p> {
   synced_block: Arc<RwLock<SyncedBlockSender<TendermintNetwork<D, T, P>>>>,
   synced_block_result: Arc<RwLock<SyncedBlockResultReceiver>>,
   messages: Arc<RwLock<MessageSender<TendermintNetwork<D, T, P>>>>,
-
-  p2p_meta_task_handle: Arc<tokio::task::AbortHandle>,
-}
-
-impl<D: Db, T: TransactionTrait, P: P2p> Drop for Tributary<D, T, P> {
-  fn drop(&mut self) {
-    self.p2p_meta_task_handle.abort();
-  }
 }

 impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
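The removed field and Drop impl were the standard abort-on-drop pattern, tying the rebroadcast task's lifetime to the Tributary's. A generic tokio sketch of that pattern, not Serai-specific code:

// A guard which aborts its tokio task when dropped.
struct TaskGuard(tokio::task::AbortHandle);

impl Drop for TaskGuard {
  fn drop(&mut self) {
    // Stop the background task once its owner goes away, preventing a leak.
    self.0.abort();
  }
}

// Usage: let _guard = TaskGuard(tokio::spawn(async { /* background work */ }).abort_handle());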
@@ -193,28 +185,7 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
     );
     let blockchain = Arc::new(RwLock::new(blockchain));

-    let to_rebroadcast = Arc::new(RwLock::new(VecDeque::new()));
-    // Actively rebroadcast consensus messages to ensure they aren't prematurely dropped from the
-    // P2P layer
-    let p2p_meta_task_handle = Arc::new(
-      tokio::spawn({
-        let to_rebroadcast = to_rebroadcast.clone();
-        let p2p = p2p.clone();
-        async move {
-          loop {
-            let to_rebroadcast = to_rebroadcast.read().await.clone();
-            for msg in to_rebroadcast {
-              p2p.broadcast(genesis, msg).await;
-            }
-            tokio::time::sleep(core::time::Duration::from_secs(60)).await;
-          }
-        }
-      })
-      .abort_handle(),
-    );
-
-    let network =
-      TendermintNetwork { genesis, signer, validators, blockchain, to_rebroadcast, p2p };
+    let network = TendermintNetwork { genesis, signer, validators, blockchain, p2p };

     let TendermintHandle { synced_block, synced_block_result, messages, machine } =
       TendermintMachine::new(
||||||
@@ -235,7 +206,6 @@ impl<D: Db, T: TransactionTrait, P: P2p> Tributary<D, T, P> {
|
|||||||
synced_block: Arc::new(RwLock::new(synced_block)),
|
synced_block: Arc::new(RwLock::new(synced_block)),
|
||||||
synced_block_result: Arc::new(RwLock::new(synced_block_result)),
|
synced_block_result: Arc::new(RwLock::new(synced_block_result)),
|
||||||
messages: Arc::new(RwLock::new(messages)),
|
messages: Arc::new(RwLock::new(messages)),
|
||||||
p2p_meta_task_handle,
|
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user