mirror of https://github.com/serai-dex/serai.git
synced 2025-12-12 14:09:25 +00:00

Compare commits: 0060e3c59f... (1 commit: f029471f9f)
.gitattributes (vendored) | 5
@@ -1,5 +0,0 @@
# Auto detect text files and perform LF normalization
* text=auto
* text eol=lf

*.pdf binary
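The deleted rules above forced LF normalization for text files and marked PDFs as binary. After rules like these change, already-tracked files keep their old line endings until re-normalized; a sketch using the standard git command for that (not part of this diff):

# re-apply the .gitattributes rules to every tracked file
git add --renormalize .
git commit -m "Normalize line endings"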
.github/actions/LICENSE (vendored) | 21
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2022-2023 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
.github/actions/bitcoin/action.yml (vendored) | 40
@@ -1,40 +0,0 @@
name: bitcoin-regtest
description: Spawns a regtest Bitcoin daemon

inputs:
  version:
    description: "Version to download and run"
    required: false
    default: "27.0"

runs:
  using: "composite"
  steps:
    - name: Bitcoin Daemon Cache
      id: cache-bitcoind
      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: bitcoin.tar.gz
        key: bitcoind-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

    - name: Download the Bitcoin Daemon
      if: steps.cache-bitcoind.outputs.cache-hit != 'true'
      shell: bash
      run: |
        RUNNER_OS=linux
        RUNNER_ARCH=x86_64
        FILE=bitcoin-${{ inputs.version }}-$RUNNER_ARCH-$RUNNER_OS-gnu.tar.gz

        wget https://bitcoincore.org/bin/bitcoin-core-${{ inputs.version }}/$FILE
        mv $FILE bitcoin.tar.gz

    - name: Extract the Bitcoin Daemon
      shell: bash
      run: |
        tar xzvf bitcoin.tar.gz
        cd bitcoin-${{ inputs.version }}
        sudo mv bin/* /bin && sudo mv lib/* /lib

    - name: Bitcoin Regtest Daemon
      shell: bash
      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/bitcoin/run.sh -daemon
.github/actions/build-dependencies/action.yml (vendored) | 49
@@ -1,49 +0,0 @@
name: build-dependencies
description: Installs build dependencies for Serai

runs:
  using: "composite"
  steps:
    - name: Remove unused packages
      shell: bash
      run: |
        sudo apt remove -y "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
        sudo apt remove -y "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
        sudo apt remove -y "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
        sudo apt remove -y "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
        sudo apt autoremove -y
        sudo apt clean
        docker system prune -a --volumes
      if: runner.os == 'Linux'

    - name: Remove unused packages
      shell: bash
      run: |
        (gem uninstall -aIx) || (exit 0)
        brew uninstall --force "*msbuild*" "*powershell*" "*nuget*" "*bazel*" "*ansible*" "*terraform*" "*heroku*" "*aws*" azure-cli
        brew uninstall --force "*nodejs*" "*npm*" "*yarn*" "*java*" "*kotlin*" "*golang*" "*swift*" "*julia*" "*fortran*" "*android*"
        brew uninstall --force "*apache2*" "*nginx*" "*firefox*" "*chromium*" "*chrome*" "*edge*"
        brew uninstall --force "*qemu*" "*sql*" "*texinfo*" "*imagemagick*"
        brew cleanup
      if: runner.os == 'macOS'

    - name: Install dependencies
      shell: bash
      run: |
        if [ "$RUNNER_OS" == "Linux" ]; then
          sudo apt install -y ca-certificates protobuf-compiler
        elif [ "$RUNNER_OS" == "Windows" ]; then
          choco install protoc
        elif [ "$RUNNER_OS" == "macOS" ]; then
          brew install protobuf
        fi

    - name: Install solc
      shell: bash
      run: |
        cargo install svm-rs
        svm install 0.8.25
        svm use 0.8.25

    # - name: Cache Rust
    #   uses: Swatinem/rust-cache@a95ba195448af2da9b00fb742d14ffaaf3c21f43
.github/actions/monero-wallet-rpc/action.yml (vendored) | 49
@@ -1,49 +0,0 @@
name: monero-wallet-rpc
description: Spawns a Monero Wallet-RPC.

inputs:
  version:
    description: "Version to download and run"
    required: false
    default: v0.18.3.1

runs:
  using: "composite"
  steps:
    - name: Monero Wallet RPC Cache
      id: cache-monero-wallet-rpc
      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: monero-wallet-rpc
        key: monero-wallet-rpc-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

    - name: Download the Monero Wallet RPC
      if: steps.cache-monero-wallet-rpc.outputs.cache-hit != 'true'
      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
      # to the contained folder not following the same naming scheme and
      # requiring further expansion not worth doing right now
      shell: bash
      run: |
        RUNNER_OS=${{ runner.os }}
        RUNNER_ARCH=${{ runner.arch }}

        RUNNER_OS=${RUNNER_OS,,}
        RUNNER_ARCH=${RUNNER_ARCH,,}

        RUNNER_OS=linux
        RUNNER_ARCH=x64

        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
        wget https://downloads.getmonero.org/cli/$FILE
        tar -xvf $FILE

        mv monero-x86_64-linux-gnu-${{ inputs.version }}/monero-wallet-rpc monero-wallet-rpc

    - name: Monero Wallet RPC
      shell: bash
      run: |
        ./monero-wallet-rpc --allow-mismatched-daemon-version \
          --daemon-address 0.0.0.0:18081 --daemon-login serai:seraidex \
          --disable-rpc-login --rpc-bind-port 18082 \
          --wallet-dir ./ \
          --detach
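The download steps here and in the monero-regtest action below lowercase RUNNER_OS/RUNNER_ARCH with bash's ",," parameter expansion before overriding both with hard-coded values, as the in-file comment explains. A quick sketch of what that expansion does:

# bash's ",," expansion lowercases a variable's value
RUNNER_OS=Linux
RUNNER_ARCH=X64
echo "${RUNNER_OS,,}-${RUNNER_ARCH,,}"   # prints: linux-x64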
.github/actions/monero/action.yml (vendored) | 46
@@ -1,46 +0,0 @@
name: monero-regtest
description: Spawns a regtest Monero daemon

inputs:
  version:
    description: "Version to download and run"
    required: false
    default: v0.18.3.1

runs:
  using: "composite"
  steps:
    - name: Monero Daemon Cache
      id: cache-monerod
      uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
      with:
        path: /usr/bin/monerod
        key: monerod-${{ runner.os }}-${{ runner.arch }}-${{ inputs.version }}

    - name: Download the Monero Daemon
      if: steps.cache-monerod.outputs.cache-hit != 'true'
      # Calculates OS/ARCH to demonstrate it, yet then locks to linux-x64 due
      # to the contained folder not following the same naming scheme and
      # requiring further expansion not worth doing right now
      shell: bash
      run: |
        RUNNER_OS=${{ runner.os }}
        RUNNER_ARCH=${{ runner.arch }}

        RUNNER_OS=${RUNNER_OS,,}
        RUNNER_ARCH=${RUNNER_ARCH,,}

        RUNNER_OS=linux
        RUNNER_ARCH=x64

        FILE=monero-$RUNNER_OS-$RUNNER_ARCH-${{ inputs.version }}.tar.bz2
        wget https://downloads.getmonero.org/cli/$FILE
        tar -xvf $FILE

        sudo mv monero-x86_64-linux-gnu-${{ inputs.version }}/monerod /usr/bin/monerod
        sudo chmod 777 /usr/bin/monerod
        sudo chmod +x /usr/bin/monerod

    - name: Monero Regtest Daemon
      shell: bash
      run: PATH=$PATH:/usr/bin ./orchestration/dev/networks/monero/run.sh --detach
.github/actions/test-dependencies/action.yml (vendored) | 38
@@ -1,38 +0,0 @@
name: test-dependencies
description: Installs test dependencies for Serai

inputs:
  monero-version:
    description: "Monero version to download and run as a regtest node"
    required: false
    default: v0.18.3.1

  bitcoin-version:
    description: "Bitcoin version to download and run as a regtest node"
    required: false
    default: "27.1"

runs:
  using: "composite"
  steps:
    - name: Install Build Dependencies
      uses: ./.github/actions/build-dependencies

    - name: Install Foundry
      uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773
      with:
        version: nightly-f625d0fa7c51e65b4bf1e8f7931cd1c6e2e285e9
        cache: false

    - name: Run a Monero Regtest Node
      uses: ./.github/actions/monero
      with:
        version: ${{ inputs.monero-version }}

    - name: Run a Bitcoin Regtest Node
      uses: ./.github/actions/bitcoin
      with:
        version: ${{ inputs.bitcoin-version }}

    - name: Run a Monero Wallet-RPC
      uses: ./.github/actions/monero-wallet-rpc
.github/nightly-version (vendored) | 1
@@ -1 +0,0 @@
nightly-2024-07-01
.github/workflows/common-tests.yml (vendored) | 32
@@ -1,32 +0,0 @@
name: common/ Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"

  pull_request:
    paths:
      - "common/**"

  workflow_dispatch:

jobs:
  test-common:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p std-shims \
            -p zalloc \
            -p serai-db \
            -p serai-env \
            -p simple-request
.github/workflows/coordinator-tests.yml (vendored) | 40
@@ -1,40 +0,0 @@
name: Coordinator Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "coordinator/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/coordinator/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "coordinator/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/coordinator/**"

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run coordinator Docker tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-coordinator-tests
.github/workflows/crypto-tests.yml (vendored) | 40
@@ -1,40 +0,0 @@
name: crypto/ Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"

  workflow_dispatch:

jobs:
  test-crypto:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p flexible-transcript \
            -p ff-group-tests \
            -p dalek-ff-group \
            -p minimal-ed448 \
            -p ciphersuite \
            -p multiexp \
            -p schnorr-signatures \
            -p dleq \
            -p dkg \
            -p modular-frost \
            -p frost-schnorrkel
.github/workflows/daily-deny.yml (vendored) | 24
@@ -1,24 +0,0 @@
name: Daily Deny Check

on:
  schedule:
    - cron: "0 0 * * *"

jobs:
  deny:
    name: Run cargo deny
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Advisory Cache
        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
        with:
          path: ~/.cargo/advisory-db
          key: rust-advisory-db

      - name: Install cargo deny
        run: cargo install --locked cargo-deny

      - name: Run cargo deny
        run: cargo deny -L error --all-features check
.github/workflows/full-stack-tests.yml (vendored) | 22
@@ -1,22 +0,0 @@
name: Full Stack Tests

on:
  push:
    branches:
      - develop

  pull_request:

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Full Stack Docker tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-full-stack-tests
.github/workflows/lint.yml (vendored) | 83
@@ -1,83 +0,0 @@
name: Lint

on:
  push:
    branches:
      - develop
  pull_request:
  workflow_dispatch:

jobs:
  clippy:
    strategy:
      matrix:
        os: [ubuntu-latest, macos-13, macos-14, windows-latest]
    runs-on: ${{ matrix.os }}

    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Get nightly version to use
        id: nightly
        shell: bash
        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Install nightly rust
        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -t wasm32-unknown-unknown -c clippy

      - name: Run Clippy
        run: cargo +${{ steps.nightly.outputs.version }} clippy --all-features --all-targets -- -D warnings -A clippy::items_after_test_module

      # Also verify the lockfile isn't dirty
      # This happens when someone edits a Cargo.toml yet doesn't do anything
      # which causes the lockfile to be updated
      # The above clippy run will cause it to be updated, so checking there's
      # no differences present now performs the desired check
      - name: Verify lockfile
        shell: bash
        run: git diff | wc -l | LC_ALL="en_US.utf8" grep -x -e "^[ ]*0"

  deny:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Advisory Cache
        uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2
        with:
          path: ~/.cargo/advisory-db
          key: rust-advisory-db

      - name: Install cargo deny
        run: cargo install --locked cargo-deny

      - name: Run cargo deny
        run: cargo deny -L error --all-features check

  fmt:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Get nightly version to use
        id: nightly
        shell: bash
        run: echo "version=$(cat .github/nightly-version)" >> $GITHUB_OUTPUT

      - name: Install nightly rust
        run: rustup toolchain install ${{ steps.nightly.outputs.version }} --profile minimal -c rustfmt

      - name: Run rustfmt
        run: cargo +${{ steps.nightly.outputs.version }} fmt -- --check

  machete:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
      - name: Verify all dependencies are in use
        run: |
          cargo install cargo-machete
          cargo machete
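The "Verify lockfile" step counts the lines `git diff` emits after clippy has run, failing unless the count is zero. A local equivalent is sketched below; `git diff --exit-code` is a standard substitute for the `wc -l`/`grep` pipeline, not what the workflow itself uses:

# use the same pinned nightly the CI reads from .github/nightly-version
version=$(cat .github/nightly-version)
rustup toolchain install "$version" --profile minimal -t wasm32-unknown-unknown -c clippy
cargo +"$version" clippy --all-features --all-targets -- -D warnings
# clippy regenerates Cargo.lock if a Cargo.toml edit left it stale;
# fail if the working tree (including the lockfile) is now dirty
git diff --exit-code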
.github/workflows/message-queue-tests.yml (vendored) | 36
@@ -1,36 +0,0 @@
name: Message Queue Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "message-queue/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/message-queue/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "message-queue/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/message-queue/**"

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run message-queue Docker tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-message-queue-tests
.github/workflows/mini-tests.yml (vendored) | 26
@@ -1,26 +0,0 @@
name: mini/ Tests

on:
  push:
    branches:
      - develop
    paths:
      - "mini/**"

  pull_request:
    paths:
      - "mini/**"

  workflow_dispatch:

jobs:
  test-common:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p mini-serai
.github/workflows/monero-tests.yaml (vendored) | 77
@@ -1,77 +0,0 @@
name: Monero Tests

on:
  push:
    branches:
      - develop
    paths:
      - "networks/monero/**"
      - "processor/**"

  pull_request:
    paths:
      - "networks/monero/**"
      - "processor/**"

  workflow_dispatch:

jobs:
  # Only run these once since they will be consistent regardless of any node
  unit-tests:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies

      - name: Run Unit Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-io --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-generators --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-primitives --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-mlsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-clsag --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-borromean --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-bulletproofs --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-address --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-seed --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package polyseed --lib
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --lib

  # Doesn't run unit tests with features as the tests workflow will

  integration-tests:
    runs-on: ubuntu-latest
    # Test against all supported protocol versions
    strategy:
      matrix:
        version: [v0.17.3.2, v0.18.2.0]

    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies
        with:
          monero-version: ${{ matrix.version }}

      - name: Run Integration Tests Without Features
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --test '*'

      - name: Run Integration Tests
        # Don't run if the tests workflow also will
        if: ${{ matrix.version != 'v0.18.2.0' }}
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-serai --all-features --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-simple-request-rpc --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet --all-features --test '*'
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --package monero-wallet-util --all-features --test '*'
.github/workflows/monthly-nightly-update.yml (vendored) | 53
@@ -1,53 +0,0 @@
name: Monthly Nightly Update

on:
  schedule:
    - cron: "0 0 1 * *"

jobs:
  update:
    name: Update nightly
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac
        with:
          submodules: "recursive"

      - name: Write nightly version
        run: echo $(date +"nightly-%Y-%m"-01) > .github/nightly-version

      - name: Create the commit
        run: |
          git config user.name "GitHub Actions"
          git config user.email "<>"

          git checkout -b $(date +"nightly-%Y-%m")

          git add .github/nightly-version
          git commit -m "Update nightly"
          git push -u origin $(date +"nightly-%Y-%m")

      - name: Pull Request
        uses: actions/github-script@d7906e4ad0b1822421a7e6a35d5ca353c962f410
        with:
          script: |
            const { repo, owner } = context.repo;

            const result = await github.rest.pulls.create({
              title: (new Date()).toLocaleString(
                false,
                { month: "long", year: "numeric" }
              ) + " - Rust Nightly Update",
              owner,
              repo,
              head: "nightly-" + (new Date()).toISOString().split("-").splice(0, 2).join("-"),
              base: "develop",
              body: "PR auto-generated by a GitHub workflow."
            });

            github.rest.issues.addLabels({
              owner,
              repo,
              issue_number: result.data.number,
              labels: ["improvement"]
            });
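The head branch named in the JavaScript step has to match the branch the shell step pushed; both reduce to the same nightly-YYYY-MM string. The shell side is sketched below; the JS side rebuilds it as "nightly-" + (new Date()).toISOString().split("-").splice(0, 2).join("-"), i.e. the year and month fields of the ISO timestamp:

# branch name pushed by the "Create the commit" step
date +"nightly-%Y-%m"      # e.g. nightly-2024-07
# toolchain version written to .github/nightly-version
date +"nightly-%Y-%m"-01   # e.g. nightly-2024-07-01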
.github/workflows/networks-tests.yml (vendored) | 51
@@ -1,51 +0,0 @@
name: networks/ Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"

  workflow_dispatch:

jobs:
  test-networks:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Test Dependencies
        uses: ./.github/actions/test-dependencies

      - name: Run Tests
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p bitcoin-serai \
            -p alloy-simple-request-transport \
            -p ethereum-serai \
            -p serai-ethereum-relayer \
            -p monero-io \
            -p monero-generators \
            -p monero-primitives \
            -p monero-mlsag \
            -p monero-clsag \
            -p monero-borromean \
            -p monero-bulletproofs \
            -p monero-serai \
            -p monero-rpc \
            -p monero-simple-request-rpc \
            -p monero-address \
            -p monero-wallet \
            -p monero-seed \
            -p polyseed \
            -p monero-wallet-util \
            -p monero-serai-verify-chain
.github/workflows/no-std.yml (vendored) | 35
@@ -1,35 +0,0 @@
name: no-std build

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "tests/no-std/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "tests/no-std/**"

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Install RISC-V Toolchain
        run: sudo apt update && sudo apt install -y gcc-riscv64-unknown-elf gcc-multilib && rustup target add riscv32imac-unknown-none-elf

      - name: Verify no-std builds
        run: CFLAGS=-I/usr/include cargo build --target riscv32imac-unknown-none-elf -p serai-no-std-tests
.github/workflows/pages.yml (vendored) | 90
@@ -1,90 +0,0 @@
# MIT License
#
# Copyright (c) 2022 just-the-docs
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

# This workflow uses actions that are not certified by GitHub.
# They are provided by a third-party and are governed by
# separate terms of service, privacy policy, and support
# documentation.

# Sample workflow for building and deploying a Jekyll site to GitHub Pages
name: Deploy Jekyll site to Pages

on:
  push:
    branches:
      - "develop"
    paths:
      - "docs/**"

  # Allows you to run this workflow manually from the Actions tab
  workflow_dispatch:

# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
  contents: read
  pages: write
  id-token: write

# Allow one concurrent deployment
concurrency:
  group: "pages"
  cancel-in-progress: true

jobs:
  # Build job
  build:
    runs-on: ubuntu-latest
    defaults:
      run:
        working-directory: docs
    steps:
      - name: Checkout
        uses: actions/checkout@v3
      - name: Setup Ruby
        uses: ruby/setup-ruby@v1
        with:
          bundler-cache: true
          cache-version: 0
          working-directory: "${{ github.workspace }}/docs"
      - name: Setup Pages
        id: pages
        uses: actions/configure-pages@v3
      - name: Build with Jekyll
        run: bundle exec jekyll build --baseurl "${{ steps.pages.outputs.base_path }}"
        env:
          JEKYLL_ENV: production
      - name: Upload artifact
        uses: actions/upload-pages-artifact@v1
        with:
          path: "docs/_site/"

  # Deployment job
  deploy:
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    runs-on: ubuntu-latest
    needs: build
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v2
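The build job runs Jekyll from the `docs` directory with the base path GitHub Pages provides. A sketch of previewing the same site locally, assuming Ruby and Bundler are installed (`jekyll serve` is the standard local-preview command, not something this workflow runs):

cd docs
bundle install
bundle exec jekyll serve   # serves the site, by default at http://127.0.0.1:4000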
.github/workflows/processor-tests.yml (vendored) | 40
@@ -1,40 +0,0 @@
name: Processor Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "processor/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/processor/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "processor/**"
      - "orchestration/**"
      - "tests/docker/**"
      - "tests/processor/**"

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run processor Docker tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-processor-tests
.github/workflows/reproducible-runtime.yml (vendored) | 36
@@ -1,36 +0,0 @@
name: Reproducible Runtime

on:
  push:
    branches:
      - develop
    paths:
      - "Cargo.lock"
      - "common/**"
      - "crypto/**"
      - "substrate/**"
      - "orchestration/runtime/**"
      - "tests/reproducible-runtime/**"

  pull_request:
    paths:
      - "Cargo.lock"
      - "common/**"
      - "crypto/**"
      - "substrate/**"
      - "orchestration/runtime/**"
      - "tests/reproducible-runtime/**"

  workflow_dispatch:

jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Install Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Reproducible Runtime tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-reproducible-runtime-tests
.github/workflows/tests.yml (vendored) | 83
@@ -1,83 +0,0 @@
name: Tests

on:
  push:
    branches:
      - develop
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "processor/**"
      - "coordinator/**"
      - "substrate/**"

  pull_request:
    paths:
      - "common/**"
      - "crypto/**"
      - "networks/**"
      - "message-queue/**"
      - "processor/**"
      - "coordinator/**"
      - "substrate/**"

  workflow_dispatch:

jobs:
  test-infra:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p serai-message-queue \
            -p serai-processor-messages \
            -p serai-processor \
            -p tendermint-machine \
            -p tributary-chain \
            -p serai-coordinator \
            -p serai-orchestrator \
            -p serai-docker-tests

  test-substrate:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: |
          GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features \
            -p serai-primitives \
            -p serai-coins-primitives \
            -p serai-coins-pallet \
            -p serai-dex-pallet \
            -p serai-validator-sets-primitives \
            -p serai-validator-sets-pallet \
            -p serai-in-instructions-primitives \
            -p serai-in-instructions-pallet \
            -p serai-signals-primitives \
            -p serai-signals-pallet \
            -p serai-abi \
            -p serai-runtime \
            -p serai-node

  test-serai-client:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@3df4ab11eba7bda6032a0b82a6bb43b11571feac

      - name: Build Dependencies
        uses: ./.github/actions/build-dependencies

      - name: Run Tests
        run: GITHUB_CI=true RUST_BACKTRACE=1 cargo test --all-features -p serai-client
.gitignore (vendored) | 7
@@ -1,7 +1,2 @@
target
Dockerfile
Dockerfile.fast-epoch
!orchestration/runtime/Dockerfile
.test-logs

.vscode
Cargo.lock
.gitmodules (vendored, new file) | 3
@@ -0,0 +1,3 @@
[submodule "coins/monero/c/monero"]
  path = coins/monero/c/monero
  url = https://github.com/monero-project/monero
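With the submodule registered above, a fresh checkout needs the Monero sources present before the crate's build script can run `make`. A sketch using standard git commands:

git clone --recursive https://github.com/serai-dex/serai
# or, in an existing checkout:
git submodule update --init --recursive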
@@ -1,17 +0,0 @@
edition = "2021"
tab_spaces = 2

max_width = 100
# Let the developer decide based on the 100 char line limit
use_small_heuristics = "Max"

error_on_line_overflow = true
error_on_unformatted = true

imports_granularity = "Crate"
reorder_imports = false
reorder_modules = false

unstable_features = true
spaces_around_ranges = true
binop_separator = "Back"
@@ -1,37 +0,0 @@
# Contributing

Contributions come in a variety of forms. Developing Serai, helping document it,
using its libraries in another project, using and testing it, and simply sharing
it are all valuable ways of contributing.

This document will specifically focus on contributions to this repository in the
form of code and documentation.

### Rules

- Stable native Rust, nightly wasm and tools.
- `cargo fmt` must be used.
- `cargo clippy` must pass, except for the ignored rules (`type_complexity` and
  `dead_code`).
- The CI must pass.

- Only use uppercase variable names when relevant to cryptography.

- Use a two-space indent when possible.
- Put a space after comment markers.
- Don't use multiple newlines between sections of code.
- Have a newline before EOF.

### Guidelines

- Sort imports as core, std, third party, and then Serai.
- Comment code reasonably.
- Include tests for new features.
- Sign commits.

### Submission

All submissions should be through GitHub. Contributions to a crate will be
licensed according to the crate's existing license, with the crate's copyright
holders (distinct from authors) having the right to re-license the crate via a
unanimous decision.
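The Rules in the deleted CONTRIBUTING.md are mostly mechanically checkable. A sketch of a local pre-submission pass; the `-A` allowances mirror the two ignored clippy rules, and the flags are standard cargo usage rather than anything quoted from this repository's CI:

cargo +nightly fmt
cargo clippy --all-features --all-targets -- -D warnings \
  -A clippy::type_complexity -A clippy::dead_code
cargo test
git commit -S   # the guidelines ask for signed commits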
Cargo.lock (generated) | 11322
File diff suppressed because it is too large.
Cargo.toml | 193
@@ -1,207 +1,22 @@
[workspace]
resolver = "2"
members = [
  # Version patches
  "patches/parking_lot_core",
  "patches/parking_lot",
  "patches/zstd",
  "patches/rocksdb",
  "patches/proc-macro-crate",

  # std patches
  "patches/matches",
  "patches/is-terminal",

  # Rewrites/redirects
  "patches/option-ext",
  "patches/directories-next",

  "common/std-shims",
  "common/zalloc",
  "common/db",
  "common/env",
  "common/request",

  "crypto/transcript",

  "crypto/ff-group-tests",
  "crypto/dalek-ff-group",
  "crypto/ed448",
  "crypto/ciphersuite",

  "crypto/tables",
  "crypto/multiexp",

  "crypto/schnorr",
  "crypto/dleq",
  "crypto/dkg",
  "crypto/frost",
  "crypto/schnorrkel",

  "networks/bitcoin",
  "coins/monero",

  "networks/ethereum/alloy-simple-request-transport",
  "networks/ethereum",
  "networks/ethereum/relayer",

  "networks/monero/io",
  "networks/monero/generators",
  "networks/monero/primitives",
  "networks/monero/ringct/mlsag",
  "networks/monero/ringct/clsag",
  "networks/monero/ringct/borromean",
  "networks/monero/ringct/bulletproofs",
  "networks/monero",
  "networks/monero/rpc",
  "networks/monero/rpc/simple-request",
  "networks/monero/wallet/address",
  "networks/monero/wallet",
  "networks/monero/wallet/seed",
  "networks/monero/wallet/polyseed",
  "networks/monero/wallet/util",
  "networks/monero/verify-chain",

  "message-queue",

  "processor/messages",
  "processor",

  "coordinator/tributary/tendermint",
  "coordinator/tributary",
  "coordinator",

  "substrate/primitives",

  "substrate/coins/primitives",
  "substrate/coins/pallet",

  "substrate/dex/pallet",

  "substrate/validator-sets/primitives",
  "substrate/validator-sets/pallet",

  "substrate/in-instructions/primitives",
  "substrate/in-instructions/pallet",

  "substrate/signals/primitives",
  "substrate/signals/pallet",

  "substrate/abi",

  "substrate/runtime",
  "substrate/node",

  "substrate/client",

  "orchestration",

  "mini",

  "tests/no-std",

  "tests/docker",
  "tests/message-queue",
  "tests/processor",
  "tests/coordinator",
  "tests/full-stack",
  "tests/reproducible-runtime",
  "substrate/consensus",
  "substrate/node"
]

# Always compile Monero (and a variety of dependencies) with optimizations due
# to the extensive operations required for Bulletproofs
[profile.dev.package]
subtle = { opt-level = 3 }
curve25519-dalek = { opt-level = 3 }

ff = { opt-level = 3 }
group = { opt-level = 3 }

crypto-bigint = { opt-level = 3 }
dalek-ff-group = { opt-level = 3 }
minimal-ed448 = { opt-level = 3 }

multiexp = { opt-level = 3 }

monero-serai = { opt-level = 3 }

[profile.release]
panic = "unwind"

[patch.crates-io]
# https://github.com/rust-lang-nursery/lazy-static.rs/issues/201
lazy_static = { git = "https://github.com/rust-lang-nursery/lazy-static.rs", rev = "5735630d46572f1e5377c8f2ba0f79d18f53b10c" }

# Needed due to dockertest's usage of `Rc`s when we need `Arc`s
dockertest = { git = "https://github.com/orcalabs/dockertest-rs", rev = "4dd6ae24738aa6dc5c89444cc822ea4745517493" }

parking_lot_core = { path = "patches/parking_lot_core" }
parking_lot = { path = "patches/parking_lot" }
# wasmtime pulls in an old version for this
zstd = { path = "patches/zstd" }
# Needed for WAL compression
rocksdb = { path = "patches/rocksdb" }
# proc-macro-crate 2 binds to an old version of toml for msrv so we patch to 3
proc-macro-crate = { path = "patches/proc-macro-crate" }

# is-terminal now has an std-based solution with an equivalent API
is-terminal = { path = "patches/is-terminal" }
# So does matches
matches = { path = "patches/matches" }

# directories-next was created because directories was unmaintained
# directories-next is now unmaintained while directories is maintained
# The directories author pulls in ridiculously pointless crates and prefers
# copyleft licenses
# The following two patches resolve everything
option-ext = { path = "patches/option-ext" }
directories-next = { path = "patches/directories-next" }

[workspace.lints.clippy]
unwrap_or_default = "allow"
borrow_as_ptr = "deny"
cast_lossless = "deny"
cast_possible_truncation = "deny"
cast_possible_wrap = "deny"
cast_precision_loss = "deny"
cast_ptr_alignment = "deny"
cast_sign_loss = "deny"
checked_conversions = "deny"
cloned_instead_of_copied = "deny"
enum_glob_use = "deny"
expl_impl_clone_on_copy = "deny"
explicit_into_iter_loop = "deny"
explicit_iter_loop = "deny"
flat_map_option = "deny"
float_cmp = "deny"
fn_params_excessive_bools = "deny"
ignored_unit_patterns = "deny"
implicit_clone = "deny"
inefficient_to_string = "deny"
invalid_upcast_comparisons = "deny"
large_stack_arrays = "deny"
linkedlist = "deny"
macro_use_imports = "deny"
manual_instant_elapsed = "deny"
manual_let_else = "deny"
manual_ok_or = "deny"
manual_string_new = "deny"
map_unwrap_or = "deny"
match_bool = "deny"
match_same_arms = "deny"
missing_fields_in_debug = "deny"
needless_continue = "deny"
needless_pass_by_value = "deny"
ptr_cast_constness = "deny"
range_minus_one = "deny"
range_plus_one = "deny"
redundant_closure_for_method_calls = "deny"
redundant_else = "deny"
string_add_assign = "deny"
unchecked_duration_subtraction = "deny"
uninlined_format_args = "deny"
unnecessary_box_returns = "deny"
unnecessary_join = "deny"
unnecessary_wraps = "deny"
unnested_or_patterns = "deny"
unused_async = "deny"
unused_self = "deny"
zero_sized_map_values = "deny"
LICENSE | 8
@@ -1,8 +0,0 @@
Serai crates are licensed under one of two licenses, either MIT or AGPL-3.0,
depending on the crate in question. Each crate declares their license in their
`Cargo.toml` and includes a `LICENSE` file detailing its status. Additionally,
a full copy of the AGPL-3.0 License is included in the root of this repository
as a reference text. This copy should be provided with any distribution of a
crate licensed under the AGPL-3.0, as per its terms.

The GitHub actions (`.github/actions`) are licensed under the MIT license.
README.md | 62
@@ -1,66 +1,22 @@
# Serai

Serai is a new DEX, built from the ground up, initially planning on listing
Bitcoin, Ethereum, DAI, and Monero, offering a liquidity-pool-based trading
experience. Funds are stored in an economically secured threshold-multisig
Bitcoin, Ethereum, Monero, DAI, and USDC, offering a liquidity pool trading
experience. Funds are stored in an economically secured threshold multisig
wallet.

[Getting Started](spec/Getting%20Started.md)

### Layout

- `audits`: Audits for various parts of Serai.
- `docs` - Documentation on the Serai protocol.

- `spec`: The specification of the Serai protocol, both internally and as
  networked.

- `docs`: User-facing documentation on the Serai protocol.

- `common`: Crates containing utilities common to a variety of areas under
  Serai, none neatly fitting under another category.

- `crypto`: A series of composable cryptographic libraries built around the
  `ff`/`group` APIs, achieving a variety of tasks. These range from generic
  infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
  needed for Bitcoin-Monero atomic swaps.

- `networks`: Various libraries intended for usage in Serai yet also by the
- `coins` - Various coin libraries intended for usage in Serai yet also by the
  wider community. This means they will always support the functionality Serai
  needs, yet won't disadvantage other use cases when possible.

- `message-queue`: An ordered message server so services can talk to each other,
  even when the other is offline.
- `crypto` - A series of composable cryptographic libraries built around the
  `ff`/`group` APIs achieving a variety of tasks. These range from generic
  infrastructure, to our IETF-compliant FROST implementation, to a DLEq proof as
  needed for Bitcoin-Monero atomic swaps.

- `processor`: A generic chain processor to process data for Serai and process
- `processor` - A generic chain processor to process data for Serai and process
  events from Serai, executing transactions as expected and needed.

- `coordinator`: A service to manage processors and communicate over a P2P
  network with other validators.

- `substrate`: Substrate crates used to instantiate the Serai network.

- `orchestration`: Dockerfiles and scripts to deploy a Serai node/test
  environment.

- `tests`: Tests for various crates. Generally, `crate/src/tests` is used, or
  `crate/tests`, yet any tests requiring crates' binaries are placed here.

### Security

Serai hosts a bug bounty program via
[Immunefi](https://immunefi.com/bounty/serai/). For in-scope critical
vulnerabilities, we will reward whitehats with up to $30,000.

Anything not in-scope should still be submitted through Immunefi, with rewards
issued at the discretion of the Immunefi program managers.

### Links

- [Website](https://serai.exchange/): https://serai.exchange/
- [Immunefi](https://immunefi.com/bounty/serai/): https://immunefi.com/bounty/serai/
- [Twitter](https://twitter.com/SeraiDEX): https://twitter.com/SeraiDEX
- [Mastodon](https://cryptodon.lol/@serai): https://cryptodon.lol/@serai
- [Discord](https://discord.gg/mpEUtJR3vz): https://discord.gg/mpEUtJR3vz
- [Matrix](https://matrix.to/#/#serai:matrix.org): https://matrix.to/#/#serai:matrix.org
- [Reddit](https://www.reddit.com/r/SeraiDEX/): https://www.reddit.com/r/SeraiDEX/
- [Telegram](https://t.me/SeraiDEX): https://t.me/SeraiDEX
Binary file not shown.
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2023 Cypher Stack

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,7 +0,0 @@
# Cypher Stack /crypto Audit, March 2023

This audit was over the /crypto folder, excluding the ed448 crate, the `Ed448`
ciphersuite in the ciphersuite crate, and the `dleq/experimental` feature. It is
encompassing up to commit 669d2dbffc1dafb82a09d9419ea182667115df06.

Please see https://github.com/cypherstack/serai-audit for provenance.
Binary file not shown.
@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2023 Cypher Stack

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -1,7 +0,0 @@
# Cypher Stack /networks/bitcoin Audit, August 2023

This audit was over the `/networks/bitcoin` folder (at the time located at
`/coins/bitcoin`). It is encompassing up to commit
5121ca75199dff7bd34230880a1fdd793012068c.

Please see https://github.com/cypherstack/serai-btc-audit for provenance.
coins/monero/.gitignore (vendored, new file) | 1
@@ -0,0 +1 @@
c/.build
coins/monero/Cargo.toml (new file) | 52
@@ -0,0 +1,52 @@
[package]
name = "monero-serai"
version = "0.1.0"
description = "A modern Monero wallet library"
license = "MIT"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
edition = "2021"

[build-dependencies]
cc = "1.0"

[dependencies]
hex-literal = "0.3"
lazy_static = "1"
thiserror = "1"

rand_core = "0.6"
rand_chacha = { version = "0.3", optional = true }
rand = "0.8"
rand_distr = "0.4"

subtle = "2.4"

tiny-keccak = { version = "2", features = ["keccak"] }
blake2 = { version = "0.10", optional = true }

curve25519-dalek = { version = "3", features = ["std"] }

group = { version = "0.12" }
dalek-ff-group = { path = "../../crypto/dalek-ff-group" }

transcript = { package = "flexible-transcript", path = "../../crypto/transcript", features = ["recommended"], optional = true }
frost = { package = "modular-frost", path = "../../crypto/frost", features = ["ed25519"], optional = true }
dleq = { package = "dleq-serai", path = "../../crypto/dleq", features = ["serialize"], optional = true }

hex = "0.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"

base58-monero = "1"
monero-epee-bin-serde = "1.0"
monero = "0.16"

reqwest = { version = "0.11", features = ["json"] }

[features]
experimental = []
multisig = ["rand_chacha", "blake2", "transcript", "frost", "dleq"]

[dev-dependencies]
sha2 = "0.10"
tokio = { version = "1", features = ["full"] }
@@ -1,6 +1,6 @@
MIT License

Copyright (c) 2023 Luke Parker
Copyright (c) 2022 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
coins/monero/README.md (new file) | 7
@@ -0,0 +1,7 @@
# monero-serai

A modern Monero transaction library intended for usage in wallets. It prides
itself on accuracy, correctness, and removing common pitfalls developers may
face.

Threshold multisignature support is available via the `multisig` feature.
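Since `multisig` is an off-by-default Cargo feature, downstream crates must opt in to the threshold code. A sketch using modern cargo's built-in `cargo add` (the crate name is real; whether you pull it from crates.io or by path depends on your setup):

# enable the multisig feature on the dependency
cargo add monero-serai --features multisig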
coins/monero/build.rs (new file) | 72
@@ -0,0 +1,72 @@
use std::{env, path::Path, process::Command};

fn main() {
  if !Command::new("git").args(&["submodule", "update", "--init", "--recursive"]).status().unwrap().success() {
    panic!("git failed to init submodules");
  }

  if !Command::new("mkdir").args(&["-p", ".build"])
    .current_dir(&Path::new("c")).status().unwrap().success() {
    panic!("failed to create a directory to track build progress");
  }

  let out_dir = &env::var("OUT_DIR").unwrap();

  // Use a file to signal if Monero was already built, as that should never be rebuilt
  // If the signaling file was deleted, run this script again to rebuild Monero though
  println!("cargo:rerun-if-changed=c/.build/monero");
  if !Path::new("c/.build/monero").exists() {
    if !Command::new("make").arg(format!("-j{}", &env::var("THREADS").unwrap_or("2".to_string())))
      .current_dir(&Path::new("c/monero")).status().unwrap().success() {
      panic!("make failed to build Monero. Please check your dependencies");
    }

    if !Command::new("touch").arg("monero")
      .current_dir(&Path::new("c/.build")).status().unwrap().success() {
      panic!("failed to create a file to label Monero as built");
    }
  }

  println!("cargo:rerun-if-changed=c/wrapper.cpp");
  cc::Build::new()
    .static_flag(true)
    .warnings(false)
    .extra_warnings(false)
    .flag("-Wno-deprecated-declarations")

    .include("c/monero/external/supercop/include")
    .include("c/monero/contrib/epee/include")
    .include("c/monero/src")
    .include("c/monero/build/release/generated_include")

    .define("AUTO_INITIALIZE_EASYLOGGINGPP", None)
    .include("c/monero/external/easylogging++")
    .file("c/monero/external/easylogging++/easylogging++.cc")

    .file("c/monero/src/common/aligned.c")
    .file("c/monero/src/common/perf_timer.cpp")

    .include("c/monero/src/crypto")
    .file("c/monero/src/crypto/crypto-ops-data.c")
    .file("c/monero/src/crypto/crypto-ops.c")
    .file("c/monero/src/crypto/keccak.c")
    .file("c/monero/src/crypto/hash.c")

    .include("c/monero/src/device")
    .file("c/monero/src/device/device_default.cpp")

    .include("c/monero/src/ringct")
    .file("c/monero/src/ringct/rctCryptoOps.c")
    .file("c/monero/src/ringct/rctTypes.cpp")
    .file("c/monero/src/ringct/rctOps.cpp")
    .file("c/monero/src/ringct/multiexp.cc")
    .file("c/monero/src/ringct/bulletproofs.cc")
    .file("c/monero/src/ringct/rctSigs.cpp")

    .file("c/wrapper.cpp")
    .compile("wrapper");

  println!("cargo:rustc-link-search={}", out_dir);
  println!("cargo:rustc-link-lib=wrapper");
  println!("cargo:rustc-link-lib=stdc++");
}
1
coins/monero/c/monero
Submodule
Submodule coins/monero/c/monero added at 424e4de16b
158
coins/monero/c/wrapper.cpp
Normal file
@@ -0,0 +1,158 @@
#include <mutex>

#include "device/device_default.hpp"

#include "ringct/bulletproofs.h"
#include "ringct/rctSigs.h"

typedef std::lock_guard<std::mutex> lock;

std::mutex rng_mutex;
uint8_t rng_entropy[64];

extern "C" {
  void rng(uint8_t* seed) {
    // Set the first half to the seed
    memcpy(rng_entropy, seed, 32);
    // Set the second half to the hash of a DST to ensure a lack of collisions
    crypto::cn_fast_hash("RNG_entropy_seed", 16, (char*) &rng_entropy[32]);
  }
}

extern "C" void monero_wide_reduce(uint8_t* value);
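// Provide Monero's RNG function as a hash chain stepped from the seeded entropy above, making the
// wrapped prover/verifier deterministic for a given seed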
namespace crypto {
  void generate_random_bytes_not_thread_safe(size_t n, void* value) {
    size_t written = 0;
    while (written != n) {
      uint8_t hash[32];
      crypto::cn_fast_hash(rng_entropy, 64, (char*) hash);
      // Step the RNG by setting the latter half to the most recent result
      // Does not leak the RNG, even if the values are leaked (which they are
      // expected to be) due to the first half remaining constant and
      // undisclosed
      memcpy(&rng_entropy[32], hash, 32);

      size_t next = n - written;
      if (next > 32) {
        next = 32;
      }
      memcpy(&((uint8_t*) value)[written], hash, next);
      written += next;
    }
  }

  void random32_unbiased(unsigned char *bytes) {
    uint8_t value[64];
    generate_random_bytes_not_thread_safe(64, value);
    monero_wide_reduce(value);
    memcpy(bytes, value, 32);
  }
}

extern "C" {
  void c_hash_to_point(uint8_t* point) {
    rct::key key_point;
    ge_p3 e_p3;
    memcpy(key_point.bytes, point, 32);
    rct::hash_to_p3(e_p3, key_point);
    ge_p3_tobytes(point, &e_p3);
  }

  uint8_t* c_generate_bp(uint8_t* seed, uint8_t len, uint64_t* a, uint8_t* m) {
    lock guard(rng_mutex);
    rng(seed);

    rct::keyV masks;
    std::vector<uint64_t> amounts;
    masks.resize(len);
    amounts.resize(len);
    for (uint8_t i = 0; i < len; i++) {
      memcpy(masks[i].bytes, m + (i * 32), 32);
      amounts[i] = a[i];
    }

    rct::Bulletproof bp = rct::bulletproof_PROVE(amounts, masks);

    std::stringstream ss;
    binary_archive<true> ba(ss);
    ::serialization::serialize(ba, bp);
    uint8_t* res = (uint8_t*) calloc(ss.str().size(), 1);
    memcpy(res, ss.str().data(), ss.str().size());
    return res;
  }

  bool c_verify_bp(
    uint8_t* seed,
    uint s_len,
    uint8_t* s,
    uint8_t c_len,
    uint8_t* c
  ) {
    // BPs are batch verified which use RNG based weights to ensure individual
    // integrity
    // That's why this must also have control over RNG, to prevent interrupting
    // multisig signing while not using known seeds. Considering this doesn't
    // actually define a batch, and it's only verifying a single BP,
    // it'd probably be fine, but...
    lock guard(rng_mutex);
    rng(seed);

    rct::Bulletproof bp;
    std::stringstream ss;
    std::string str;
    str.assign((char*) s, (size_t) s_len);
    ss << str;
    binary_archive<false> ba(ss);
    ::serialization::serialize(ba, bp);
    if (!ss.good()) {
      return false;
    }

    bp.V.resize(c_len);
    for (uint8_t i = 0; i < c_len; i++) {
      memcpy(bp.V[i].bytes, &c[i * 32], 32);
    }

    try { return rct::bulletproof_VERIFY(bp); } catch(...) { return false; }
  }

  bool c_verify_clsag(
    uint s_len,
    uint8_t* s,
    uint8_t k_len,
    uint8_t* k,
    uint8_t* I,
    uint8_t* p,
    uint8_t* m
  ) {
    rct::clsag clsag;
    std::stringstream ss;
    std::string str;
    str.assign((char*) s, (size_t) s_len);
    ss << str;
    binary_archive<false> ba(ss);
    ::serialization::serialize(ba, clsag);
    if (!ss.good()) {
      return false;
    }

    rct::ctkeyV keys;
    keys.resize(k_len);
    for (uint8_t i = 0; i < k_len; i++) {
      memcpy(keys[i].dest.bytes, &k[(i * 2) * 32], 32);
      memcpy(keys[i].mask.bytes, &k[((i * 2) + 1) * 32], 32);
    }

    memcpy(clsag.I.bytes, I, 32);

    rct::key pseudo_out;
    memcpy(pseudo_out.bytes, p, 32);

    rct::key msg;
    memcpy(msg.bytes, m, 32);

    try {
      return verRctCLSAGSimple(msg, clsag, keys, pseudo_out);
    } catch(...) { return false; }
  }
}
66
coins/monero/src/block.rs
Normal file
@@ -0,0 +1,66 @@
use crate::{
  serialize::*,
  transaction::Transaction
};

#[derive(Clone, PartialEq, Debug)]
pub struct BlockHeader {
  pub major_version: u64,
  pub minor_version: u64,
  pub timestamp: u64,
  pub previous: [u8; 32],
  pub nonce: u32
}

impl BlockHeader {
  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    write_varint(&self.major_version, w)?;
    write_varint(&self.minor_version, w)?;
    write_varint(&self.timestamp, w)?;
    w.write_all(&self.previous)?;
    w.write_all(&self.nonce.to_le_bytes())
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<BlockHeader> {
    Ok(
      BlockHeader {
        major_version: read_varint(r)?,
        minor_version: read_varint(r)?,
        timestamp: read_varint(r)?,
        previous: { let mut previous = [0; 32]; r.read_exact(&mut previous)?; previous },
        nonce: { let mut nonce = [0; 4]; r.read_exact(&mut nonce)?; u32::from_le_bytes(nonce) }
      }
    )
  }
}

#[derive(Clone, PartialEq, Debug)]
pub struct Block {
  pub header: BlockHeader,
  pub miner_tx: Transaction,
  pub txs: Vec<[u8; 32]>
}

impl Block {
  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    self.header.serialize(w)?;
    self.miner_tx.serialize(w)?;
    write_varint(&self.txs.len().try_into().unwrap(), w)?;
    for tx in &self.txs {
      w.write_all(tx)?;
    }
    Ok(())
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<Block> {
    Ok(
      Block {
        header: BlockHeader::deserialize(r)?,
        miner_tx: Transaction::deserialize(r)?,
        txs: (0 .. read_varint(r)?).map(
          |_| { let mut tx = [0; 32]; r.read_exact(&mut tx).map(|_| tx) }
        ).collect::<Result<_, _>>()?
      }
    )
  }
}
76
coins/monero/src/frost.rs
Normal file
@@ -0,0 +1,76 @@
use std::io::Read;

use thiserror::Error;
use rand_core::{RngCore, CryptoRng};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use group::{Group, GroupEncoding};

use transcript::{Transcript, RecommendedTranscript};
use dalek_ff_group as dfg;
use dleq::DLEqProof;

#[derive(Clone, Error, Debug)]
pub enum MultisigError {
  #[error("internal error ({0})")]
  InternalError(String),
  #[error("invalid discrete log equality proof")]
  InvalidDLEqProof(u16),
  #[error("invalid key image {0}")]
  InvalidKeyImage(u16)
}

fn transcript() -> RecommendedTranscript {
  RecommendedTranscript::new(b"monero_key_image_dleq")
}

#[allow(non_snake_case)]
pub(crate) fn write_dleq<R: RngCore + CryptoRng>(
  rng: &mut R,
  H: EdwardsPoint,
  x: Scalar
) -> Vec<u8> {
  let mut res = Vec::with_capacity(64);
  DLEqProof::prove(
    rng,
    // Doesn't take in a larger transcript object due to the usage of this
    // Every prover would immediately write their own DLEq proof, when they can only do so in
    // the proper order if they want to reach consensus
    // It'd be a poor API to have CLSAG define a new transcript solely to pass here, just to try to
    // merge later in some form, when it should instead just merge xH (as it does)
    &mut transcript(),
    &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)],
    dfg::Scalar(x)
  ).serialize(&mut res).unwrap();
  res
}

#[allow(non_snake_case)]
pub(crate) fn read_dleq<Re: Read>(
  serialized: &mut Re,
  H: EdwardsPoint,
  l: u16,
  xG: dfg::EdwardsPoint
) -> Result<dfg::EdwardsPoint, MultisigError> {
  let mut bytes = [0; 32];
  serialized.read_exact(&mut bytes).map_err(|_| MultisigError::InvalidDLEqProof(l))?;
  // dfg ensures the point is torsion free
  let xH = Option::<dfg::EdwardsPoint>::from(
    dfg::EdwardsPoint::from_bytes(&bytes)
  ).ok_or(MultisigError::InvalidDLEqProof(l))?;
  // Ensure this is a canonical point
  if xH.to_bytes() != bytes {
    Err(MultisigError::InvalidDLEqProof(l))?;
  }

  DLEqProof::<dfg::EdwardsPoint>::deserialize(
    serialized
  ).map_err(|_| MultisigError::InvalidDLEqProof(l))?.verify(
    &mut transcript(),
    &[dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(H)],
    &[xG, xH]
  ).map_err(|_| MultisigError::InvalidDLEqProof(l))?;

  Ok(xH)
}
100
coins/monero/src/lib.rs
Normal file
@@ -0,0 +1,100 @@
use std::slice;

use lazy_static::lazy_static;
use rand_core::{RngCore, CryptoRng};

use subtle::ConstantTimeEq;

use tiny_keccak::{Hasher, Keccak};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  edwards::{EdwardsPoint, EdwardsBasepointTable, CompressedEdwardsY}
};

#[cfg(feature = "multisig")]
pub mod frost;

mod serialize;

pub mod ringct;

pub mod transaction;
pub mod block;

pub mod rpc;
pub mod wallet;

#[cfg(test)]
mod tests;

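// Monero's alternate generator H, used as the amount term's base in Pedersen commitments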
lazy_static! {
  static ref H: EdwardsPoint = CompressedEdwardsY(
    hex::decode("8b655970153799af2aeadc9ff1add0ea6c7251d54154cfa92c173a0dd39c1f94").unwrap().try_into().unwrap()
  ).decompress().unwrap();
  static ref H_TABLE: EdwardsBasepointTable = EdwardsBasepointTable::create(&*H);
}

// Function from libsodium our subsection of Monero relies on. Implementing it here means we don't
// need to link against libsodium
#[no_mangle]
unsafe extern "C" fn crypto_verify_32(a: *const u8, b: *const u8) -> isize {
  isize::from(
    slice::from_raw_parts(a, 32).ct_eq(slice::from_raw_parts(b, 32)).unwrap_u8()
  ) - 1
}

// Offer a wide reduction to C. Our seeded RNG prevented Monero from defining an unbiased scalar
// generation function, and in order to not use Monero code (which would require propagating its
// license), the function was rewritten. It was rewritten with wide reduction, instead of rejection
// sampling however, hence the need for this function
#[no_mangle]
unsafe extern "C" fn monero_wide_reduce(value: *mut u8) {
  let res = Scalar::from_bytes_mod_order_wide(
    std::slice::from_raw_parts(value, 64).try_into().unwrap()
  );
  for (i, b) in res.to_bytes().iter().enumerate() {
    value.add(i).write(*b);
  }
}

#[allow(non_snake_case)]
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Commitment {
  pub mask: Scalar,
  pub amount: u64
}

impl Commitment {
  pub fn zero() -> Commitment {
    Commitment { mask: Scalar::one(), amount: 0 }
  }

  pub fn new(mask: Scalar, amount: u64) -> Commitment {
    Commitment { mask, amount }
  }

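  // Pedersen commitment: mask * G + amount * H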
  pub fn calculate(&self) -> EdwardsPoint {
    (&self.mask * &ED25519_BASEPOINT_TABLE) + (&Scalar::from(self.amount) * &*H_TABLE)
  }
}

// Allows using a modern rand as dalek's is notoriously dated
pub fn random_scalar<R: RngCore + CryptoRng>(rng: &mut R) -> Scalar {
  let mut r = [0; 64];
  rng.fill_bytes(&mut r);
  Scalar::from_bytes_mod_order_wide(&r)
}

pub fn hash(data: &[u8]) -> [u8; 32] {
  let mut keccak = Keccak::v256();
  keccak.update(data);
  let mut res = [0; 32];
  keccak.finalize(&mut res);
  res
}

pub fn hash_to_scalar(data: &[u8]) -> Scalar {
  Scalar::from_bytes_mod_order(hash(&data))
}
161
coins/monero/src/ringct/bulletproofs.rs
Normal file
@@ -0,0 +1,161 @@
#![allow(non_snake_case)]

use rand_core::{RngCore, CryptoRng};

use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use crate::{Commitment, wallet::TransactionError, serialize::*};

pub(crate) const MAX_OUTPUTS: usize = 16;

#[derive(Clone, PartialEq, Debug)]
pub struct Bulletproofs {
  pub A: EdwardsPoint,
  pub S: EdwardsPoint,
  pub T1: EdwardsPoint,
  pub T2: EdwardsPoint,
  pub taux: Scalar,
  pub mu: Scalar,
  pub L: Vec<EdwardsPoint>,
  pub R: Vec<EdwardsPoint>,
  pub a: Scalar,
  pub b: Scalar,
  pub t: Scalar
}

impl Bulletproofs {
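  // Estimate the proof's contribution to the transaction weight, including Monero's clawback of
  // 4/5 of the cost of padding the proof to a power of two outputs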
  pub(crate) fn fee_weight(outputs: usize) -> usize {
    let proofs = 6 + usize::try_from(usize::BITS - (outputs - 1).leading_zeros()).unwrap();
    let len = (9 + (2 * proofs)) * 32;

    let mut clawback = 0;
    let padded = 1 << (proofs - 6);
    if padded > 2 {
      const BP_BASE: usize = 368;
      clawback = ((BP_BASE * padded) - len) * 4 / 5;
    }

    len + clawback
  }

  pub fn new<R: RngCore + CryptoRng>(rng: &mut R, outputs: &[Commitment]) -> Result<Bulletproofs, TransactionError> {
    if outputs.len() > MAX_OUTPUTS {
      return Err(TransactionError::TooManyOutputs)?;
    }

    let mut seed = [0; 32];
    rng.fill_bytes(&mut seed);

    let masks = outputs.iter().map(|commitment| commitment.mask.to_bytes()).collect::<Vec<_>>();
    let amounts = outputs.iter().map(|commitment| commitment.amount).collect::<Vec<_>>();

    let res;
    unsafe {
      #[link(name = "wrapper")]
      extern "C" {
        fn free(ptr: *const u8);
        fn c_generate_bp(seed: *const u8, len: u8, amounts: *const u64, masks: *const [u8; 32]) -> *const u8;
      }

      let ptr = c_generate_bp(
        seed.as_ptr(),
        u8::try_from(outputs.len()).unwrap(),
        amounts.as_ptr(),
        masks.as_ptr()
      );

      let mut len = 6 * 32;
      len += (2 * (1 + (usize::from(ptr.add(len).read()) * 32))) + (3 * 32);
      res = Bulletproofs::deserialize(
        // Wrap in a cursor to provide a mutable Reader
        &mut std::io::Cursor::new(std::slice::from_raw_parts(ptr, len))
      ).expect("Couldn't deserialize Bulletproofs from Monero");
      free(ptr);
    };

    Ok(res)
  }

  #[must_use]
  pub fn verify<R: RngCore + CryptoRng>(&self, rng: &mut R, commitments: &[EdwardsPoint]) -> bool {
    if commitments.len() > 16 {
      return false;
    }

    let mut seed = [0; 32];
    rng.fill_bytes(&mut seed);

    let mut serialized = Vec::with_capacity((9 + (2 * self.L.len())) * 32);
    self.serialize(&mut serialized).unwrap();
    let commitments: Vec<[u8; 32]> = commitments.iter().map(
      |commitment| (commitment * Scalar::from(8u8).invert()).compress().to_bytes()
    ).collect();

    unsafe {
      #[link(name = "wrapper")]
      extern "C" {
        fn c_verify_bp(
          seed: *const u8,
          serialized_len: usize,
          serialized: *const u8,
          commitments_len: u8,
          commitments: *const [u8; 32]
        ) -> bool;
      }

      c_verify_bp(
        seed.as_ptr(),
        serialized.len(),
        serialized.as_ptr(),
        u8::try_from(commitments.len()).unwrap(),
        commitments.as_ptr()
      )
    }
  }

  fn serialize_core<
    W: std::io::Write,
    F: Fn(&[EdwardsPoint], &mut W) -> std::io::Result<()>
  >(&self, w: &mut W, specific_write_vec: F) -> std::io::Result<()> {
    write_point(&self.A, w)?;
    write_point(&self.S, w)?;
    write_point(&self.T1, w)?;
    write_point(&self.T2, w)?;
    write_scalar(&self.taux, w)?;
    write_scalar(&self.mu, w)?;
    specific_write_vec(&self.L, w)?;
    specific_write_vec(&self.R, w)?;
    write_scalar(&self.a, w)?;
    write_scalar(&self.b, w)?;
    write_scalar(&self.t, w)
  }

  pub fn signature_serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    self.serialize_core(w, |points, w| write_raw_vec(write_point, points, w))
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    self.serialize_core(w, |points, w| write_vec(write_point, points, w))
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<Bulletproofs> {
    let bp = Bulletproofs {
      A: read_point(r)?,
      S: read_point(r)?,
      T1: read_point(r)?,
      T2: read_point(r)?,
      taux: read_scalar(r)?,
      mu: read_scalar(r)?,
      L: read_vec(read_point, r)?,
      R: read_vec(read_point, r)?,
      a: read_scalar(r)?,
      b: read_scalar(r)?,
      t: read_scalar(r)?
    };

    if bp.L.len() != bp.R.len() {
      Err(std::io::Error::new(std::io::ErrorKind::Other, "mismatched L/R len"))?;
    }
    Ok(bp)
  }
}
360
coins/monero/src/ringct/clsag/mod.rs
Normal file
@@ -0,0 +1,360 @@
#![allow(non_snake_case)]

use lazy_static::lazy_static;
use thiserror::Error;
use rand_core::{RngCore, CryptoRng};

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  traits::VartimePrecomputedMultiscalarMul,
  edwards::{EdwardsPoint, VartimeEdwardsPrecomputation}
};

use crate::{
  Commitment, random_scalar, hash_to_scalar,
  transaction::RING_LEN,
  wallet::decoys::Decoys,
  ringct::hash_to_point,
  serialize::*
};

#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::{ClsagDetails, ClsagMultisig};

lazy_static! {
  static ref INV_EIGHT: Scalar = Scalar::from(8u8).invert();
}

#[derive(Clone, Error, Debug)]
pub enum ClsagError {
  #[error("internal error ({0})")]
  InternalError(String),
  #[error("invalid ring member (member {0}, ring size {1})")]
  InvalidRingMember(u8, u8),
  #[error("invalid commitment")]
  InvalidCommitment,
  #[error("invalid D")]
  InvalidD,
  #[error("invalid s")]
  InvalidS,
  #[error("invalid c1")]
  InvalidC1
}

#[derive(Clone, PartialEq, Debug)]
pub struct ClsagInput {
  // The actual commitment for the true spend
  pub commitment: Commitment,
  // True spend index, offsets, and ring
  pub decoys: Decoys
}

impl ClsagInput {
  pub fn new(
    commitment: Commitment,
    decoys: Decoys
  ) -> Result<ClsagInput, ClsagError> {
    let n = decoys.len();
    if n > u8::MAX.into() {
      Err(ClsagError::InternalError("max ring size in this library is u8 max".to_string()))?;
    }
    let n = u8::try_from(n).unwrap();
    if decoys.i >= n {
      Err(ClsagError::InvalidRingMember(decoys.i, n))?;
    }

    // Validate the commitment matches
    if decoys.ring[usize::from(decoys.i)][1] != commitment.calculate() {
      Err(ClsagError::InvalidCommitment)?;
    }

    Ok(ClsagInput { commitment, decoys })
  }
}

enum Mode {
  Sign(usize, EdwardsPoint, EdwardsPoint),
  #[cfg(feature = "experimental")]
  Verify(Scalar)
}

// Core of the CLSAG algorithm, applicable to both sign and verify with minimal differences
// Said differences are covered via the above Mode
fn core(
  ring: &[[EdwardsPoint; 2]],
  I: &EdwardsPoint,
  pseudo_out: &EdwardsPoint,
  msg: &[u8; 32],
  D: &EdwardsPoint,
  s: &[Scalar],
  A_c1: Mode
) -> ((EdwardsPoint, Scalar, Scalar), Scalar) {
  let n = ring.len();

  let images_precomp = VartimeEdwardsPrecomputation::new([I, D]);
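  // D is transcripted (and serialized) as D/8, with verifiers multiplying the deserialized value
  // by the cofactor to recover it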
  let D = D * *INV_EIGHT;

  // Generate the transcript
  // Instead of generating multiple, a single transcript is created and then edited as needed
  let mut to_hash = vec![];
  to_hash.reserve_exact(((2 * n) + 5) * 32);
  const PREFIX: &[u8] = "CLSAG_".as_bytes();
  const AGG_0: &[u8] = "CLSAG_agg_0".as_bytes();
  const ROUND: &[u8] = "round".as_bytes();
  to_hash.extend(AGG_0);
  to_hash.extend([0; 32 - AGG_0.len()]);

  let mut P = Vec::with_capacity(n);
  for member in ring {
    P.push(member[0]);
    to_hash.extend(member[0].compress().to_bytes());
  }

  let mut C = Vec::with_capacity(n);
  for member in ring {
    C.push(member[1] - pseudo_out);
    to_hash.extend(member[1].compress().to_bytes());
  }

  to_hash.extend(I.compress().to_bytes());
  to_hash.extend(D.compress().to_bytes());
  to_hash.extend(pseudo_out.compress().to_bytes());
  // mu_P with agg_0
  let mu_P = hash_to_scalar(&to_hash);
  // mu_C with agg_1
  to_hash[AGG_0.len() - 1] = b'1';
  let mu_C = hash_to_scalar(&to_hash);

  // Truncate it for the round transcript, altering the DST as needed
  to_hash.truncate(((2 * n) + 1) * 32);
  for i in 0 .. ROUND.len() {
    to_hash[PREFIX.len() + i] = ROUND[i];
  }
  // Unfortunately, it's I D pseudo_out instead of pseudo_out I D, meaning this needs to be
  // truncated just to add it back
  to_hash.extend(pseudo_out.compress().to_bytes());
  to_hash.extend(msg);

  // Configure the loop based on if we're signing or verifying
  let start;
  let end;
  let mut c;
  match A_c1 {
    Mode::Sign(r, A, AH) => {
      start = r + 1;
      end = r + n;
      to_hash.extend(A.compress().to_bytes());
      to_hash.extend(AH.compress().to_bytes());
      c = hash_to_scalar(&to_hash);
    },

    #[cfg(feature = "experimental")]
    Mode::Verify(c1) => {
      start = 0;
      end = n;
      c = c1;
    }
  }

  // Perform the core loop
  let mut c1 = None;
  for i in (start .. end).map(|i| i % n) {
    if i == 0 {
      c1 = Some(c);
    }

    let c_p = mu_P * c;
    let c_c = mu_C * c;

    let L = (&s[i] * &ED25519_BASEPOINT_TABLE) + (c_p * P[i]) + (c_c * C[i]);
    let PH = hash_to_point(P[i]);
    // Shouldn't be an issue as all of the variables in this vartime statement are public
    let R = (s[i] * PH) + images_precomp.vartime_multiscalar_mul(&[c_p, c_c]);

    to_hash.truncate(((2 * n) + 3) * 32);
    to_hash.extend(L.compress().to_bytes());
    to_hash.extend(R.compress().to_bytes());
    c = hash_to_scalar(&to_hash);
  }

  // This first tuple is needed to continue signing, the latter is the c to be tested/worked with
  ((D, c * mu_P, c * mu_C), c1.unwrap_or(c))
}

#[derive(Clone, PartialEq, Debug)]
pub struct Clsag {
  pub D: EdwardsPoint,
  pub s: Vec<Scalar>,
  pub c1: Scalar
}

impl Clsag {
  // Sign core is the extension of core as needed for signing, yet is shared between single signer
  // and multisig, hence why it's still core
  pub(crate) fn sign_core<R: RngCore + CryptoRng>(
    rng: &mut R,
    I: &EdwardsPoint,
    input: &ClsagInput,
    mask: Scalar,
    msg: &[u8; 32],
    A: EdwardsPoint,
    AH: EdwardsPoint
  ) -> (Clsag, EdwardsPoint, Scalar, Scalar) {
    let r: usize = input.decoys.i.into();

    let pseudo_out = Commitment::new(mask, input.commitment.amount).calculate();
    let z = input.commitment.mask - mask;

    let H = hash_to_point(input.decoys.ring[r][0]);
    let D = H * z;
    let mut s = Vec::with_capacity(input.decoys.ring.len());
    for _ in 0 .. input.decoys.ring.len() {
      s.push(random_scalar(rng));
    }
    let ((D, p, c), c1) = core(&input.decoys.ring, I, &pseudo_out, msg, &D, &s, Mode::Sign(r, A, AH));

    (
      Clsag { D, s, c1 },
      pseudo_out,
      p,
      c * z
    )
  }

  // Single signer CLSAG
  pub fn sign<R: RngCore + CryptoRng>(
    rng: &mut R,
    inputs: &[(Scalar, EdwardsPoint, ClsagInput)],
    sum_outputs: Scalar,
    msg: [u8; 32]
  ) -> Vec<(Clsag, EdwardsPoint)> {
    let nonce = random_scalar(rng);
    let mut rand_source = [0; 64];
    rng.fill_bytes(&mut rand_source);

    let mut res = Vec::with_capacity(inputs.len());
    let mut sum_pseudo_outs = Scalar::zero();
    for i in 0 .. inputs.len() {
      let mut mask = random_scalar(rng);
      if i == (inputs.len() - 1) {
        mask = sum_outputs - sum_pseudo_outs;
      } else {
        sum_pseudo_outs += mask;
      }

      let mut rand_source = [0; 64];
      rng.fill_bytes(&mut rand_source);
      let (mut clsag, pseudo_out, p, c) = Clsag::sign_core(
        rng,
        &inputs[i].1,
        &inputs[i].2,
        mask,
        &msg,
        &nonce * &ED25519_BASEPOINT_TABLE,
        nonce * hash_to_point(inputs[i].2.decoys.ring[usize::from(inputs[i].2.decoys.i)][0])
      );
      clsag.s[usize::from(inputs[i].2.decoys.i)] = nonce - ((p * inputs[i].0) + c);

      res.push((clsag, pseudo_out));
    }

    res
  }

  // Not extensively tested nor guaranteed to have expected parity with Monero
  #[cfg(feature = "experimental")]
  pub fn rust_verify(
    &self,
    ring: &[[EdwardsPoint; 2]],
    I: &EdwardsPoint,
    pseudo_out: &EdwardsPoint,
    msg: &[u8; 32]
  ) -> Result<(), ClsagError> {
    let (_, c1) = core(
      ring,
      I,
      pseudo_out,
      msg,
      &self.D.mul_by_cofactor(),
      &self.s,
      Mode::Verify(self.c1)
    );
    if c1 != self.c1 {
      Err(ClsagError::InvalidC1)?;
    }
    Ok(())
  }

  pub(crate) fn fee_weight() -> usize {
    (RING_LEN * 32) + 32 + 32
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    write_raw_vec(write_scalar, &self.s, w)?;
    w.write_all(&self.c1.to_bytes())?;
    write_point(&self.D, w)
  }

  pub fn deserialize<R: std::io::Read>(decoys: usize, r: &mut R) -> std::io::Result<Clsag> {
    Ok(
      Clsag {
        s: read_raw_vec(read_scalar, decoys, r)?,
        c1: read_scalar(r)?,
        D: read_point(r)?
      }
    )
  }

  pub fn verify(
    &self,
    ring: &[[EdwardsPoint; 2]],
    I: &EdwardsPoint,
    pseudo_out: &EdwardsPoint,
    msg: &[u8; 32]
  ) -> Result<(), ClsagError> {
    // Serialize it to pass the struct to Monero without extensive FFI
    let mut serialized = Vec::with_capacity(1 + ((self.s.len() + 2) * 32));
    write_varint(&self.s.len().try_into().unwrap(), &mut serialized).unwrap();
    self.serialize(&mut serialized).unwrap();

    let I_bytes = I.compress().to_bytes();

    let mut ring_bytes = vec![];
    for member in ring {
      ring_bytes.extend(&member[0].compress().to_bytes());
      ring_bytes.extend(&member[1].compress().to_bytes());
    }

    let pseudo_out_bytes = pseudo_out.compress().to_bytes();

    unsafe {
      // Uses Monero's C verification function to ensure compatibility with Monero
      #[link(name = "wrapper")]
      extern "C" {
        pub(crate) fn c_verify_clsag(
          serialized_len: usize,
          serialized: *const u8,
          ring_size: u8,
          ring: *const u8,
          I: *const u8,
          pseudo_out: *const u8,
          msg: *const u8
        ) -> bool;
      }

      if c_verify_clsag(
        serialized.len(), serialized.as_ptr(),
        u8::try_from(ring.len()).map_err(|_| ClsagError::InternalError("too large ring".to_string()))?,
        ring_bytes.as_ptr(),
        I_bytes.as_ptr(), pseudo_out_bytes.as_ptr(), msg.as_ptr()
      ) {
        Ok(())
      } else {
        Err(ClsagError::InvalidC1)
      }
    }
  }
}
234
coins/monero/src/ringct/clsag/multisig.rs
Normal file
@@ -0,0 +1,234 @@
use core::fmt::Debug;
use std::{io::Read, sync::{Arc, RwLock}};

use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha12Rng;

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  traits::{Identity, IsIdentity},
  scalar::Scalar,
  edwards::EdwardsPoint
};

use group::Group;

use transcript::{Transcript, RecommendedTranscript};
use frost::{curve::Ed25519, FrostError, FrostView, algorithm::Algorithm};
use dalek_ff_group as dfg;

use crate::{
  frost::{MultisigError, write_dleq, read_dleq},
  ringct::{hash_to_point, clsag::{ClsagInput, Clsag}}
};

impl ClsagInput {
  fn transcript<T: Transcript>(&self, transcript: &mut T) {
    // Doesn't domain separate as this is considered part of the larger CLSAG proof

    // Ring index
    transcript.append_message(b"ring_index", &[self.decoys.i]);

    // Ring
    let mut ring = vec![];
    for pair in &self.decoys.ring {
      // Doesn't include global output indexes as CLSAG doesn't care and won't be affected by it
      // They're just an unreliable reference to this data which will be included in the message
      // if in use
      ring.extend(&pair[0].compress().to_bytes());
      ring.extend(&pair[1].compress().to_bytes());
    }
    transcript.append_message(b"ring", &ring);

    // Doesn't include the commitment's parts as the above ring + index includes the commitment
    // The only potential malleability would be if the G/H relationship is known, breaking the
    // discrete log problem, which breaks everything already
  }
}

#[derive(Clone, Debug)]
pub struct ClsagDetails {
  input: ClsagInput,
  mask: Scalar
}

impl ClsagDetails {
  pub fn new(input: ClsagInput, mask: Scalar) -> ClsagDetails {
    ClsagDetails { input, mask }
  }
}

#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Debug)]
struct Interim {
  p: Scalar,
  c: Scalar,

  clsag: Clsag,
  pseudo_out: EdwardsPoint
}

#[allow(non_snake_case)]
#[derive(Clone, Debug)]
pub struct ClsagMultisig {
  transcript: RecommendedTranscript,

  H: EdwardsPoint,
  // Merged here as CLSAG needs it, passing it would be a mess, yet having it beforehand requires a round
  image: EdwardsPoint,

  details: Arc<RwLock<Option<ClsagDetails>>>,

  msg: Option<[u8; 32]>,
  interim: Option<Interim>
}

impl ClsagMultisig {
  pub fn new(
    transcript: RecommendedTranscript,
    output_key: EdwardsPoint,
    details: Arc<RwLock<Option<ClsagDetails>>>
  ) -> Result<ClsagMultisig, MultisigError> {
    Ok(
      ClsagMultisig {
        transcript,

        H: hash_to_point(output_key),
        image: EdwardsPoint::identity(),

        details,

        msg: None,
        interim: None
      }
    )
  }

  pub const fn serialized_len() -> usize {
    32 + (2 * 32)
  }

  fn input(&self) -> ClsagInput {
    (*self.details.read().unwrap()).as_ref().unwrap().input.clone()
  }

  fn mask(&self) -> Scalar {
    (*self.details.read().unwrap()).as_ref().unwrap().mask
  }
}

impl Algorithm<Ed25519> for ClsagMultisig {
  type Transcript = RecommendedTranscript;
  type Signature = (Clsag, EdwardsPoint);

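  // Nonces are provided over both G and H (the key image base), as CLSAG's challenge binds both
  // A (nonce * G) and AH (nonce * H)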
  fn nonces(&self) -> Vec<Vec<dfg::EdwardsPoint>> {
    vec![vec![dfg::EdwardsPoint::generator(), dfg::EdwardsPoint(self.H)]]
  }

  fn preprocess_addendum<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    view: &FrostView<Ed25519>
  ) -> Vec<u8> {
    let mut serialized = Vec::with_capacity(Self::serialized_len());
    serialized.extend((view.secret_share().0 * self.H).compress().to_bytes());
    serialized.extend(write_dleq(rng, self.H, view.secret_share().0));
    serialized
  }

  fn process_addendum<Re: Read>(
    &mut self,
    view: &FrostView<Ed25519>,
    l: u16,
    serialized: &mut Re
  ) -> Result<(), FrostError> {
    if self.image.is_identity().into() {
      self.transcript.domain_separate(b"CLSAG");
      self.input().transcript(&mut self.transcript);
      self.transcript.append_message(b"mask", &self.mask().to_bytes());
    }

    self.transcript.append_message(b"participant", &l.to_be_bytes());
    let image = read_dleq(
      serialized,
      self.H,
      l,
      view.verification_share(l)
    ).map_err(|_| FrostError::InvalidCommitment(l))?.0;
    self.transcript.append_message(b"key_image_share", image.compress().to_bytes().as_ref());
    self.image += image;

    Ok(())
  }

  fn transcript(&mut self) -> &mut Self::Transcript {
    &mut self.transcript
  }

  fn sign_share(
    &mut self,
    view: &FrostView<Ed25519>,
    nonce_sums: &[Vec<dfg::EdwardsPoint>],
    nonces: &[dfg::Scalar],
    msg: &[u8]
  ) -> dfg::Scalar {
    // Use the transcript to get a seeded random number generator
    // The transcript contains private data, preventing passive adversaries from recreating this
    // process even if they have access to commitments (specifically, the ring index being signed
    // for, along with the mask which should not only require knowing the shared keys yet also the
    // input commitment masks)
    let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"decoy_responses"));

    self.msg = Some(msg.try_into().expect("CLSAG message should be 32-bytes"));

    #[allow(non_snake_case)]
    let (clsag, pseudo_out, p, c) = Clsag::sign_core(
      &mut rng,
      &self.image,
      &self.input(),
      self.mask(),
      &self.msg.as_ref().unwrap(),
      nonce_sums[0][0].0,
      nonce_sums[0][1].0
    );
    self.interim = Some(Interim { p, c, clsag, pseudo_out });

    dfg::Scalar(nonces[0].0 - (p * view.secret_share().0))
  }

  #[must_use]
  fn verify(
    &self,
    _: dfg::EdwardsPoint,
    _: &[Vec<dfg::EdwardsPoint>],
    sum: dfg::Scalar
  ) -> Option<Self::Signature> {
    let interim = self.interim.as_ref().unwrap();
    let mut clsag = interim.clsag.clone();
    clsag.s[usize::from(self.input().decoys.i)] = sum.0 - interim.c;
    if clsag.verify(
      &self.input().decoys.ring,
      &self.image,
      &interim.pseudo_out,
      &self.msg.as_ref().unwrap()
    ).is_ok() {
      return Some((clsag, interim.pseudo_out));
    }
    None
  }

  #[must_use]
  fn verify_share(
    &self,
    verification_share: dfg::EdwardsPoint,
    nonces: &[Vec<dfg::EdwardsPoint>],
    share: dfg::Scalar,
  ) -> bool {
    let interim = self.interim.as_ref().unwrap();
    (&share.0 * &ED25519_BASEPOINT_TABLE) == (nonces[0][0].0 - (interim.p * verification_share.0))
  }
}
@@ -1,21 +1,35 @@
 use subtle::ConditionallySelectable;
 
-use curve25519_dalek::edwards::EdwardsPoint;
+use curve25519_dalek::edwards::{CompressedEdwardsY, EdwardsPoint};
 
 use group::ff::{Field, PrimeField};
-use dalek_ff_group::FieldElement;
+use dalek_ff_group::field::FieldElement;
 
-use monero_io::decompress_point;
-
-use crate::keccak256;
+use crate::hash;
 
-/// Monero's `hash_to_ec` function.
-pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
+pub fn hash_to_point(point: EdwardsPoint) -> EdwardsPoint {
+  let mut bytes = point.compress().to_bytes();
+  unsafe {
+    #[link(name = "wrapper")]
+    extern "C" {
+      fn c_hash_to_point(point: *const u8);
+    }
+    c_hash_to_point(bytes.as_mut_ptr());
+  }
+  CompressedEdwardsY::from_slice(&bytes).decompress().unwrap()
+}
+
+// This works without issue. It's also 140 times slower (@ 3.5ms), and despite checking it passes
+// for all branches, there still could be *some* discrepancy somewhere. There's no reason to use it
+// unless we're trying to purge that section of the C static library, which we aren't right now
+#[allow(dead_code)]
+pub(crate) fn rust_hash_to_point(key: EdwardsPoint) -> EdwardsPoint {
   #[allow(non_snake_case)]
   let A = FieldElement::from(486662u64);
 
-  let v = FieldElement::from_square(keccak256(&bytes)).double();
-  let w = v + FieldElement::ONE;
+  let v = FieldElement::from_square(hash(&key.compress().to_bytes())).double();
+  let w = v + FieldElement::one();
   let x = w.square() + (-A.square() * v);
 
   // This isn't the complete X, yet its initial value
@@ -45,9 +59,9 @@ pub fn hash_to_point(bytes: [u8; 32]) -> EdwardsPoint {
   #[allow(non_snake_case)]
   let mut Y = z - w;
 
-  Y *= Z.invert().unwrap();
+  Y = Y * Z.invert().unwrap();
   let mut bytes = Y.to_repr();
   bytes[31] |= sign.unwrap_u8() << 7;
 
-  decompress_point(bytes).unwrap().mul_by_cofactor()
+  CompressedEdwardsY(bytes).decompress().unwrap().mul_by_cofactor()
 }
145
coins/monero/src/ringct/mod.rs
Normal file
@@ -0,0 +1,145 @@
use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar, edwards::EdwardsPoint};

pub(crate) mod hash_to_point;
pub use hash_to_point::hash_to_point;

pub mod clsag;
pub mod bulletproofs;

use crate::{
  serialize::*,
  ringct::{clsag::Clsag, bulletproofs::Bulletproofs}
};

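// The key image for a secret x is x * hash_to_point(x * G)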
pub fn generate_key_image(secret: Scalar) -> EdwardsPoint {
  secret * hash_to_point(&secret * &ED25519_BASEPOINT_TABLE)
}

#[derive(Clone, PartialEq, Debug)]
pub struct RctBase {
  pub fee: u64,
  pub ecdh_info: Vec<[u8; 8]>,
  pub commitments: Vec<EdwardsPoint>
}

impl RctBase {
  pub(crate) fn fee_weight(outputs: usize) -> usize {
    1 + 8 + (outputs * (8 + 32))
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W, rct_type: u8) -> std::io::Result<()> {
    w.write_all(&[rct_type])?;
    match rct_type {
      0 => Ok(()),
      5 => {
        write_varint(&self.fee, w)?;
        for ecdh in &self.ecdh_info {
          w.write_all(ecdh)?;
        }
        write_raw_vec(write_point, &self.commitments, w)
      },
      _ => panic!("Serializing unknown RctType's Base")
    }
  }

  pub fn deserialize<R: std::io::Read>(outputs: usize, r: &mut R) -> std::io::Result<(RctBase, u8)> {
    let mut rct_type = [0];
    r.read_exact(&mut rct_type)?;
    Ok((
      if rct_type[0] == 0 {
        RctBase { fee: 0, ecdh_info: vec![], commitments: vec![] }
      } else {
        RctBase {
          fee: read_varint(r)?,
          ecdh_info: (0 .. outputs).map(
            |_| { let mut ecdh = [0; 8]; r.read_exact(&mut ecdh).map(|_| ecdh) }
          ).collect::<Result<_, _>>()?,
          commitments: read_raw_vec(read_point, outputs, r)?
        }
      },
      rct_type[0]
    ))
  }
}

#[derive(Clone, PartialEq, Debug)]
pub enum RctPrunable {
  Null,
  Clsag {
    bulletproofs: Vec<Bulletproofs>,
    clsags: Vec<Clsag>,
    pseudo_outs: Vec<EdwardsPoint>
  }
}

impl RctPrunable {
  pub fn rct_type(&self) -> u8 {
    match self {
      RctPrunable::Null => 0,
      RctPrunable::Clsag { .. } => 5
    }
  }

  pub(crate) fn fee_weight(inputs: usize, outputs: usize) -> usize {
    1 + Bulletproofs::fee_weight(outputs) + (inputs * (Clsag::fee_weight() + 32))
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    match self {
      RctPrunable::Null => Ok(()),
      RctPrunable::Clsag { bulletproofs, clsags, pseudo_outs } => {
        write_vec(Bulletproofs::serialize, &bulletproofs, w)?;
        write_raw_vec(Clsag::serialize, &clsags, w)?;
        write_raw_vec(write_point, &pseudo_outs, w)
      }
    }
  }

  pub fn deserialize<R: std::io::Read>(
    rct_type: u8,
    decoys: &[usize],
    r: &mut R
  ) -> std::io::Result<RctPrunable> {
    Ok(
      match rct_type {
        0 => RctPrunable::Null,
        5 => RctPrunable::Clsag {
          // TODO: Can the amount of outputs be calculated from the BPs for any validly formed TX?
          bulletproofs: read_vec(Bulletproofs::deserialize, r)?,
          clsags: (0 .. decoys.len()).map(|o| Clsag::deserialize(decoys[o], r)).collect::<Result<_, _>>()?,
          pseudo_outs: read_raw_vec(read_point, decoys.len(), r)?
        },
        _ => Err(std::io::Error::new(std::io::ErrorKind::Other, "Tried to deserialize unknown RCT type"))?
      }
    )
  }

  pub fn signature_serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    match self {
      RctPrunable::Null => panic!("Serializing RctPrunable::Null for a signature"),
      RctPrunable::Clsag { bulletproofs, .. } => bulletproofs.iter().map(|bp| bp.signature_serialize(w)).collect(),
    }
  }
}

#[derive(Clone, PartialEq, Debug)]
pub struct RctSignatures {
  pub base: RctBase,
  pub prunable: RctPrunable
}

impl RctSignatures {
  pub(crate) fn fee_weight(inputs: usize, outputs: usize) -> usize {
    RctBase::fee_weight(outputs) + RctPrunable::fee_weight(inputs, outputs)
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    self.base.serialize(w, self.prunable.rct_type())?;
    self.prunable.serialize(w)
  }

  pub fn deserialize<R: std::io::Read>(decoys: Vec<usize>, outputs: usize, r: &mut R) -> std::io::Result<RctSignatures> {
    let base = RctBase::deserialize(outputs, r)?;
    Ok(RctSignatures { base: base.0, prunable: RctPrunable::deserialize(base.1, &decoys, r)? })
  }
}
353
coins/monero/src/rpc.rs
Normal file
@@ -0,0 +1,353 @@
use std::fmt::Debug;

use thiserror::Error;

use curve25519_dalek::edwards::{EdwardsPoint, CompressedEdwardsY};

use serde::{Serialize, Deserialize, de::DeserializeOwned};
use serde_json::json;

use reqwest;

use crate::{transaction::{Input, Timelock, Transaction}, block::Block, wallet::Fee};

#[derive(Deserialize, Debug)]
pub struct EmptyResponse {}
#[derive(Deserialize, Debug)]
pub struct JsonRpcResponse<T> {
  result: T
}

#[derive(Clone, Error, Debug)]
pub enum RpcError {
  #[error("internal error ({0})")]
  InternalError(String),
  #[error("connection error")]
  ConnectionError,
  #[error("transactions not found")]
  TransactionsNotFound(Vec<[u8; 32]>),
  #[error("invalid point ({0})")]
  InvalidPoint(String),
  #[error("pruned transaction")]
  PrunedTransaction,
  #[error("invalid transaction ({0:?})")]
  InvalidTransaction([u8; 32])
}

fn rpc_hex(value: &str) -> Result<Vec<u8>, RpcError> {
  hex::decode(value).map_err(|_| RpcError::InternalError("Monero returned invalid hex".to_string()))
}

fn rpc_point(point: &str) -> Result<EdwardsPoint, RpcError> {
  CompressedEdwardsY(
    rpc_hex(point)?.try_into().map_err(|_| RpcError::InvalidPoint(point.to_string()))?
  ).decompress().ok_or(RpcError::InvalidPoint(point.to_string()))
}

#[derive(Clone, Debug)]
pub struct Rpc(String);

impl Rpc {
  pub fn new(daemon: String) -> Rpc {
    Rpc(daemon)
  }

  pub async fn rpc_call<
    Params: Serialize + Debug,
    Response: DeserializeOwned + Debug
  >(&self, method: &str, params: Option<Params>) -> Result<Response, RpcError> {
    let client = reqwest::Client::new();
    let mut builder = client.post(&(self.0.clone() + "/" + method));
    if let Some(params) = params.as_ref() {
      builder = builder.json(params);
    }

    self.call_tail(method, builder).await
  }

  pub async fn bin_call<
    Response: DeserializeOwned + Debug
  >(&self, method: &str, params: Vec<u8>) -> Result<Response, RpcError> {
    let client = reqwest::Client::new();
    let builder = client.post(&(self.0.clone() + "/" + method)).body(params);
    self.call_tail(method, builder.header("Content-Type", "application/octet-stream")).await
  }

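  // Methods ending in ".bin" are parsed as Monero's epee binary format; all others as JSON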
  async fn call_tail<
    Response: DeserializeOwned + Debug
  >(&self, method: &str, builder: reqwest::RequestBuilder) -> Result<Response, RpcError> {
    let res = builder
      .send()
      .await
      .map_err(|_| RpcError::ConnectionError)?;

    Ok(
      if !method.ends_with(".bin") {
        serde_json::from_str(&res.text().await.map_err(|_| RpcError::ConnectionError)?)
          .map_err(|_| RpcError::InternalError("Failed to parse JSON response".to_string()))?
      } else {
        monero_epee_bin_serde::from_bytes(&res.bytes().await.map_err(|_| RpcError::ConnectionError)?)
          .map_err(|_| RpcError::InternalError("Failed to parse binary response".to_string()))?
      }
    )
  }

  pub async fn get_height(&self) -> Result<usize, RpcError> {
    #[derive(Deserialize, Debug)]
    struct HeightResponse {
      height: usize
    }
    Ok(self.rpc_call::<Option<()>, HeightResponse>("get_height", None).await?.height)
  }

  async fn get_transactions_core(
    &self,
    hashes: &[[u8; 32]]
  ) -> Result<(Vec<Result<Transaction, RpcError>>, Vec<[u8; 32]>), RpcError> {
    if hashes.len() == 0 {
      return Ok((vec![], vec![]));
    }

    #[derive(Deserialize, Debug)]
    struct TransactionResponse {
      tx_hash: String,
      as_hex: String,
      pruned_as_hex: String
    }
    #[derive(Deserialize, Debug)]
    struct TransactionsResponse {
      #[serde(default)]
      missed_tx: Vec<String>,
      txs: Vec<TransactionResponse>
    }

    let txs: TransactionsResponse = self.rpc_call("get_transactions", Some(json!({
      "txs_hashes": hashes.iter().map(|hash| hex::encode(&hash)).collect::<Vec<_>>()
    }))).await?;

    Ok((
      txs.txs.iter().map(|res| {
        let tx = Transaction::deserialize(
          &mut std::io::Cursor::new(
            rpc_hex(if res.as_hex.len() != 0 { &res.as_hex } else { &res.pruned_as_hex }).unwrap()
          )
        ).map_err(|_| RpcError::InvalidTransaction(hex::decode(&res.tx_hash).unwrap().try_into().unwrap()))?;

        // https://github.com/monero-project/monero/issues/8311
        if res.as_hex.len() == 0 {
          match tx.prefix.inputs.get(0) {
            Some(Input::Gen { .. }) => (),
            _ => Err(RpcError::PrunedTransaction)?
          }
        }

        Ok(tx)
      }).collect(),

      txs.missed_tx.iter().map(|hash| hex::decode(&hash).unwrap().try_into().unwrap()).collect()
    ))
  }

  pub async fn get_transactions(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    let (txs, missed) = self.get_transactions_core(hashes).await?;
    if missed.len() != 0 {
      Err(RpcError::TransactionsNotFound(missed))?;
    }
    // This will clone several KB and is accordingly inefficient
    // TODO: Optimize
    txs.iter().cloned().collect::<Result<_, _>>()
  }

  pub async fn get_transactions_possible(&self, hashes: &[[u8; 32]]) -> Result<Vec<Transaction>, RpcError> {
    let (txs, _) = self.get_transactions_core(hashes).await?;
    Ok(txs.iter().cloned().filter_map(|tx| tx.ok()).collect())
  }

  pub async fn get_block(&self, height: usize) -> Result<Block, RpcError> {
    #[derive(Deserialize, Debug)]
    struct BlockResponse {
      blob: String
    }

    let block: JsonRpcResponse<BlockResponse> = self.rpc_call("json_rpc", Some(json!({
      "method": "get_block",
      "params": {
        "height": height
      }
    }))).await?;

    Ok(
      Block::deserialize(
        &mut std::io::Cursor::new(rpc_hex(&block.result.blob)?)
      ).expect("Monero returned a block we couldn't deserialize")
    )
  }

  async fn get_block_transactions_core(
    &self,
    height: usize,
    possible: bool
  ) -> Result<Vec<Transaction>, RpcError> {
    let block = self.get_block(height).await?;
    let mut res = vec![block.miner_tx];
    res.extend(
      if possible {
        self.get_transactions_possible(&block.txs).await?
      } else {
        self.get_transactions(&block.txs).await?
      }
    );
    Ok(res)
  }

  pub async fn get_block_transactions(&self, height: usize) -> Result<Vec<Transaction>, RpcError> {
    self.get_block_transactions_core(height, false).await
  }

  pub async fn get_block_transactions_possible(&self, height: usize) -> Result<Vec<Transaction>, RpcError> {
    self.get_block_transactions_core(height, true).await
  }

  pub async fn get_o_indexes(&self, hash: [u8; 32]) -> Result<Vec<u64>, RpcError> {
    #[derive(Serialize, Debug)]
    struct Request {
      txid: [u8; 32]
    }

    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct OIndexes {
      o_indexes: Vec<u64>,
      status: String,
      untrusted: bool,
      credits: usize,
      top_hash: String
    }

    let indexes: OIndexes = self.bin_call("get_o_indexes.bin", monero_epee_bin_serde::to_bytes(
      &Request {
        txid: hash
      }).unwrap()
    ).await?;

    Ok(indexes.o_indexes)
  }

  // from and to are inclusive
  pub async fn get_output_distribution(&self, from: usize, to: usize) -> Result<Vec<u64>, RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    pub struct Distribution {
      distribution: Vec<u64>
    }

    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct Distributions {
      distributions: Vec<Distribution>
    }

    let mut distributions: JsonRpcResponse<Distributions> = self.rpc_call("json_rpc", Some(json!({
      "method": "get_output_distribution",
      "params": {
        "binary": false,
        "amounts": [0],
        "cumulative": true,
        "from_height": from,
        "to_height": to
      }
    }))).await?;

    Ok(distributions.result.distributions.swap_remove(0).distribution)
  }

  pub async fn get_outputs(
    &self,
    indexes: &[u64],
    height: usize
  ) -> Result<Vec<Option<[EdwardsPoint; 2]>>, RpcError> {
    #[derive(Deserialize, Debug)]
    pub struct Out {
      key: String,
      mask: String,
      txid: String
    }

    #[derive(Deserialize, Debug)]
    struct Outs {
      outs: Vec<Out>
    }

    let outs: Outs = self.rpc_call("get_outs", Some(json!({
      "get_txid": true,
      "outputs": indexes.iter().map(|o| json!({
        "amount": 0,
        "index": o
      })).collect::<Vec<_>>()
    }))).await?;

    let txs = self.get_transactions(
      &outs.outs.iter().map(|out|
        rpc_hex(&out.txid).expect("Monero returned an invalidly encoded hash")
          .try_into().expect("Monero returned an invalid sized hash")
      ).collect::<Vec<_>>()
    ).await?;
    // TODO: Support time based lock times. These shouldn't be needed, and it may be painful to
    // get the median time for the given height, yet we do need to in order to be complete
    outs.outs.iter().enumerate().map(
      |(i, out)| Ok(
        Some([rpc_point(&out.key)?, rpc_point(&out.mask)?]).filter(|_| {
          match txs[i].prefix.timelock {
            Timelock::Block(t_height) => (t_height <= height),
            _ => false
          }
        })
      )
    ).collect()
  }

  pub async fn get_fee(&self) -> Result<Fee, RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct FeeResponse {
      fee: u64,
      quantization_mask: u64
    }

    let res: JsonRpcResponse<FeeResponse> = self.rpc_call("json_rpc", Some(json!({
      "method": "get_fee_estimate"
    }))).await?;

    Ok(Fee { per_weight: res.result.fee, mask: res.result.quantization_mask })
  }

  pub async fn publish_transaction(&self, tx: &Transaction) -> Result<(), RpcError> {
    #[allow(dead_code)]
    #[derive(Deserialize, Debug)]
    struct SendRawResponse {
      status: String,
      double_spend: bool,
      fee_too_low: bool,
      invalid_input: bool,
      invalid_output: bool,
      low_mixin: bool,
      not_relayed: bool,
      overspend: bool,
      too_big: bool,
      too_few_outputs: bool,
      reason: String
    }

    let mut buf = Vec::with_capacity(2048);
    tx.serialize(&mut buf).unwrap();
    let res: SendRawResponse = self.rpc_call("send_raw_transaction", Some(json!({
      "tx_as_hex": hex::encode(&buf)
    }))).await?;

    if res.status != "OK" {
      Err(RpcError::InvalidTransaction(tx.hash()))?;
    }

    Ok(())
  }
}
104
coins/monero/src/serialize.rs
Normal file
@@ -0,0 +1,104 @@
use std::io;

use curve25519_dalek::{scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwardsY}};

pub const VARINT_CONTINUATION_MASK: u8 = 0b1000_0000;

pub fn varint_len(varint: usize) -> usize {
  ((usize::try_from(usize::BITS - varint.leading_zeros()).unwrap().saturating_sub(1)) / 7) + 1
}

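// Monero's VarInts pack seven bits per byte, least significant group first, setting the high bit
// on every byte except the last. For example, 300 encodes as [0xAC, 0x02]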
pub fn write_varint<W: io::Write>(varint: &u64, w: &mut W) -> io::Result<()> {
|
||||
let mut varint = *varint;
|
||||
while {
|
||||
let mut b = u8::try_from(varint & u64::from(!VARINT_CONTINUATION_MASK)).unwrap();
|
||||
varint >>= 7;
|
||||
if varint != 0 {
|
||||
b |= VARINT_CONTINUATION_MASK;
|
||||
}
|
||||
w.write_all(&[b])?;
|
||||
varint != 0
|
||||
} {}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_scalar<W: io::Write>(scalar: &Scalar, w: &mut W) -> io::Result<()> {
|
||||
w.write_all(&scalar.to_bytes())
|
||||
}
|
||||
|
||||
pub fn write_point<W: io::Write>(point: &EdwardsPoint, w: &mut W) -> io::Result<()> {
|
||||
w.write_all(&point.compress().to_bytes())
|
||||
}
|
||||
|
||||
pub fn write_raw_vec<
|
||||
T,
|
||||
W: io::Write,
|
||||
F: Fn(&T, &mut W) -> io::Result<()>
|
||||
>(f: F, values: &[T], w: &mut W) -> io::Result<()> {
|
||||
for value in values {
|
||||
f(value, w)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn write_vec<
|
||||
T,
|
||||
W: io::Write,
|
||||
F: Fn(&T, &mut W) -> io::Result<()>
|
||||
>(f: F, values: &[T], w: &mut W) -> io::Result<()> {
|
||||
write_varint(&values.len().try_into().unwrap(), w)?;
|
||||
write_raw_vec(f, &values, w)
|
||||
}
|
||||
|
||||
pub fn read_byte<R: io::Read>(r: &mut R) -> io::Result<u8> {
|
||||
let mut res = [0; 1];
|
||||
r.read_exact(&mut res)?;
|
||||
Ok(res[0])
|
||||
}
|
||||
|
||||
pub fn read_varint<R: io::Read>(r: &mut R) -> io::Result<u64> {
|
||||
let mut bits = 0;
|
||||
let mut res = 0;
|
||||
while {
|
||||
let b = read_byte(r)?;
|
||||
res += u64::from(b & (!VARINT_CONTINUATION_MASK)) << bits;
|
||||
// TODO: Error if bits exceed u64
|
||||
bits += 7;
|
||||
b & VARINT_CONTINUATION_MASK == VARINT_CONTINUATION_MASK
|
||||
} {}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn read_32<R: io::Read>(r: &mut R) -> io::Result<[u8; 32]> {
|
||||
let mut res = [0; 32];
|
||||
r.read_exact(&mut res)?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// TODO: Potentially update to Monero's parsing rules on scalars/points, which should be any arbitrary 32-bytes
|
||||
// We may be able to consider such transactions as malformed and accordingly be opinionated in ignoring them
|
||||
pub fn read_scalar<R: io::Read>(r: &mut R) -> io::Result<Scalar> {
|
||||
Scalar::from_canonical_bytes(
|
||||
read_32(r)?
|
||||
).ok_or(io::Error::new(io::ErrorKind::Other, "unreduced scalar"))
|
||||
}
|
||||
|
||||
pub fn read_point<R: io::Read>(r: &mut R) -> io::Result<EdwardsPoint> {
|
||||
CompressedEdwardsY(
|
||||
read_32(r)?
|
||||
).decompress().filter(|point| point.is_torsion_free()).ok_or(io::Error::new(io::ErrorKind::Other, "invalid point"))
|
||||
}
|
||||
|
||||
pub fn read_raw_vec<R: io::Read, T, F: Fn(&mut R) -> io::Result<T>>(f: F, len: usize, r: &mut R) -> io::Result<Vec<T>> {
|
||||
let mut res = Vec::with_capacity(
|
||||
len.try_into().map_err(|_| io::Error::new(io::ErrorKind::Other, "length exceeds usize"))?
|
||||
);
|
||||
for _ in 0 .. len {
|
||||
res.push(f(r)?);
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub fn read_vec<R: io::Read, T, F: Fn(&mut R) -> io::Result<T>>(f: F, r: &mut R) -> io::Result<Vec<T>> {
|
||||
read_raw_vec(f, read_varint(r)?.try_into().unwrap(), r)
|
||||
}
|
||||
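To make the VarInt format above concrete: values are written little-endian, 7 bits per byte, with the high bit flagging continuation, so 300 (0b1_0010_1100) encodes as [0xAC, 0x02]. A minimal round-trip sketch, written as a hypothetical in-module test (not part of this commit):

  #[test]
  fn varint_round_trip() {
    use std::io::Cursor;
    let mut buf = vec![];
    write_varint(&300, &mut buf).unwrap();
    // 0xAC = continuation bit | low 7 bits (44); 0x02 = remaining bits (2 << 7 = 256)
    assert_eq!(buf, [0xAC, 0x02]);
    assert_eq!(read_varint(&mut Cursor::new(buf)).unwrap(), 300);
  }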
45 coins/monero/src/tests/address.rs Normal file
@@ -0,0 +1,45 @@
use hex_literal::hex;

use crate::wallet::address::{Network, AddressType, Address};

const SPEND: [u8; 32] = hex!("f8631661f6ab4e6fda310c797330d86e23a682f20d5bc8cc27b18051191f16d7");
const VIEW: [u8; 32] = hex!("4a1535063ad1fee2dabbf909d4fd9a873e29541b401f0944754e17c9a41820ce");

const STANDARD: &'static str = "4B33mFPMq6mKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KQH4pNey";

const PAYMENT_ID: [u8; 8] = hex!("b8963a57855cf73f");
const INTEGRATED: &'static str = "4Ljin4CrSNHKi7Eiyd5XuyKRVMGVZz1Rqb9ZTyGApXW5d1aT7UBDZ89ewmnWFkzJ5wPd2SFbn313vCT8a4E2Qf4KbaTH6MnpXSn88oBX35";

const SUB_SPEND: [u8; 32] = hex!("fe358188b528335ad1cfdc24a22a23988d742c882b6f19a602892eaab3c1b62b");
const SUB_VIEW: [u8; 32] = hex!("9bc2b464de90d058468522098d5610c5019c45fd1711a9517db1eea7794f5470");
const SUBADDRESS: &'static str = "8C5zHM5ud8nGC4hC2ULiBLSWx9infi8JUUmWEat4fcTf8J4H38iWYVdFmPCA9UmfLTZxD43RsyKnGEdZkoGij6csDeUnbEB";

#[test]
fn standard_address() {
  let addr = Address::from_str(STANDARD, Network::Mainnet).unwrap();
  assert_eq!(addr.meta.network, Network::Mainnet);
  assert_eq!(addr.meta.kind, AddressType::Standard);
  assert_eq!(addr.meta.guaranteed, false);
  assert_eq!(addr.spend.compress().to_bytes(), SPEND);
  assert_eq!(addr.view.compress().to_bytes(), VIEW);
}

#[test]
fn integrated_address() {
  let addr = Address::from_str(INTEGRATED, Network::Mainnet).unwrap();
  assert_eq!(addr.meta.network, Network::Mainnet);
  assert_eq!(addr.meta.kind, AddressType::Integrated(PAYMENT_ID));
  assert_eq!(addr.meta.guaranteed, false);
  assert_eq!(addr.spend.compress().to_bytes(), SPEND);
  assert_eq!(addr.view.compress().to_bytes(), VIEW);
}

#[test]
fn subaddress() {
  let addr = Address::from_str(SUBADDRESS, Network::Mainnet).unwrap();
  assert_eq!(addr.meta.network, Network::Mainnet);
  assert_eq!(addr.meta.kind, AddressType::Subaddress);
  assert_eq!(addr.meta.guaranteed, false);
  assert_eq!(addr.spend.compress().to_bytes(), SUB_SPEND);
  assert_eq!(addr.view.compress().to_bytes(), SUB_VIEW);
}
126 coins/monero/src/tests/clsag.rs Normal file
@@ -0,0 +1,126 @@
#[cfg(feature = "multisig")]
use std::sync::{Arc, RwLock};

use rand::{RngCore, rngs::OsRng};

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, scalar::Scalar};

#[cfg(feature = "multisig")]
use transcript::{Transcript, RecommendedTranscript};
#[cfg(feature = "multisig")]
use frost::curve::Ed25519;

use crate::{
  Commitment,
  random_scalar,
  wallet::Decoys,
  ringct::{generate_key_image, clsag::{ClsagInput, Clsag}}
};
#[cfg(feature = "multisig")]
use crate::{frost::MultisigError, ringct::clsag::{ClsagDetails, ClsagMultisig}};

#[cfg(feature = "multisig")]
use frost::tests::{key_gen, algorithm_machines, sign};

const RING_LEN: u64 = 11;
const AMOUNT: u64 = 1337;

#[cfg(feature = "multisig")]
const RING_INDEX: u8 = 3;

#[test]
fn clsag() {
  for real in 0 .. RING_LEN {
    let msg = [1; 32];

    let mut secrets = [Scalar::zero(), Scalar::zero()];
    let mut ring = vec![];
    for i in 0 .. RING_LEN {
      let dest = random_scalar(&mut OsRng);
      let mask = random_scalar(&mut OsRng);
      let amount;
      if i == u64::from(real) {
        secrets = [dest, mask];
        amount = AMOUNT;
      } else {
        amount = OsRng.next_u64();
      }
      ring.push([&dest * &ED25519_BASEPOINT_TABLE, Commitment::new(mask, amount).calculate()]);
    }

    let image = generate_key_image(secrets[0]);
    let (clsag, pseudo_out) = Clsag::sign(
      &mut OsRng,
      &vec![(
        secrets[0],
        image,
        ClsagInput::new(
          Commitment::new(secrets[1], AMOUNT),
          Decoys {
            i: u8::try_from(real).unwrap(),
            offsets: (1 ..= RING_LEN).into_iter().collect(),
            ring: ring.clone()
          }
        ).unwrap()
      )],
      random_scalar(&mut OsRng),
      msg
    ).swap_remove(0);
    clsag.verify(&ring, &image, &pseudo_out, &msg).unwrap();
    #[cfg(feature = "experimental")]
    clsag.rust_verify(&ring, &image, &pseudo_out, &msg).unwrap();
  }
}

#[cfg(feature = "multisig")]
#[test]
fn clsag_multisig() -> Result<(), MultisigError> {
  let keys = key_gen::<_, Ed25519>(&mut OsRng);

  let randomness = random_scalar(&mut OsRng);
  let mut ring = vec![];
  for i in 0 .. RING_LEN {
    let dest;
    let mask;
    let amount;
    if i != u64::from(RING_INDEX) {
      dest = &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE;
      mask = random_scalar(&mut OsRng);
      amount = OsRng.next_u64();
    } else {
      dest = keys[&1].group_key().0;
      mask = randomness;
      amount = AMOUNT;
    }
    ring.push([dest, Commitment::new(mask, amount).calculate()]);
  }

  let mask_sum = random_scalar(&mut OsRng);
  sign(
    &mut OsRng,
    algorithm_machines(
      &mut OsRng,
      ClsagMultisig::new(
        RecommendedTranscript::new(b"Monero Serai CLSAG Test"),
        keys[&1].group_key().0,
        Arc::new(RwLock::new(Some(
          ClsagDetails::new(
            ClsagInput::new(
              Commitment::new(randomness, AMOUNT),
              Decoys {
                i: RING_INDEX,
                offsets: (1 ..= RING_LEN).into_iter().collect(),
                ring: ring.clone()
              }
            ).unwrap(),
            mask_sum
          )
        )))
      ).unwrap(),
      &keys
    ),
    &[1; 32]
  );

  Ok(())
}
13 coins/monero/src/tests/hash_to_point.rs Normal file
@@ -0,0 +1,13 @@
use rand::rngs::OsRng;

use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;

use crate::{random_scalar, ringct::hash_to_point::{hash_to_point as c_hash_to_point, rust_hash_to_point}};

#[test]
fn hash_to_point() {
  for _ in 0 .. 50 {
    let point = &random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE;
    assert_eq!(rust_hash_to_point(point), c_hash_to_point(point));
  }
}
3 coins/monero/src/tests/mod.rs Normal file
@@ -0,0 +1,3 @@
mod hash_to_point;
mod clsag;
mod address;
274 coins/monero/src/transaction.rs Normal file
@@ -0,0 +1,274 @@
use core::cmp::Ordering;

use curve25519_dalek::edwards::EdwardsPoint;

use crate::{hash, serialize::*, ringct::{RctPrunable, RctSignatures}};

pub const RING_LEN: usize = 11;

#[derive(Clone, PartialEq, Debug)]
pub enum Input {
  Gen(u64),

  ToKey {
    amount: u64,
    key_offsets: Vec<u64>,
    key_image: EdwardsPoint
  }
}

impl Input {
  // Worst-case predictive len
  pub(crate) fn fee_weight() -> usize {
    // Uses 1 byte for the VarInt amount due to amount being 0
    // Uses 1 byte for the VarInt encoding of the length of the ring as well
    1 + 1 + 1 + (8 * RING_LEN) + 32
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    match self {
      Input::Gen(height) => {
        w.write_all(&[255])?;
        write_varint(height, w)
      },

      Input::ToKey { amount, key_offsets, key_image } => {
        w.write_all(&[2])?;
        write_varint(amount, w)?;
        write_vec(write_varint, key_offsets, w)?;
        write_point(key_image, w)
      }
    }
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<Input> {
    let mut variant = [0];
    r.read_exact(&mut variant)?;
    Ok(
      match variant[0] {
        255 => Input::Gen(read_varint(r)?),
        2 => Input::ToKey {
          amount: read_varint(r)?,
          key_offsets: read_vec(read_varint, r)?,
          key_image: read_point(r)?
        },
        _ => Err(std::io::Error::new(std::io::ErrorKind::Other, "Tried to deserialize unknown/unused input type"))?
      }
    )
  }
}

// Doesn't bother moving to an enum for the unused Script classes
#[derive(Clone, PartialEq, Debug)]
pub struct Output {
  pub amount: u64,
  pub key: EdwardsPoint,
  pub tag: Option<u8>
}

impl Output {
  pub(crate) fn fee_weight() -> usize {
    1 + 1 + 32 + 1
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    write_varint(&self.amount, w)?;
    w.write_all(&[2 + (if self.tag.is_some() { 1 } else { 0 })])?;
    write_point(&self.key, w)?;
    if let Some(tag) = self.tag {
      w.write_all(&[tag])?;
    }
    Ok(())
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<Output> {
    let amount = read_varint(r)?;
    let mut tag = [0];
    r.read_exact(&mut tag)?;
    if (tag[0] != 2) && (tag[0] != 3) {
      Err(std::io::Error::new(std::io::ErrorKind::Other, "Tried to deserialize unknown/unused output type"))?;
    }

    Ok(
      Output {
        amount,
        key: read_point(r)?,
        tag: if tag[0] == 3 { r.read_exact(&mut tag)?; Some(tag[0]) } else { None }
      }
    )
  }
}

#[derive(Clone, Copy, PartialEq, Debug)]
pub enum Timelock {
  None,
  Block(usize),
  Time(u64)
}

impl Timelock {
  fn from_raw(raw: u64) -> Timelock {
    if raw == 0 {
      Timelock::None
    } else if raw < 500_000_000 {
      Timelock::Block(usize::try_from(raw).unwrap())
    } else {
      Timelock::Time(raw)
    }
  }

  pub(crate) fn fee_weight() -> usize {
    8
  }

  fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    write_varint(
      &match self {
        Timelock::None => 0,
        Timelock::Block(block) => (*block).try_into().unwrap(),
        Timelock::Time(time) => *time
      },
      w
    )
  }
}

impl PartialOrd for Timelock {
  fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
    match (self, other) {
      (Timelock::None, _) => Some(Ordering::Less),
      (Timelock::Block(a), Timelock::Block(b)) => a.partial_cmp(b),
      (Timelock::Time(a), Timelock::Time(b)) => a.partial_cmp(b),
      _ => None
    }
  }
}
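A note on the Timelock decoding above: Monero's unlock_time field holds a block height when below 500,000,000 and a Unix timestamp otherwise, which is exactly what from_raw distinguishes. The ordering is only partial, since block-based and time-based locks can't be compared without knowing the chain's timestamps. Illustrative comparisons (not part of this commit):

  assert!(Timelock::None < Timelock::Block(1));
  assert!(Timelock::Block(100) < Timelock::Block(200));
  // Incomparable variants yield None, so neither < nor >= holds between them
  assert_eq!(Timelock::Block(100).partial_cmp(&Timelock::Time(1_500_000_000)), None);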
#[derive(Clone, PartialEq, Debug)]
pub struct TransactionPrefix {
  pub version: u64,
  pub timelock: Timelock,
  pub inputs: Vec<Input>,
  pub outputs: Vec<Output>,
  pub extra: Vec<u8>
}

impl TransactionPrefix {
  pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize {
    // Assumes Timelock::None since this library won't let you create a TX with a timelock
    1 + 1 +
      varint_len(inputs) + (inputs * Input::fee_weight()) +
      // Only 16 outputs are possible under transactions by this lib
      1 + (outputs * Output::fee_weight()) +
      varint_len(extra) + extra
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    write_varint(&self.version, w)?;
    self.timelock.serialize(w)?;
    write_vec(Input::serialize, &self.inputs, w)?;
    write_vec(Output::serialize, &self.outputs, w)?;
    write_varint(&self.extra.len().try_into().unwrap(), w)?;
    w.write_all(&self.extra)
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<TransactionPrefix> {
    let mut prefix = TransactionPrefix {
      version: read_varint(r)?,
      timelock: Timelock::from_raw(read_varint(r)?),
      inputs: read_vec(Input::deserialize, r)?,
      outputs: read_vec(Output::deserialize, r)?,
      extra: vec![]
    };

    let len = read_varint(r)?;
    prefix.extra.resize(len.try_into().unwrap(), 0);
    r.read_exact(&mut prefix.extra)?;

    Ok(prefix)
  }
}

#[derive(Clone, PartialEq, Debug)]
pub struct Transaction {
  pub prefix: TransactionPrefix,
  pub rct_signatures: RctSignatures
}

impl Transaction {
  pub(crate) fn fee_weight(inputs: usize, outputs: usize, extra: usize) -> usize {
    TransactionPrefix::fee_weight(inputs, outputs, extra) + RctSignatures::fee_weight(inputs, outputs)
  }

  pub fn serialize<W: std::io::Write>(&self, w: &mut W) -> std::io::Result<()> {
    self.prefix.serialize(w)?;
    self.rct_signatures.serialize(w)
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<Transaction> {
    let prefix = TransactionPrefix::deserialize(r)?;
    Ok(
      Transaction {
        rct_signatures: RctSignatures::deserialize(
          prefix.inputs.iter().map(|input| match input {
            Input::Gen(_) => 0,
            Input::ToKey { key_offsets, .. } => key_offsets.len()
          }).collect(),
          prefix.outputs.len(),
          r
        )?,
        prefix
      }
    )
  }

  pub fn hash(&self) -> [u8; 32] {
    let mut serialized = Vec::with_capacity(2048);
    if self.prefix.version == 1 {
      self.serialize(&mut serialized).unwrap();
      hash(&serialized)
    } else {
      let mut sig_hash = Vec::with_capacity(96);

      self.prefix.serialize(&mut serialized).unwrap();
      sig_hash.extend(hash(&serialized));
      serialized.clear();

      self.rct_signatures.base.serialize(
        &mut serialized,
        self.rct_signatures.prunable.rct_type()
      ).unwrap();
      sig_hash.extend(hash(&serialized));
      serialized.clear();

      match self.rct_signatures.prunable {
        RctPrunable::Null => serialized.resize(32, 0),
        _ => {
          self.rct_signatures.prunable.serialize(&mut serialized).unwrap();
          serialized = hash(&serialized).to_vec();
        }
      }
      sig_hash.extend(&serialized);

      hash(&sig_hash)
    }
  }

  pub fn signature_hash(&self) -> [u8; 32] {
    let mut serialized = Vec::with_capacity(2048);
    let mut sig_hash = Vec::with_capacity(96);

    self.prefix.serialize(&mut serialized).unwrap();
    sig_hash.extend(hash(&serialized));
    serialized.clear();

    self.rct_signatures.base.serialize(&mut serialized, self.rct_signatures.prunable.rct_type()).unwrap();
    sig_hash.extend(hash(&serialized));
    serialized.clear();

    self.rct_signatures.prunable.signature_serialize(&mut serialized).unwrap();
    sig_hash.extend(&hash(&serialized));

    hash(&sig_hash)
  }
}
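Output's type byte is 2 for a plain to-key output and 3 when the optional view tag is present, as the serialize/deserialize pair above shows. A hypothetical round-trip sketch (illustrative only, not part of this commit):

  use curve25519_dalek::constants::ED25519_BASEPOINT_POINT;
  let output = Output { amount: 0, key: ED25519_BASEPOINT_POINT, tag: Some(1) };
  let mut buf = vec![];
  output.serialize(&mut buf).unwrap();
  // Type byte is 3 here since a view tag is present, and the tag byte trails the key
  assert_eq!(output, Output::deserialize(&mut std::io::Cursor::new(buf)).unwrap());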
152 coins/monero/src/wallet/address.rs Normal file
@@ -0,0 +1,152 @@
use std::string::ToString;

use thiserror::Error;

use curve25519_dalek::{constants::ED25519_BASEPOINT_TABLE, edwards::{EdwardsPoint, CompressedEdwardsY}};

use base58_monero::base58::{encode_check, decode_check};

use crate::wallet::ViewPair;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Network {
  Mainnet,
  Testnet,
  Stagenet
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum AddressType {
  Standard,
  Integrated([u8; 8]),
  Subaddress
}

impl AddressType {
  fn network_bytes(network: Network) -> (u8, u8, u8) {
    match network {
      Network::Mainnet => (18, 19, 42),
      Network::Testnet => (53, 54, 63),
      Network::Stagenet => (24, 25, 36)
    }
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct AddressMeta {
  pub network: Network,
  pub kind: AddressType,
  pub guaranteed: bool
}

#[derive(Clone, Error, Debug)]
pub enum AddressError {
  #[error("invalid address byte")]
  InvalidByte,
  #[error("invalid address encoding")]
  InvalidEncoding,
  #[error("invalid length")]
  InvalidLength,
  #[error("different network than expected")]
  DifferentNetwork,
  #[error("invalid key")]
  InvalidKey
}

impl AddressMeta {
  fn to_byte(&self) -> u8 {
    let bytes = AddressType::network_bytes(self.network);
    let byte = match self.kind {
      AddressType::Standard => bytes.0,
      AddressType::Integrated(_) => bytes.1,
      AddressType::Subaddress => bytes.2
    };
    byte | (if self.guaranteed { 1 << 7 } else { 0 })
  }

  // Returns an incomplete type in the case of Integrated addresses
  fn from_byte(byte: u8) -> Result<AddressMeta, AddressError> {
    let actual = byte & 0b01111111;
    let guaranteed = (byte >> 7) == 1;

    let mut meta = None;
    for network in [Network::Mainnet, Network::Testnet, Network::Stagenet] {
      let (standard, integrated, subaddress) = AddressType::network_bytes(network);
      if let Some(kind) = match actual {
        _ if actual == standard => Some(AddressType::Standard),
        _ if actual == integrated => Some(AddressType::Integrated([0; 8])),
        _ if actual == subaddress => Some(AddressType::Subaddress),
        _ => None
      } {
        meta = Some(AddressMeta { network, kind, guaranteed });
        break;
      }
    }

    meta.ok_or(AddressError::InvalidByte)
  }
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Address {
  pub meta: AddressMeta,
  pub spend: EdwardsPoint,
  pub view: EdwardsPoint
}

impl ViewPair {
  pub fn address(&self, network: Network, kind: AddressType, guaranteed: bool) -> Address {
    Address {
      meta: AddressMeta {
        network,
        kind,
        guaranteed
      },
      spend: self.spend,
      view: &self.view * &ED25519_BASEPOINT_TABLE
    }
  }
}

impl ToString for Address {
  fn to_string(&self) -> String {
    let mut data = vec![self.meta.to_byte()];
    data.extend(self.spend.compress().to_bytes());
    data.extend(self.view.compress().to_bytes());
    if let AddressType::Integrated(id) = self.meta.kind {
      data.extend(id);
    }
    encode_check(&data).unwrap()
  }
}

impl Address {
  pub fn from_str(s: &str, network: Network) -> Result<Self, AddressError> {
    let raw = decode_check(s).map_err(|_| AddressError::InvalidEncoding)?;
    if raw.len() == 1 {
      Err(AddressError::InvalidLength)?;
    }

    let mut meta = AddressMeta::from_byte(raw[0])?;
    if meta.network != network {
      Err(AddressError::DifferentNetwork)?;
    }

    let len = match meta.kind {
      AddressType::Standard | AddressType::Subaddress => 65,
      AddressType::Integrated(_) => 73
    };
    if raw.len() != len {
      Err(AddressError::InvalidLength)?;
    }

    let spend = CompressedEdwardsY(raw[1 .. 33].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?;
    let view = CompressedEdwardsY(raw[33 .. 65].try_into().unwrap()).decompress().ok_or(AddressError::InvalidKey)?;

    if let AddressType::Integrated(ref mut payment_id) = meta.kind {
      payment_id.copy_from_slice(&raw[65 .. 73]);
    }

    Ok(Address { meta, spend, view })
  }
}
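A quick illustration of the encoding above (not part of this commit): the leading byte selects network and kind (18 for a mainnet standard address), with the top bit reserved for the guaranteed flag, so parsing and re-encoding is a lossless round trip. Reusing the STANDARD constant from tests/address.rs:

  let addr = Address::from_str(STANDARD, Network::Mainnet).unwrap();
  assert_eq!(addr.to_string(), STANDARD);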
220 coins/monero/src/wallet/decoys.rs Normal file
@@ -0,0 +1,220 @@
use std::{sync::Mutex, collections::HashSet};

use lazy_static::lazy_static;

use rand_core::{RngCore, CryptoRng};
use rand_distr::{Distribution, Gamma};

use curve25519_dalek::edwards::EdwardsPoint;

use crate::{transaction::RING_LEN, wallet::SpendableOutput, rpc::{RpcError, Rpc}};

const LOCK_WINDOW: usize = 10;
const MATURITY: u64 = 60;
const RECENT_WINDOW: usize = 15;
const BLOCK_TIME: usize = 120;
const BLOCKS_PER_YEAR: usize = 365 * 24 * 60 * 60 / BLOCK_TIME;
const TIP_APPLICATION: f64 = (LOCK_WINDOW * BLOCK_TIME) as f64;

const DECOYS: usize = RING_LEN - 1;

lazy_static! {
  static ref GAMMA: Gamma<f64> = Gamma::new(19.28, 1.0 / 1.61).unwrap();
  static ref DISTRIBUTION: Mutex<Vec<u64>> = Mutex::new(Vec::with_capacity(3000000));
}

async fn select_n<R: RngCore + CryptoRng>(
  rng: &mut R,
  rpc: &Rpc,
  height: usize,
  high: u64,
  per_second: f64,
  used: &mut HashSet<u64>,
  count: usize
) -> Result<Vec<(u64, [EdwardsPoint; 2])>, RpcError> {
  let mut iters = 0;
  let mut confirmed = Vec::with_capacity(count);
  while confirmed.len() != count {
    let remaining = count - confirmed.len();
    let mut candidates = Vec::with_capacity(remaining);
    while candidates.len() != remaining {
      iters += 1;
      // This is cheap and on fresh chains, thousands of rounds may be needed
      if iters == 10000 {
        Err(RpcError::InternalError("not enough decoy candidates".to_string()))?;
      }

      // Use a gamma distribution
      let mut age = GAMMA.sample(rng).exp();
      if age > TIP_APPLICATION {
        age -= TIP_APPLICATION;
      } else {
        // f64 does not have try_from available, which is why these are written with `as`
        age = (rng.next_u64() % u64::try_from(RECENT_WINDOW * BLOCK_TIME).unwrap()) as f64;
      }

      let o = (age * per_second) as u64;
      if o < high {
        let distribution = DISTRIBUTION.lock().unwrap();
        let i = distribution.partition_point(|s| *s < (high - 1 - o));
        let prev = i.saturating_sub(1);
        let n = distribution[i] - distribution[prev];
        if n != 0 {
          let o = distribution[prev] + (rng.next_u64() % n);
          if !used.contains(&o) {
            // It will either actually be used, or is unusable and this prevents trying it again
            used.insert(o);
            candidates.push(o);
          }
        }
      }
    }

    let outputs = rpc.get_outputs(&candidates, height).await?;
    for i in 0 .. outputs.len() {
      if let Some(output) = outputs[i] {
        confirmed.push((candidates[i], output));
      }
    }
  }

  Ok(confirmed)
}

fn offset(ring: &[u64]) -> Vec<u64> {
  let mut res = vec![ring[0]];
  res.resize(ring.len(), 0);
  for m in (1 .. ring.len()).rev() {
    res[m] = ring[m] - ring[m - 1];
  }
  res
}
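The offsets produced above are the delta encoding Monero uses on-chain: the first entry is an absolute output index and every later entry is the gap from its predecessor, which keeps the serialized VarInts small. A worked example (illustrative only, not part of this commit):

  // Absolute ring member indexes [5, 10, 12] serialize as offsets [5, 5, 2]
  assert_eq!(offset(&[5, 10, 12]), vec![5, 5, 2]);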
#[derive(Clone, PartialEq, Debug)]
pub struct Decoys {
  pub i: u8,
  pub offsets: Vec<u64>,
  pub ring: Vec<[EdwardsPoint; 2]>
}

impl Decoys {
  pub fn len(&self) -> usize {
    self.offsets.len()
  }

  pub(crate) async fn select<R: RngCore + CryptoRng>(
    rng: &mut R,
    rpc: &Rpc,
    height: usize,
    inputs: &[SpendableOutput]
  ) -> Result<Vec<Decoys>, RpcError> {
    // Convert the inputs in question to the raw output data
    let mut outputs = Vec::with_capacity(inputs.len());
    for input in inputs {
      outputs.push((
        rpc.get_o_indexes(input.tx).await?[usize::from(input.o)],
        [input.key, input.commitment.calculate()]
      ));
    }

    let distribution_len = {
      let distribution = DISTRIBUTION.lock().unwrap();
      distribution.len()
    };
    if distribution_len <= height {
      let extension = rpc.get_output_distribution(distribution_len, height).await?;
      DISTRIBUTION.lock().unwrap().extend(extension);
    }

    let high;
    let per_second;
    {
      let mut distribution = DISTRIBUTION.lock().unwrap();
      // If asked to use an older height than previously asked, truncate to ensure accuracy
      // Should never happen, yet risks desyncing if it did
      distribution.truncate(height + 1); // height is inclusive, and 0 is a valid height

      high = distribution[distribution.len() - 1];
      per_second = {
        let blocks = distribution.len().min(BLOCKS_PER_YEAR);
        let outputs = high - distribution[distribution.len().saturating_sub(blocks + 1)];
        (outputs as f64) / ((blocks * BLOCK_TIME) as f64)
      };
    };

    let mut used = HashSet::<u64>::new();
    for o in &outputs {
      used.insert(o.0);
    }

    // TODO: Simply create a TX with less than the target amount
    if (high - MATURITY) < u64::try_from(inputs.len() * RING_LEN).unwrap() {
      Err(RpcError::InternalError("not enough decoy candidates".to_string()))?;
    }

    // Select all decoys for this transaction, assuming we generate a sane transaction
    // We should almost never naturally generate an insane transaction, hence why this doesn't
    // bother with an overage
    let mut decoys = select_n(
      rng,
      rpc,
      height,
      high,
      per_second,
      &mut used,
      inputs.len() * DECOYS
    ).await?;

    let mut res = Vec::with_capacity(inputs.len());
    for o in outputs {
      // Grab the decoys for this specific output
      let mut ring = decoys.drain((decoys.len() - DECOYS) ..).collect::<Vec<_>>();
      ring.push(o);
      ring.sort_by(|a, b| a.0.cmp(&b.0));

      // Monero only runs its sanity checks when at least 1000 outputs are available
      // We run ours whenever the highest output index we're aware of is > 500, assuming
      // (for presumably test blockchains) any chain smaller than that isn't mature enough
      // for the check to apply
      // Considering Monero's p2p layer doesn't actually check transaction sanity, it should be
      // fine for us to not have perfectly matching rules, especially since this code will
      // infinite loop if it can't determine sanity, which is possible with sufficient inputs on
      // sufficiently small chains
      if high > 500 {
        // Make sure the TX passes the sanity check that the median output is within the last 40%
        let target_median = high * 3 / 5;
        while ring[RING_LEN / 2].0 < target_median {
          // If it's not, update the bottom half with new values to ensure the median only moves up
          for removed in ring.drain(0 .. (RING_LEN / 2)).collect::<Vec<_>>() {
            // If we removed the real spend, add it back
            if removed.0 == o.0 {
              ring.push(o);
            } else {
              // We could choose not to remove this, saving CPU time and keeping low values out of
              // the candidate pool, yet that would increase the amount of decoys required to
              // create this transaction, and some removed outputs may be the best option (as we
              // drop the first half, not just the bottom n)
              used.remove(&removed.0);
            }
          }

          // Select new outputs until we have a full sized ring again
          ring.extend(
            select_n(rng, rpc, height, high, per_second, &mut used, RING_LEN - ring.len()).await?
          );
          ring.sort_by(|a, b| a.0.cmp(&b.0));
        }

        // The other sanity check rule is about duplicates, yet we already enforce unique ring
        // members
      }

      res.push(Decoys {
        // Binary searches for the real spend since we don't know where it sorted to
        i: u8::try_from(ring.partition_point(|x| x.0 < o.0)).unwrap(),
        offsets: offset(&ring.iter().map(|output| output.0).collect::<Vec<_>>()),
        ring: ring.iter().map(|output| output.1).collect()
      });
    }

    Ok(res)
  }
}
73 coins/monero/src/wallet/mod.rs Normal file
@@ -0,0 +1,73 @@
use curve25519_dalek::{scalar::Scalar, edwards::EdwardsPoint};

use crate::{
  hash, hash_to_scalar,
  serialize::write_varint,
  transaction::Input
};

pub mod address;

mod scan;
pub use scan::SpendableOutput;

pub(crate) mod decoys;
pub(crate) use decoys::Decoys;

mod send;
pub use send::{Fee, TransactionError, SignableTransaction};
#[cfg(feature = "multisig")]
pub use send::TransactionMachine;

fn key_image_sort(x: &EdwardsPoint, y: &EdwardsPoint) -> std::cmp::Ordering {
  x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse()
}

// https://gist.github.com/kayabaNerve/8066c13f1fe1573286ba7a2fd79f6100
pub(crate) fn uniqueness(inputs: &[Input]) -> [u8; 32] {
  let mut u = b"uniqueness".to_vec();
  for input in inputs {
    match input {
      // If Gen, this should be the only input, making this loop somewhat pointless
      // This works and even if there were somehow multiple inputs, it'd be a false negative
      Input::Gen(height) => { write_varint(&(*height).try_into().unwrap(), &mut u).unwrap(); },
      Input::ToKey { key_image, .. } => u.extend(key_image.compress().to_bytes())
    }
  }
  hash(&u)
}

// Hs(8Ra || o) with https://github.com/monero-project/research-lab/issues/103 as an option
#[allow(non_snake_case)]
pub(crate) fn shared_key(uniqueness: Option<[u8; 32]>, s: Scalar, P: &EdwardsPoint, o: usize) -> Scalar {
  // uniqueness
  let mut shared = uniqueness.map_or(vec![], |uniqueness| uniqueness.to_vec());
  // || 8Ra
  shared.extend((s * P).mul_by_cofactor().compress().to_bytes().to_vec());
  // || o
  write_varint(&o.try_into().unwrap(), &mut shared).unwrap();
  // Hs()
  hash_to_scalar(&shared)
}

pub(crate) fn amount_encryption(amount: u64, key: Scalar) -> [u8; 8] {
  let mut amount_mask = b"amount".to_vec();
  amount_mask.extend(key.to_bytes());
  (amount ^ u64::from_le_bytes(hash(&amount_mask)[0 .. 8].try_into().unwrap())).to_le_bytes()
}

fn amount_decryption(amount: [u8; 8], key: Scalar) -> u64 {
  u64::from_le_bytes(amount_encryption(u64::from_le_bytes(amount), key))
}

pub(crate) fn commitment_mask(shared_key: Scalar) -> Scalar {
  let mut mask = b"commitment_mask".to_vec();
  mask.extend(shared_key.to_bytes());
  hash_to_scalar(&mask)
}

#[derive(Clone, Copy)]
pub struct ViewPair {
  pub spend: EdwardsPoint,
  pub view: Scalar
}
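Note how amount_decryption is just amount_encryption run backwards: both XOR the amount with the first 8 bytes of H("amount" || key), and XORing with the same keystream twice is the identity. An in-module sketch (illustrative only, not part of this commit):

  let key = crate::random_scalar(&mut rand::rngs::OsRng);
  let ciphertext = amount_encryption(1337, key);
  assert_eq!(amount_decryption(ciphertext, key), 1337);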
161 coins/monero/src/wallet/scan.rs Normal file
@@ -0,0 +1,161 @@
use std::convert::TryFrom;

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  edwards::EdwardsPoint
};

use monero::{consensus::deserialize, blockdata::transaction::ExtraField};

use crate::{
  Commitment,
  serialize::{write_varint, read_32, read_scalar, read_point},
  transaction::{Timelock, Transaction},
  wallet::{ViewPair, uniqueness, shared_key, amount_decryption, commitment_mask}
};

#[derive(Clone, PartialEq, Debug)]
pub struct SpendableOutput {
  pub tx: [u8; 32],
  pub o: u8,
  pub key: EdwardsPoint,
  pub key_offset: Scalar,
  pub commitment: Commitment
}

pub struct Timelocked(Timelock, Vec<SpendableOutput>);
impl Timelocked {
  pub fn timelock(&self) -> Timelock {
    self.0
  }

  pub fn not_locked(&self) -> Vec<SpendableOutput> {
    if self.0 == Timelock::None {
      return self.1.clone();
    }
    vec![]
  }

  /// Returns None if the Timelocks aren't comparable. Returns Some(vec![]) if none are unlocked
  pub fn unlocked(&self, timelock: Timelock) -> Option<Vec<SpendableOutput>> {
    // If the Timelocks are comparable, return the outputs if they're now unlocked
    self.0.partial_cmp(&timelock).filter(|_| self.0 <= timelock).map(|_| self.1.clone())
  }

  pub fn ignore_timelock(&self) -> Vec<SpendableOutput> {
    self.1.clone()
  }
}

impl SpendableOutput {
  pub fn serialize(&self) -> Vec<u8> {
    let mut res = Vec::with_capacity(32 + 1 + 32 + 32 + 40);
    res.extend(&self.tx);
    res.push(self.o);
    res.extend(self.key.compress().to_bytes());
    res.extend(self.key_offset.to_bytes());
    res.extend(self.commitment.mask.to_bytes());
    res.extend(self.commitment.amount.to_le_bytes());
    res
  }

  pub fn deserialize<R: std::io::Read>(r: &mut R) -> std::io::Result<SpendableOutput> {
    Ok(
      SpendableOutput {
        tx: read_32(r)?,
        o: { let mut o = [0; 1]; r.read_exact(&mut o)?; o[0] },
        key: read_point(r)?,
        key_offset: read_scalar(r)?,
        commitment: Commitment::new(
          read_scalar(r)?,
          { let mut amount = [0; 8]; r.read_exact(&mut amount)?; u64::from_le_bytes(amount) }
        )
      }
    )
  }
}

impl Transaction {
  pub fn scan(
    &self,
    view: ViewPair,
    guaranteed: bool
  ) -> Timelocked {
    let mut extra = vec![];
    write_varint(&u64::try_from(self.prefix.extra.len()).unwrap(), &mut extra).unwrap();
    extra.extend(&self.prefix.extra);
    let extra = deserialize::<ExtraField>(&extra);

    let pubkeys: Vec<EdwardsPoint>;
    if let Ok(extra) = extra {
      let mut m_pubkeys = vec![];
      if let Some(key) = extra.tx_pubkey() {
        m_pubkeys.push(key);
      }
      if let Some(keys) = extra.tx_additional_pubkeys() {
        m_pubkeys.extend(&keys);
      }

      pubkeys = m_pubkeys.iter().map(|key| key.point.decompress()).filter_map(|key| key).collect();
    } else {
      return Timelocked(self.prefix.timelock, vec![]);
    };

    let mut res = vec![];
    for (o, output) in self.prefix.outputs.iter().enumerate() {
      // TODO: This may be replaceable by pubkeys[o]
      for pubkey in &pubkeys {
        let key_offset = shared_key(
          Some(uniqueness(&self.prefix.inputs)).filter(|_| guaranteed),
          view.view,
          pubkey,
          o
        );
        // P - shared == spend
        if (output.key - (&key_offset * &ED25519_BASEPOINT_TABLE)) != view.spend {
          continue;
        }

        // Since we've found an output to us, get its amount
        let mut commitment = Commitment::zero();

        // Miner transaction
        if output.amount != 0 {
          commitment.amount = output.amount;
        // Regular transaction
        } else {
          let amount = match self.rct_signatures.base.ecdh_info.get(o) {
            Some(amount) => amount_decryption(*amount, key_offset),
            // This should never happen, yet it may be possible with miner transactions?
            // Using get just decreases the possibility of a panic and lets us move on in that case
            None => break
          };

          // Rebuild the commitment to verify it
          commitment = Commitment::new(commitment_mask(key_offset), amount);
          // If this is a malicious commitment, move to the next output
          // Any other R value will calculate to a different spend key and is therefore ignorable
          if Some(&commitment.calculate()) != self.rct_signatures.base.commitments.get(o) {
            break;
          }
        }

        if commitment.amount != 0 {
          res.push(SpendableOutput {
            tx: self.hash(),
            o: o.try_into().unwrap(),
            key: output.key,
            key_offset,
            commitment
          });
        }
        // Break to prevent public keys from being included multiple times, triggering multiple
        // inclusions of the same output
        break;
      }
    }

    Timelocked(self.prefix.timelock, res)
  }
}
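A hypothetical usage sketch of scan (the `tx`, `view_pair`, and `current_height` bindings are assumptions, not part of this commit): scan a transaction for outputs owned by the view pair, then only take those which are spendable at the current height.

  let timelocked = tx.scan(view_pair, false);
  if let Some(outputs) = timelocked.unlocked(Timelock::Block(current_height)) {
    // Some(..) means the transaction's timelock is comparable to, and satisfied by, this height;
    // outputs is a Vec<SpendableOutput>
  }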
369 coins/monero/src/wallet/send/mod.rs Normal file
@@ -0,0 +1,369 @@
use thiserror::Error;

use rand_core::{RngCore, CryptoRng};
use rand::seq::SliceRandom;

use curve25519_dalek::{
  constants::ED25519_BASEPOINT_TABLE,
  scalar::Scalar,
  edwards::EdwardsPoint
};

use monero::{consensus::Encodable, PublicKey, blockdata::transaction::SubField};

#[cfg(feature = "multisig")]
use frost::FrostError;

use crate::{
  Commitment,
  random_scalar,
  ringct::{
    generate_key_image,
    clsag::{ClsagError, ClsagInput, Clsag},
    bulletproofs::{MAX_OUTPUTS, Bulletproofs},
    RctBase, RctPrunable, RctSignatures
  },
  transaction::{Input, Output, Timelock, TransactionPrefix, Transaction},
  rpc::{Rpc, RpcError},
  wallet::{
    address::{AddressType, Address}, SpendableOutput, Decoys,
    key_image_sort, uniqueness, shared_key, commitment_mask, amount_encryption
  }
};
#[cfg(feature = "multisig")]
use crate::frost::MultisigError;

#[cfg(feature = "multisig")]
mod multisig;
#[cfg(feature = "multisig")]
pub use multisig::TransactionMachine;

#[allow(non_snake_case)]
#[derive(Clone, PartialEq, Debug)]
struct SendOutput {
  R: EdwardsPoint,
  dest: EdwardsPoint,
  commitment: Commitment,
  amount: [u8; 8]
}

impl SendOutput {
  fn new<R: RngCore + CryptoRng>(
    rng: &mut R,
    unique: [u8; 32],
    output: (Address, u64),
    o: usize
  ) -> SendOutput {
    let r = random_scalar(rng);
    let shared_key = shared_key(
      Some(unique).filter(|_| output.0.meta.guaranteed),
      r,
      &output.0.view,
      o
    );

    let spend = output.0.spend;
    SendOutput {
      R: match output.0.meta.kind {
        AddressType::Standard => &r * &ED25519_BASEPOINT_TABLE,
        AddressType::Integrated(_) => unimplemented!("SendOutput::new doesn't support Integrated addresses"),
        AddressType::Subaddress => &r * spend
      },
      dest: ((&shared_key * &ED25519_BASEPOINT_TABLE) + spend),
      commitment: Commitment::new(commitment_mask(shared_key), output.1),
      amount: amount_encryption(output.1, shared_key)
    }
  }
}

#[derive(Clone, Error, Debug)]
pub enum TransactionError {
  #[error("invalid address")]
  InvalidAddress,
  #[error("no inputs")]
  NoInputs,
  #[error("no outputs")]
  NoOutputs,
  #[error("only one output and no change address")]
  NoChange,
  #[error("too many outputs")]
  TooManyOutputs,
  #[error("not enough funds (in {0}, out {1})")]
  NotEnoughFunds(u64, u64),
  #[error("wrong spend private key")]
  WrongPrivateKey,
  #[error("rpc error ({0})")]
  RpcError(RpcError),
  #[error("clsag error ({0})")]
  ClsagError(ClsagError),
  #[error("invalid transaction ({0})")]
  InvalidTransaction(RpcError),
  #[cfg(feature = "multisig")]
  #[error("frost error {0}")]
  FrostError(FrostError),
  #[cfg(feature = "multisig")]
  #[error("multisig error {0}")]
  MultisigError(MultisigError)
}

async fn prepare_inputs<R: RngCore + CryptoRng>(
  rng: &mut R,
  rpc: &Rpc,
  inputs: &[SpendableOutput],
  spend: &Scalar,
  tx: &mut Transaction
) -> Result<Vec<(Scalar, EdwardsPoint, ClsagInput)>, TransactionError> {
  let mut signable = Vec::with_capacity(inputs.len());

  // Select decoys
  let decoys = Decoys::select(
    rng,
    rpc,
    rpc.get_height().await.map_err(|e| TransactionError::RpcError(e))? - 10,
    inputs
  ).await.map_err(|e| TransactionError::RpcError(e))?;

  for (i, input) in inputs.iter().enumerate() {
    signable.push((
      spend + input.key_offset,
      generate_key_image(spend + input.key_offset),
      ClsagInput::new(
        input.commitment,
        decoys[i].clone()
      ).map_err(|e| TransactionError::ClsagError(e))?
    ));

    tx.prefix.inputs.push(Input::ToKey {
      amount: 0,
      key_offsets: decoys[i].offsets.clone(),
      key_image: signable[i].1
    });
  }

  signable.sort_by(|x, y| x.1.compress().to_bytes().cmp(&y.1.compress().to_bytes()).reverse());
  tx.prefix.inputs.sort_by(|x, y| if let (
    Input::ToKey { key_image: x, ..},
    Input::ToKey { key_image: y, ..}
  ) = (x, y) {
    x.compress().to_bytes().cmp(&y.compress().to_bytes()).reverse()
  } else {
    panic!("Input wasn't ToKey")
  });

  Ok(signable)
}

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct Fee {
  pub per_weight: u64,
  pub mask: u64
}

impl Fee {
  pub fn calculate(&self, weight: usize) -> u64 {
    ((((self.per_weight * u64::try_from(weight).unwrap()) - 1) / self.mask) + 1) * self.mask
  }
}
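Fee::calculate rounds the raw per-weight fee up to the next multiple of the daemon's quantization mask. A worked example (illustrative only, not part of this commit):

  // per_weight 10, weight 7 => 70 raw, rounded up to the next multiple of 4 => 72
  let fee = Fee { per_weight: 10, mask: 4 };
  assert_eq!(fee.calculate(7), 72);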
#[derive(Clone, PartialEq, Debug)]
pub struct SignableTransaction {
  inputs: Vec<SpendableOutput>,
  payments: Vec<(Address, u64)>,
  outputs: Vec<SendOutput>,
  fee: u64
}

impl SignableTransaction {
  pub fn new(
    inputs: Vec<SpendableOutput>,
    mut payments: Vec<(Address, u64)>,
    change_address: Option<Address>,
    fee_rate: Fee
  ) -> Result<SignableTransaction, TransactionError> {
    // Make sure all addresses are valid
    let test = |addr: Address| {
      match addr.meta.kind {
        AddressType::Standard => Ok(()),
        AddressType::Integrated(..) => Err(TransactionError::InvalidAddress),
        AddressType::Subaddress => Ok(())
      }
    };

    for payment in &payments {
      test(payment.0)?;
    }
    if let Some(change) = change_address {
      test(change)?;
    }

    if inputs.len() == 0 {
      Err(TransactionError::NoInputs)?;
    }
    if payments.len() == 0 {
      Err(TransactionError::NoOutputs)?;
    }

    // TODO TX MAX SIZE

    // If we don't have two outputs, as required by Monero, add a second
    let mut change = payments.len() == 1;
    if change && change_address.is_none() {
      Err(TransactionError::NoChange)?;
    }
    let mut outputs = payments.len() + (if change { 1 } else { 0 });

    // Calculate the extra length.
    // Type, length, value, with 1 field for the first key and 1 field for the rest
    let extra = (outputs * (2 + 32)) - (outputs.saturating_sub(2) * 2);

    // Calculate the fee.
    let mut fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs, extra));

    // Make sure we have enough funds
    let in_amount = inputs.iter().map(|input| input.commitment.amount).sum::<u64>();
    let mut out_amount = payments.iter().map(|payment| payment.1).sum::<u64>() + fee;
    if in_amount < out_amount {
      Err(TransactionError::NotEnoughFunds(in_amount, out_amount))?;
    }

    // If we have yet to add a change output, do so if it's economically viable
    if (!change) && change_address.is_some() && (in_amount != out_amount) {
      // Check that, even with the new fee, there are remaining funds
      let change_fee = fee_rate.calculate(Transaction::fee_weight(inputs.len(), outputs + 1, extra)) - fee;
      if (out_amount + change_fee) < in_amount {
        change = true;
        outputs += 1;
        out_amount += change_fee;
        fee += change_fee;
      }
    }

    if outputs > MAX_OUTPUTS {
      Err(TransactionError::TooManyOutputs)?;
    }

    if change {
      payments.push((change_address.unwrap(), in_amount - out_amount));
    }

    Ok(
      SignableTransaction {
        inputs,
        payments,
        outputs: vec![],
        fee
      }
    )
  }

  fn prepare_outputs<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    uniqueness: [u8; 32]
  ) -> (Vec<Commitment>, Scalar) {
    // Shuffle the payments
    self.payments.shuffle(rng);

    // Actually create the outputs
    self.outputs = Vec::with_capacity(self.payments.len() + 1);
    for (o, output) in self.payments.iter().enumerate() {
      self.outputs.push(SendOutput::new(rng, uniqueness, *output, o));
    }

    let commitments = self.outputs.iter().map(|output| output.commitment).collect::<Vec<_>>();
    let sum = commitments.iter().map(|commitment| commitment.mask).sum();
    (commitments, sum)
  }

  fn prepare_transaction(
    &self,
    commitments: &[Commitment],
    bp: Bulletproofs
  ) -> Transaction {
    // Create the TX extra
    // TODO: Review this for canonicity with Monero
    let mut extra = vec![];
    SubField::TxPublicKey(
      PublicKey { point: self.outputs[0].R.compress() }
    ).consensus_encode(&mut extra).unwrap();
    SubField::AdditionalPublickKey(
      self.outputs[1 ..].iter().map(|output| PublicKey { point: output.R.compress() }).collect()
    ).consensus_encode(&mut extra).unwrap();

    let mut tx_outputs = Vec::with_capacity(self.outputs.len());
    let mut ecdh_info = Vec::with_capacity(self.outputs.len());
    for o in 0 .. self.outputs.len() {
      tx_outputs.push(Output {
        amount: 0,
        key: self.outputs[o].dest,
        tag: None
      });
      ecdh_info.push(self.outputs[o].amount);
    }

    Transaction {
      prefix: TransactionPrefix {
        version: 2,
        timelock: Timelock::None,
        inputs: vec![],
        outputs: tx_outputs,
        extra
      },
      rct_signatures: RctSignatures {
        base: RctBase {
          fee: self.fee,
          ecdh_info,
          commitments: commitments.iter().map(|commitment| commitment.calculate()).collect()
        },
        prunable: RctPrunable::Clsag {
          bulletproofs: vec![bp],
          clsags: vec![],
          pseudo_outs: vec![]
        }
      }
    }
  }

  pub async fn sign<R: RngCore + CryptoRng>(
    &mut self,
    rng: &mut R,
    rpc: &Rpc,
    spend: &Scalar
  ) -> Result<Transaction, TransactionError> {
    let mut images = Vec::with_capacity(self.inputs.len());
    for input in &self.inputs {
      let offset = spend + input.key_offset;
      if (&offset * &ED25519_BASEPOINT_TABLE) != input.key {
        Err(TransactionError::WrongPrivateKey)?;
      }

      images.push(generate_key_image(offset));
    }
    images.sort_by(key_image_sort);

    let (commitments, mask_sum) = self.prepare_outputs(
      rng,
      uniqueness(
        &images.iter().map(|image| Input::ToKey {
          amount: 0,
          key_offsets: vec![],
          key_image: *image
        }).collect::<Vec<_>>()
      )
    );

    let mut tx = self.prepare_transaction(&commitments, Bulletproofs::new(rng, &commitments)?);

    let signable = prepare_inputs(rng, rpc, &self.inputs, spend, &mut tx).await?;

    let clsag_pairs = Clsag::sign(rng, &signable, mask_sum, tx.signature_hash());
    match tx.rct_signatures.prunable {
      RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
      RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
        clsags.append(&mut clsag_pairs.iter().map(|clsag| clsag.0.clone()).collect::<Vec<_>>());
        pseudo_outs.append(&mut clsag_pairs.iter().map(|clsag| clsag.1.clone()).collect::<Vec<_>>());
      }
    }
    Ok(tx)
  }
}
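A hypothetical end-to-end sketch tying this file together (the `rpc`, `spendable`, `dest`, `change`, and `spend_key` bindings are assumptions; error handling elided): build a signable transaction, sign it as a single signer, and broadcast it.

  let fee_rate = rpc.get_fee().await.unwrap();
  let mut tx = SignableTransaction::new(
    spendable,               // Vec<SpendableOutput> from scanning
    vec![(dest, 1_000_000)], // (Address, u64) payments, denominated in piconero
    Some(change),            // change Address
    fee_rate
  ).unwrap();
  let signed = tx.sign(&mut rand::rngs::OsRng, &rpc, &spend_key).await.unwrap();
  rpc.publish_transaction(&signed).await.unwrap();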
368 coins/monero/src/wallet/send/multisig.rs Normal file
@@ -0,0 +1,368 @@
use std::{io::{Read, Cursor}, sync::{Arc, RwLock}, collections::HashMap};

use rand_core::{RngCore, CryptoRng, SeedableRng};
use rand_chacha::ChaCha12Rng;

use curve25519_dalek::{traits::Identity, scalar::Scalar, edwards::{EdwardsPoint, CompressedEdwardsY}};

use transcript::{Transcript, RecommendedTranscript};
use frost::{
  curve::Ed25519,
  FrostError, FrostKeys,
  sign::{
    PreprocessMachine, SignMachine, SignatureMachine,
    AlgorithmMachine, AlgorithmSignMachine, AlgorithmSignatureMachine
  }
};

use crate::{
  random_scalar, ringct::{clsag::{ClsagInput, ClsagDetails, ClsagMultisig}, bulletproofs::Bulletproofs, RctPrunable},
  transaction::{Input, Transaction},
  rpc::Rpc,
  wallet::{TransactionError, SignableTransaction, Decoys, key_image_sort, uniqueness}
};

pub struct TransactionMachine {
  signable: SignableTransaction,
  i: u16,
  included: Vec<u16>,
  transcript: RecommendedTranscript,

  decoys: Vec<Decoys>,

  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  clsags: Vec<AlgorithmMachine<Ed25519, ClsagMultisig>>
}

pub struct TransactionSignMachine {
  signable: SignableTransaction,
  i: u16,
  included: Vec<u16>,
  transcript: RecommendedTranscript,

  decoys: Vec<Decoys>,

  inputs: Vec<Arc<RwLock<Option<ClsagDetails>>>>,
  clsags: Vec<AlgorithmSignMachine<Ed25519, ClsagMultisig>>,

  our_preprocess: Vec<u8>
}

pub struct TransactionSignatureMachine {
  tx: Transaction,
  clsags: Vec<AlgorithmSignatureMachine<Ed25519, ClsagMultisig>>
}

impl SignableTransaction {
  pub async fn multisig(
    self,
    rpc: &Rpc,
    keys: FrostKeys<Ed25519>,
    mut transcript: RecommendedTranscript,
    height: usize,
    mut included: Vec<u16>
  ) -> Result<TransactionMachine, TransactionError> {
    let mut inputs = vec![];
    for _ in 0 .. self.inputs.len() {
      // Doesn't resize as that will use a single Rc for the entire Vec
      inputs.push(Arc::new(RwLock::new(None)));
    }
    let mut clsags = vec![];

    // Create an RNG out of the input shared keys, which either requires the view key or being
    // every sender, and the payments (address and amount), which a passive adversary may be able
    // to know depending on how these transactions are coordinated
    // Being every sender would already let you note rings which happen to use your transactions
    // multiple times, already breaking privacy there

    transcript.domain_separate(b"monero_transaction");
    // Include the height we're using for our data
    // The data itself will be included, making this unnecessary, yet a lot of this is technically
    // unnecessary. Anything which further increases security at almost no cost should be followed
    transcript.append_message(b"height", &u64::try_from(height).unwrap().to_le_bytes());
    // Also include the spend_key as below only the key offset is included, so this confirms the
    // sum product
    // Useful as confirming the sum product confirms the key image, further guaranteeing the one
    // time properties noted below
    transcript.append_message(b"spend_key", &keys.group_key().0.compress().to_bytes());
    for input in &self.inputs {
      // These outputs can only be spent once. Therefore, it forces all RNGs derived from this
      // transcript (such as the one used to create one time keys) to be unique
      transcript.append_message(b"input_hash", &input.tx);
      transcript.append_message(b"input_output_index", &[input.o]);
      // Not including this, with a doxxed list of payments, would allow brute forcing the inputs
      // to determine RNG seeds and therefore the true spends
      transcript.append_message(b"input_shared_key", &input.key_offset.to_bytes());
    }
    for payment in &self.payments {
      transcript.append_message(b"payment_address", &payment.0.to_string().as_bytes());
      transcript.append_message(b"payment_amount", &payment.1.to_le_bytes());
    }

    // Sort included before cloning it around
    included.sort_unstable();

    for (i, input) in self.inputs.iter().enumerate() {
      // Check this is the right set of keys
      let offset = keys.offset(dalek_ff_group::Scalar(input.key_offset));
      if offset.group_key().0 != input.key {
        Err(TransactionError::WrongPrivateKey)?;
      }

      clsags.push(
        AlgorithmMachine::new(
          ClsagMultisig::new(
            transcript.clone(),
            input.key,
            inputs[i].clone()
          ).map_err(|e| TransactionError::MultisigError(e))?,
          Arc::new(offset),
          &included
        ).map_err(|e| TransactionError::FrostError(e))?
      );
    }

    // Select decoys
    // Ideally, this would be done post entropy, instead of now, yet doing so would require sign
    // to be async which isn't preferable. This should be suitably competent though
    // While this inability means we can immediately create the input, moving it out of the
    // Arc RwLock, keeping it within an Arc RwLock keeps our options flexible
    let decoys = Decoys::select(
      // Using a seeded RNG with a specific height, committed to above, should make these decoys
      // committed to. They'll also be committed to later via the TX message as a whole
      &mut ChaCha12Rng::from_seed(transcript.rng_seed(b"decoys")),
      rpc,
      height,
      &self.inputs
    ).await.map_err(|e| TransactionError::RpcError(e))?;

    Ok(
      TransactionMachine {
        signable: self,
        i: keys.params().i(),
        included,
        transcript,

        decoys,

        inputs,
        clsags
      }
    )
  }
}

impl PreprocessMachine for TransactionMachine {
  type Signature = Transaction;
  type SignMachine = TransactionSignMachine;

  fn preprocess<R: RngCore + CryptoRng>(
    mut self,
    rng: &mut R
  ) -> (TransactionSignMachine, Vec<u8>) {
    // Iterate over each CLSAG calling preprocess
    let mut serialized = Vec::with_capacity(
      // D_{G, H}, E_{G, H}, DLEqs, key image addendum
      self.clsags.len() * ((2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len())
    );
    let clsags = self.clsags.drain(..).map(|clsag| {
      let (clsag, preprocess) = clsag.preprocess(rng);
      serialized.extend(&preprocess);
      clsag
    }).collect();
    let our_preprocess = serialized.clone();

    // We could add further entropy here, and previous versions of this library did so
    // As of right now, the multisig's key, the inputs being spent, and the FROST data itself
    // will be used for RNG seeds. In order to recreate these RNG seeds, breaking privacy,
    // counterparties must have knowledge of the multisig, either the view key or access to the
    // coordination layer, and then access to the actual FROST signing process
    // If the commitments are sent in plain text, then entropy here also would be, making it not
    // increase privacy. If they're not sent in plain text, or are otherwise inaccessible, they
    // already offer sufficient entropy. That's why further entropy is not included

    (
      TransactionSignMachine {
        signable: self.signable,
        i: self.i,
        included: self.included,
        transcript: self.transcript,

        decoys: self.decoys,

        inputs: self.inputs,
        clsags,

        our_preprocess,
      },
      serialized
    )
  }
}

impl SignMachine<Transaction> for TransactionSignMachine {
  type SignatureMachine = TransactionSignatureMachine;

  fn sign<Re: Read>(
    mut self,
    mut commitments: HashMap<u16, Re>,
    msg: &[u8]
  ) -> Result<(TransactionSignatureMachine, Vec<u8>), FrostError> {
    if msg.len() != 0 {
      Err(
        FrostError::InternalError(
          "message was passed to the TransactionMachine when it generates its own"
        )
      )?;
    }

    // FROST commitments and their DLEqs, and the image and its DLEq
    const CLSAG_LEN: usize = (2 * (32 + 32)) + (2 * (32 + 32)) + ClsagMultisig::serialized_len();

    // Convert the unified commitments to a Vec of the individual commitments
    let mut images = vec![EdwardsPoint::identity(); self.clsags.len()];
|
||||
let mut commitments = (0 .. self.clsags.len()).map(|c| {
|
||||
let mut buf = [0; CLSAG_LEN];
|
||||
(&self.included).iter().map(|l| {
|
||||
// Add all commitments to the transcript for their entropy
|
||||
// While each CLSAG will do this as they need to for security, they have their own transcripts
|
||||
// cloned from this TX's initial premise's transcript. For our TX transcript to have the CLSAG
|
||||
// data for entropy, it'll have to be added ourselves here
|
||||
self.transcript.append_message(b"participant", &(*l).to_be_bytes());
|
||||
if *l == self.i {
|
||||
buf.copy_from_slice(self.our_preprocess.drain(.. CLSAG_LEN).as_slice());
|
||||
} else {
|
||||
commitments.get_mut(l).ok_or(FrostError::MissingParticipant(*l))?
|
||||
.read_exact(&mut buf).map_err(|_| FrostError::InvalidCommitment(*l))?;
|
||||
}
|
||||
self.transcript.append_message(b"preprocess", &buf);
|
||||
|
||||
// While here, calculate the key image
|
||||
// Clsag will parse/calculate/validate this as needed, yet doing so here as well provides
|
||||
// the easiest API overall, as this is where the TX is (which needs the key images in its
|
||||
// message), along with where the outputs are determined (where our outputs may need
|
||||
// these in order to guarantee uniqueness)
|
||||
images[c] += CompressedEdwardsY(
|
||||
buf[(CLSAG_LEN - 96) .. (CLSAG_LEN - 64)].try_into().map_err(|_| FrostError::InvalidCommitment(*l))?
|
||||
).decompress().ok_or(FrostError::InvalidCommitment(*l))?;
|
||||
|
||||
Ok((*l, Cursor::new(buf)))
|
||||
}).collect::<Result<HashMap<_, _>, _>>()
|
||||
}).collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
// Remove our preprocess which shouldn't be here. It was just the easiest way to implement the
|
||||
// above
|
||||
for map in commitments.iter_mut() {
|
||||
map.remove(&self.i);
|
||||
}
|
||||
|
||||
// Create the actual transaction
|
||||
let output_masks;
|
||||
let mut tx = {
|
||||
let mut sorted_images = images.clone();
|
||||
sorted_images.sort_by(key_image_sort);
|
||||
|
||||
let commitments;
|
||||
(commitments, output_masks) = self.signable.prepare_outputs(
|
||||
&mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"tx_keys")),
|
||||
uniqueness(
|
||||
&images.iter().map(|image| Input::ToKey {
|
||||
amount: 0,
|
||||
key_offsets: vec![],
|
||||
key_image: *image
|
||||
}).collect::<Vec<_>>()
|
||||
)
|
||||
);
|
||||
|
||||
self.signable.prepare_transaction(
|
||||
&commitments,
|
||||
Bulletproofs::new(
|
||||
&mut ChaCha12Rng::from_seed(self.transcript.rng_seed(b"bulletproofs")),
|
||||
&commitments
|
||||
).unwrap()
|
||||
)
|
||||
};
|
||||
|
||||
// Sort the inputs, as expected
|
||||
let mut sorted = Vec::with_capacity(self.clsags.len());
|
||||
while self.clsags.len() != 0 {
|
||||
sorted.push((
|
||||
images.swap_remove(0),
|
||||
self.signable.inputs.swap_remove(0),
|
||||
self.decoys.swap_remove(0),
|
||||
self.inputs.swap_remove(0),
|
||||
self.clsags.swap_remove(0),
|
||||
commitments.swap_remove(0)
|
||||
));
|
||||
}
|
||||
sorted.sort_by(|x, y| key_image_sort(&x.0, &y.0));
|
||||
|
||||
let mut rng = ChaCha12Rng::from_seed(self.transcript.rng_seed(b"pseudo_out_masks"));
|
||||
let mut sum_pseudo_outs = Scalar::zero();
|
||||
while sorted.len() != 0 {
|
||||
let value = sorted.remove(0);
|
||||
|
||||
let mut mask = random_scalar(&mut rng);
|
||||
if sorted.len() == 0 {
|
||||
mask = output_masks - sum_pseudo_outs;
|
||||
} else {
|
||||
sum_pseudo_outs += mask;
|
||||
}
|
||||
|
||||
tx.prefix.inputs.push(
|
||||
Input::ToKey {
|
||||
amount: 0,
|
||||
key_offsets: value.2.offsets.clone(),
|
||||
key_image: value.0
|
||||
}
|
||||
);
|
||||
|
||||
*value.3.write().unwrap() = Some(
|
||||
ClsagDetails::new(
|
||||
ClsagInput::new(
|
||||
value.1.commitment,
|
||||
value.2
|
||||
).map_err(|_| panic!("Signing an input which isn't present in the ring we created for it"))?,
|
||||
mask
|
||||
)
|
||||
);
|
||||
|
||||
self.clsags.push(value.4);
|
||||
commitments.push(value.5);
|
||||
}
|
||||
|
||||
let msg = tx.signature_hash();
|
||||
|
||||
// Iterate over each CLSAG calling sign
|
||||
let mut serialized = Vec::with_capacity(self.clsags.len() * 32);
|
||||
let clsags = self.clsags.drain(..).map(|clsag| {
|
||||
let (clsag, share) = clsag.sign(commitments.remove(0), &msg)?;
|
||||
serialized.extend(&share);
|
||||
Ok(clsag)
|
||||
}).collect::<Result<_, _>>()?;
|
||||
|
||||
Ok((TransactionSignatureMachine { tx, clsags }, serialized))
|
||||
}
|
||||
}
|
||||
|
||||
impl SignatureMachine<Transaction> for TransactionSignatureMachine {
|
||||
fn complete<Re: Read>(self, mut shares: HashMap<u16, Re>) -> Result<Transaction, FrostError> {
|
||||
let mut tx = self.tx;
|
||||
match tx.rct_signatures.prunable {
|
||||
RctPrunable::Null => panic!("Signing for RctPrunable::Null"),
|
||||
RctPrunable::Clsag { ref mut clsags, ref mut pseudo_outs, .. } => {
|
||||
for clsag in self.clsags {
|
||||
let (clsag, pseudo_out) = clsag.complete(
|
||||
shares.iter_mut().map(|(l, shares)| {
|
||||
let mut buf = [0; 32];
|
||||
shares.read_exact(&mut buf).map_err(|_| FrostError::InvalidShare(*l))?;
|
||||
Ok((*l, Cursor::new(buf)))
|
||||
}).collect::<Result<HashMap<_, _>, _>>()?
|
||||
)?;
|
||||
clsags.push(clsag);
|
||||
pseudo_outs.push(pseudo_out);
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(tx)
|
||||
}
|
||||
}
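
For orientation, the round structure these machines implement is roughly the following (a minimal sketch: `rpc`, `keys`, `transcript`, `commitments`, and `shares` are placeholder values, and error handling is elided):

// Hypothetical multisig flow over the machines above
let machine = signable.multisig(&rpc, keys, transcript, height, included).await?;
// Round 1: generate a preprocess and broadcast it to the other participants
let (machine, preprocess) = machine.preprocess(&mut OsRng);
// Round 2: with everyone's preprocesses collected, produce a signature share
// (the message is empty, as the machine generates the TX's message itself)
let (machine, share) = machine.sign(commitments, &[])?;
// Combine everyone's shares into the completed Monero transaction
let tx = machine.complete(shares)?;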

42
coins/monero/tests/rpc.rs
Normal file
@@ -0,0 +1,42 @@
use rand::rngs::OsRng;

use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;

use serde_json::json;

use monero::{
  network::Network,
  util::{key::PublicKey, address::Address}
};

use monero_serai::{random_scalar, rpc::{EmptyResponse, RpcError, Rpc}};

pub async fn rpc() -> Rpc {
  let rpc = Rpc::new("http://127.0.0.1:18081".to_string());

  // Only run once
  if rpc.get_height().await.unwrap() != 1 {
    return rpc;
  }

  let addr = Address::standard(
    Network::Mainnet,
    PublicKey { point: (&random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE).compress() },
    PublicKey { point: (&random_scalar(&mut OsRng) * &ED25519_BASEPOINT_TABLE).compress() }
  ).to_string();

  // Mine 10 blocks, so we have 10 decoys and decoy selection doesn't fail
  mine_block(&rpc, &addr).await.unwrap();

  rpc
}

pub async fn mine_block(rpc: &Rpc, address: &str) -> Result<EmptyResponse, RpcError> {
  rpc.rpc_call("json_rpc", Some(json!({
    "method": "generateblocks",
    "params": {
      "wallet_address": address,
      "amount_of_blocks": 10
    },
  }))).await
}

176
coins/monero/tests/send.rs
Normal file
@@ -0,0 +1,176 @@
use std::sync::Mutex;
#[cfg(feature = "multisig")]
use std::collections::HashMap;

use lazy_static::lazy_static;

use rand::rngs::OsRng;

#[cfg(feature = "multisig")]
use blake2::{digest::Update, Digest, Blake2b512};

use curve25519_dalek::constants::ED25519_BASEPOINT_TABLE;

#[cfg(feature = "multisig")]
use dalek_ff_group::Scalar;
#[cfg(feature = "multisig")]
use transcript::{Transcript, RecommendedTranscript};
#[cfg(feature = "multisig")]
use frost::{curve::Ed25519, tests::{THRESHOLD, key_gen, sign}};

use monero_serai::{random_scalar, wallet::{ViewPair, address::{Network, AddressType}, SignableTransaction}};

mod rpc;
use crate::rpc::{rpc, mine_block};

lazy_static! {
  static ref SEQUENTIAL: Mutex<()> = Mutex::new(());
}

macro_rules! async_sequential {
  ($(async fn $name: ident() $body: block)*) => {
    $(
      #[tokio::test]
      async fn $name() {
        let guard = SEQUENTIAL.lock().unwrap();
        let local = tokio::task::LocalSet::new();
        local.run_until(async move {
          if let Err(err) = tokio::task::spawn_local(async move { $body }).await {
            drop(guard);
            Err(err).unwrap()
          }
        }).await;
      }
    )*
  };
}

async fn send_core(test: usize, multisig: bool) {
  let rpc = rpc().await;

  // Generate an address
  let spend = random_scalar(&mut OsRng);
  #[allow(unused_mut)]
  let mut view = random_scalar(&mut OsRng);
  #[allow(unused_mut)]
  let mut spend_pub = &spend * &ED25519_BASEPOINT_TABLE;

  #[cfg(feature = "multisig")]
  let keys = key_gen::<_, Ed25519>(&mut OsRng);

  if multisig {
    #[cfg(not(feature = "multisig"))]
    panic!("Running a multisig test without the multisig feature");
    #[cfg(feature = "multisig")]
    {
      view = Scalar::from_hash(Blake2b512::new().chain("Monero Serai Transaction Test")).0;
      spend_pub = keys[&1].group_key().0;
    }
  }

  let view_pair = ViewPair { view, spend: spend_pub };
  let addr = view_pair.address(Network::Mainnet, AddressType::Standard, false);

  let fee = rpc.get_fee().await.unwrap();

  let start = rpc.get_height().await.unwrap();
  for _ in 0 .. 7 {
    mine_block(&rpc, &addr.to_string()).await.unwrap();
  }

  let mut tx = None;
  // Allow tests to test variable transactions
  for i in 0 .. [2, 1][test] {
    let mut outputs = vec![];
    let mut amount = 0;
    // Test spending both a miner output and a normal output
    if test == 0 {
      if i == 0 {
        tx = Some(rpc.get_block_transactions(start).await.unwrap().swap_remove(0));
      }

      // Grab the largest output available
      let output = {
        let mut outputs = tx.as_ref().unwrap().scan(view_pair, false).ignore_timelock();
        outputs.sort_by(|x, y| x.commitment.amount.cmp(&y.commitment.amount).reverse());
        outputs.swap_remove(0)
      };
      // Test creating a zero change output and a non-zero change output
      amount = output.commitment.amount - u64::try_from(i).unwrap();
      outputs.push(output);

    // Test spending multiple inputs
    } else if test == 1 {
      if i != 0 {
        continue;
      }

      // We actually need 80 decoys for this transaction, so mine until then
      // 80 + 60 (miner TX maturity) + 10 (lock blocks)
      // It is possible for this to be lower, by noting maturity is sufficient regardless of lock
      // blocks, yet that's not currently implemented
      // TODO, if we care
      while rpc.get_height().await.unwrap() < 160 {
        mine_block(&rpc, &addr.to_string()).await.unwrap();
      }

      for i in (start + 1) .. (start + 9) {
        let tx = rpc.get_block_transactions(i).await.unwrap().swap_remove(0);
        let output = tx.scan(view_pair, false).ignore_timelock().swap_remove(0);
        amount += output.commitment.amount;
        outputs.push(output);
      }
    }

    let mut signable = SignableTransaction::new(
      outputs, vec![(addr, amount - 10000000000)], Some(addr), fee
    ).unwrap();

    if !multisig {
      tx = Some(signable.sign(&mut OsRng, &rpc, &spend).await.unwrap());
    } else {
      #[cfg(feature = "multisig")]
      {
        let mut machines = HashMap::new();
        for i in 1 ..= THRESHOLD {
          machines.insert(
            i,
            signable.clone().multisig(
              &rpc,
              (*keys[&i]).clone(),
              RecommendedTranscript::new(b"Monero Serai Test Transaction"),
              rpc.get_height().await.unwrap() - 10,
              (1 ..= THRESHOLD).collect::<Vec<_>>()
            ).await.unwrap()
          );
        }

        tx = Some(sign(&mut OsRng, machines, &vec![]));
      }
    }

    rpc.publish_transaction(tx.as_ref().unwrap()).await.unwrap();
    mine_block(&rpc, &addr.to_string()).await.unwrap();
  }
}

async_sequential! {
  async fn send_single_input() {
    send_core(0, false).await;
  }

  async fn send_multiple_inputs() {
    send_core(1, false).await;
  }
}

#[cfg(feature = "multisig")]
async_sequential! {
  async fn multisig_send_single_input() {
    send_core(0, true).await;
  }

  async fn multisig_send_multiple_inputs() {
    send_core(1, true).await;
  }
}

@@ -1,25 +0,0 @@
[package]
name = "serai-db"
version = "0.1.0"
description = "A simple database trait and backends for it"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/db"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.65"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
parity-db = { version = "0.4", default-features = false, optional = true }
rocksdb = { version = "0.21", default-features = false, features = ["zstd"], optional = true }

[features]
parity-db = ["dep:parity-db"]
rocksdb = ["dep:rocksdb"]

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2022-2023 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,119 +0,0 @@
#[doc(hidden)]
pub fn serai_db_key(
  db_dst: &'static [u8],
  item_dst: &'static [u8],
  key: impl AsRef<[u8]>,
) -> Vec<u8> {
  let db_len = u8::try_from(db_dst.len()).unwrap();
  let dst_len = u8::try_from(item_dst.len()).unwrap();
  [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
}

/// Creates a series of structs which provide namespacing for keys
///
/// # Description
///
/// Creates a unit struct and a default implementation for the `key`, `get`, and `set` methods.
/// The macro uses a syntax similar to defining a function. Parameters are concatenated to produce
/// a key; they must be `scale` encodable. The return type is used to auto encode and decode the
/// database value bytes using `borsh`.
///
/// # Arguments
///
/// * `db_name` - A database name
/// * `field_name` - An item name
/// * `args` - Comma separated list of key arguments
/// * `field_type` - The return type
///
/// # Example
///
/// ```ignore
/// create_db!(
///   TributariesDb {
///     AttemptsDb: (key_bytes: &[u8], attempt_id: u32) -> u64,
///     ExpiredDb: (genesis: [u8; 32]) -> Vec<u8>
///   }
/// )
/// ```
#[macro_export]
macro_rules! create_db {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
  }) => {
    $(
      #[derive(Clone, Debug)]
      pub(crate) struct $field_name;
      impl $field_name {
        pub(crate) fn key($($arg: $arg_type),*) -> Vec<u8> {
          use scale::Encode;
          $crate::serai_db_key(
            stringify!($db_name).as_bytes(),
            stringify!($field_name).as_bytes(),
            ($($arg),*).encode()
          )
        }
        pub(crate) fn set(txn: &mut impl DbTxn $(, $arg: $arg_type)*, data: &$field_type) {
          let key = $field_name::key($($arg),*);
          txn.put(&key, borsh::to_vec(data).unwrap());
        }
        pub(crate) fn get(getter: &impl Get, $($arg: $arg_type),*) -> Option<$field_type> {
          getter.get($field_name::key($($arg),*)).map(|data| {
            borsh::from_slice(data.as_ref()).unwrap()
          })
        }
        #[allow(dead_code)]
        pub(crate) fn del(txn: &mut impl DbTxn $(, $arg: $arg_type)*) {
          txn.del(&$field_name::key($($arg),*))
        }
      }
    )*
  };
}
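
A hypothetical use of the structs this macro generates, assuming the `AttemptsDb` definition from the doc example above and the `MemDb` backend defined later in this diff:

let mut db = serai_db::MemDb::new();
let mut txn = db.txn();
// Key arguments are SCALE-encoded and concatenated; values are borsh-encoded
AttemptsDb::set(&mut txn, b"topic".as_slice(), 0, &5u64);
assert_eq!(AttemptsDb::get(&txn, b"topic".as_slice(), 0), Some(5u64));
txn.commit();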

#[macro_export]
macro_rules! db_channel {
  ($db_name: ident {
    $($field_name: ident: ($($arg: ident: $arg_type: ty),*) -> $field_type: ty$(,)?)*
  }) => {
    $(
      create_db! {
        $db_name {
          $field_name: ($($arg: $arg_type,)* index: u32) -> $field_type,
        }
      }

      impl $field_name {
        pub(crate) fn send(txn: &mut impl DbTxn $(, $arg: $arg_type)*, value: &$field_type) {
          // Use index 0 to store the amount of messages
          let messages_sent_key = $field_name::key($($arg),*, 0);
          let messages_sent = txn.get(&messages_sent_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);
          txn.put(&messages_sent_key, (messages_sent + 1).to_le_bytes());

          // + 2 as index 1 is used for the amount of messages read
          // Using distinct counters enables send to be called without mutating anything recv may
          // be using at the same time
          let index_to_use = messages_sent + 2;

          $field_name::set(txn, $($arg),*, index_to_use, value);
        }
        pub(crate) fn try_recv(txn: &mut impl DbTxn $(, $arg: $arg_type)*) -> Option<$field_type> {
          let messages_recvd_key = $field_name::key($($arg),*, 1);
          let messages_recvd = txn.get(&messages_recvd_key).map(|counter| {
            u32::from_le_bytes(counter.try_into().unwrap())
          }).unwrap_or(0);

          let index_to_read = messages_recvd + 2;

          let res = $field_name::get(txn, $($arg),*, index_to_read);
          if res.is_some() {
            $field_name::del(txn, $($arg),*, index_to_read);
            txn.put(&messages_recvd_key, (messages_recvd + 1).to_le_bytes());
          }
          res
        }
      }
    )*
  };
}
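
A hypothetical FIFO round-trip over a generated channel (the `ExampleChannel` definition below is illustrative, not from this diff):

// db_channel!(ExampleDb { ExampleChannel: (genesis: [u8; 32]) -> Vec<u8> });
let mut txn = db.txn();
ExampleChannel::send(&mut txn, genesis, &vec![1, 2, 3]);
assert_eq!(ExampleChannel::try_recv(&mut txn, genesis), Some(vec![1, 2, 3]));
// The read counter advanced, so the message isn't yielded twice
assert_eq!(ExampleChannel::try_recv(&mut txn, genesis), None);
txn.commit();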

@@ -1,39 +0,0 @@
mod create_db;
pub use create_db::*;

mod mem;
pub use mem::*;

#[cfg(feature = "rocksdb")]
mod rocks;
#[cfg(feature = "rocksdb")]
pub use rocks::{RocksDB, new_rocksdb};

#[cfg(feature = "parity-db")]
mod parity_db;
#[cfg(feature = "parity-db")]
pub use parity_db::{ParityDb, new_parity_db};

/// An object implementing get.
pub trait Get {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>>;
}

/// An atomic database operation.
#[must_use]
pub trait DbTxn: Send + Get {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>);
  fn del(&mut self, key: impl AsRef<[u8]>);
  fn commit(self);
}

/// A database supporting atomic operations.
pub trait Db: 'static + Send + Sync + Clone + Get {
  type Transaction<'a>: DbTxn;
  fn key(db_dst: &'static [u8], item_dst: &'static [u8], key: impl AsRef<[u8]>) -> Vec<u8> {
    let db_len = u8::try_from(db_dst.len()).unwrap();
    let dst_len = u8::try_from(item_dst.len()).unwrap();
    [[db_len].as_ref(), db_dst, [dst_len].as_ref(), item_dst, key.as_ref()].concat()
  }
  fn txn(&mut self) -> Self::Transaction<'_>;
}

@@ -1,80 +0,0 @@
use core::fmt::Debug;
use std::{
  sync::{Arc, RwLock},
  collections::{HashSet, HashMap},
};

use crate::*;

/// An atomic operation for the in-memory database.
#[must_use]
#[derive(PartialEq, Eq, Debug)]
pub struct MemDbTxn<'a>(&'a MemDb, HashMap<Vec<u8>, Vec<u8>>, HashSet<Vec<u8>>);

impl<'a> Get for MemDbTxn<'a> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    if self.2.contains(key.as_ref()) {
      return None;
    }
    self
      .1
      .get(key.as_ref())
      .cloned()
      .or_else(|| self.0 .0.read().unwrap().get(key.as_ref()).cloned())
  }
}
impl<'a> DbTxn for MemDbTxn<'a> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.2.remove(key.as_ref());
    self.1.insert(key.as_ref().to_vec(), value.as_ref().to_vec());
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.1.remove(key.as_ref());
    self.2.insert(key.as_ref().to_vec());
  }
  fn commit(mut self) {
    let mut db = self.0 .0.write().unwrap();
    for (key, value) in self.1.drain() {
      db.insert(key, value);
    }
    for key in self.2 {
      db.remove(&key);
    }
  }
}

/// An in-memory database.
#[derive(Clone, Debug)]
pub struct MemDb(Arc<RwLock<HashMap<Vec<u8>, Vec<u8>>>>);

impl PartialEq for MemDb {
  fn eq(&self, other: &MemDb) -> bool {
    *self.0.read().unwrap() == *other.0.read().unwrap()
  }
}
impl Eq for MemDb {}

impl Default for MemDb {
  fn default() -> MemDb {
    MemDb(Arc::new(RwLock::new(HashMap::new())))
  }
}

impl MemDb {
  /// Create a new in-memory database.
  pub fn new() -> MemDb {
    MemDb::default()
  }
}

impl Get for MemDb {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    self.0.read().unwrap().get(key.as_ref()).cloned()
  }
}
impl Db for MemDb {
  type Transaction<'a> = MemDbTxn<'a>;
  fn txn(&mut self) -> MemDbTxn<'_> {
    MemDbTxn(self, HashMap::new(), HashSet::new())
  }
}
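
A quick sketch of the overlay semantics above (hypothetical keys; since `MemDb` clones share one underlying map, `reader` observes only committed state):

let mut db = MemDb::new();
let reader = db.clone();
let mut txn = db.txn();
txn.put(b"k", b"v");
txn.commit();
let mut txn = db.txn();
txn.del(b"k");
assert!(txn.get(b"k").is_none());                  // shadowed by the staged delete
assert_eq!(reader.get(b"k"), Some(b"v".to_vec())); // base unchanged until commit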

@@ -1,47 +0,0 @@
use std::sync::Arc;

pub use ::parity_db::{Options, Db as ParityDb};

use crate::*;

#[must_use]
pub struct Transaction<'a>(&'a Arc<ParityDb>, Vec<(u8, Vec<u8>, Option<Vec<u8>>)>);

impl Get for Transaction<'_> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    let mut res = self.0.get(&key);
    for change in &self.1 {
      if change.1 == key.as_ref() {
        res.clone_from(&change.2);
      }
    }
    res
  }
}
impl DbTxn for Transaction<'_> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.1.push((0, key.as_ref().to_vec(), Some(value.as_ref().to_vec())))
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.1.push((0, key.as_ref().to_vec(), None))
  }
  fn commit(self) {
    self.0.commit(self.1).unwrap()
  }
}

impl Get for Arc<ParityDb> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    ParityDb::get(self, 0, key.as_ref()).unwrap()
  }
}
impl Db for Arc<ParityDb> {
  type Transaction<'a> = Transaction<'a>;
  fn txn(&mut self) -> Self::Transaction<'_> {
    Transaction(self, vec![])
  }
}

pub fn new_parity_db(path: &str) -> Arc<ParityDb> {
  Arc::new(ParityDb::open_or_create(&Options::with_columns(std::path::Path::new(path), 1)).unwrap())
}

@@ -1,66 +0,0 @@
use std::sync::Arc;

use rocksdb::{
  DBCompressionType, ThreadMode, SingleThreaded, LogLevel, WriteOptions,
  Transaction as RocksTransaction, Options, OptimisticTransactionDB,
};

use crate::*;

#[must_use]
pub struct Transaction<'a, T: ThreadMode>(
  RocksTransaction<'a, OptimisticTransactionDB<T>>,
  &'a OptimisticTransactionDB<T>,
);

impl<T: ThreadMode> Get for Transaction<'_, T> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    self.0.get(key).expect("couldn't read from RocksDB via transaction")
  }
}
impl<T: ThreadMode> DbTxn for Transaction<'_, T> {
  fn put(&mut self, key: impl AsRef<[u8]>, value: impl AsRef<[u8]>) {
    self.0.put(key, value).expect("couldn't write to RocksDB via transaction")
  }
  fn del(&mut self, key: impl AsRef<[u8]>) {
    self.0.delete(key).expect("couldn't delete from RocksDB via transaction")
  }
  fn commit(self) {
    self.0.commit().expect("couldn't commit to RocksDB via transaction");
    self.1.flush_wal(true).expect("couldn't flush RocksDB WAL");
    self.1.flush().expect("couldn't flush RocksDB");
  }
}

impl<T: ThreadMode> Get for Arc<OptimisticTransactionDB<T>> {
  fn get(&self, key: impl AsRef<[u8]>) -> Option<Vec<u8>> {
    OptimisticTransactionDB::get(self, key).expect("couldn't read from RocksDB")
  }
}
impl<T: Send + ThreadMode + 'static> Db for Arc<OptimisticTransactionDB<T>> {
  type Transaction<'a> = Transaction<'a, T>;
  fn txn(&mut self) -> Self::Transaction<'_> {
    let mut opts = WriteOptions::default();
    opts.set_sync(true);
    Transaction(self.transaction_opt(&opts, &Default::default()), &**self)
  }
}

pub type RocksDB = Arc<OptimisticTransactionDB<SingleThreaded>>;
pub fn new_rocksdb(path: &str) -> RocksDB {
  let mut options = Options::default();
  options.create_if_missing(true);
  options.set_compression_type(DBCompressionType::Zstd);

  options.set_wal_compression_type(DBCompressionType::Zstd);
  // 10 MB
  options.set_max_total_wal_size(10 * 1024 * 1024);
  options.set_wal_size_limit_mb(10);

  options.set_log_level(LogLevel::Warn);
  // 1 MB
  options.set_max_log_file_size(1024 * 1024);
  options.set_recycle_log_file_num(1);

  Arc::new(OptimisticTransactionDB::open(&options, path).unwrap())
}

17
common/env/Cargo.toml
vendored
@@ -1,17 +0,0 @@
[package]
name = "serai-env"
version = "0.1.0"
description = "A common library for Serai apps to access environment variables"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/common/env"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.60"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

9
common/env/src/lib.rs
vendored
@@ -1,9 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]

// Obtain a variable from the Serai environment/secret store.
pub fn var(variable: &str) -> Option<String> {
  // TODO: Move this to a proper secret store
  // TODO: Unset this variable
  std::env::var(variable).ok()
}

@@ -1,34 +0,0 @@
[package]
name = "simple-request"
version = "0.1.0"
description = "A simple HTTP(S) request library"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/simple-request"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["http", "https", "async", "request", "ssl"]
edition = "2021"
rust-version = "1.64"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
tower-service = { version = "0.3", default-features = false }
hyper = { version = "1", default-features = false, features = ["http1", "client"] }
hyper-util = { version = "0.1", default-features = false, features = ["http1", "client-legacy", "tokio"] }
http-body-util = { version = "0.1", default-features = false }
tokio = { version = "1", default-features = false }

hyper-rustls = { version = "0.27", default-features = false, features = ["http1", "ring", "rustls-native-certs", "native-tokio"], optional = true }

zeroize = { version = "1", optional = true }
base64ct = { version = "1", features = ["alloc"], optional = true }

[features]
tls = ["hyper-rustls"]
basic-auth = ["zeroize", "base64ct"]
default = ["tls"]

@@ -1,7 +0,0 @@
# Simple Request

A simple alternative to reqwest, supporting HTTPS, intended to support a
majority of use cases with a fraction of the dependency tree.

This library is built directly around `hyper` and `hyper-rustls`, and does
require `tokio`. Support for `async-std` would be welcome.

@@ -1,170 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]

use std::sync::Arc;

use tokio::sync::Mutex;

use tower_service::Service as TowerService;
#[cfg(feature = "tls")]
use hyper_rustls::{HttpsConnectorBuilder, HttpsConnector};
use hyper::{Uri, header::HeaderValue, body::Bytes, client::conn::http1::SendRequest};
use hyper_util::{
  rt::tokio::TokioExecutor,
  client::legacy::{Client as HyperClient, connect::HttpConnector},
};
pub use hyper;

mod request;
pub use request::*;

mod response;
pub use response::*;

#[derive(Debug)]
pub enum Error {
  InvalidUri,
  MissingHost,
  InconsistentHost,
  ConnectionError(Box<dyn Send + Sync + std::error::Error>),
  Hyper(hyper::Error),
  HyperUtil(hyper_util::client::legacy::Error),
}

#[cfg(not(feature = "tls"))]
type Connector = HttpConnector;
#[cfg(feature = "tls")]
type Connector = HttpsConnector<HttpConnector>;

#[derive(Clone, Debug)]
enum Connection {
  ConnectionPool(HyperClient<Connector, Full<Bytes>>),
  Connection {
    connector: Connector,
    host: Uri,
    connection: Arc<Mutex<Option<SendRequest<Full<Bytes>>>>>,
  },
}

#[derive(Clone, Debug)]
pub struct Client {
  connection: Connection,
}

impl Client {
  fn connector() -> Connector {
    let mut res = HttpConnector::new();
    res.set_keepalive(Some(core::time::Duration::from_secs(60)));
    res.set_nodelay(true);
    res.set_reuse_address(true);
    #[cfg(feature = "tls")]
    res.enforce_http(false);
    #[cfg(feature = "tls")]
    let res = HttpsConnectorBuilder::new()
      .with_native_roots()
      .expect("couldn't fetch system's SSL roots")
      .https_or_http()
      .enable_http1()
      .wrap_connector(res);
    res
  }

  pub fn with_connection_pool() -> Client {
    Client {
      connection: Connection::ConnectionPool(
        HyperClient::builder(TokioExecutor::new())
          .pool_idle_timeout(core::time::Duration::from_secs(60))
          .build(Self::connector()),
      ),
    }
  }

  pub fn without_connection_pool(host: &str) -> Result<Client, Error> {
    Ok(Client {
      connection: Connection::Connection {
        connector: Self::connector(),
        host: {
          let uri: Uri = host.parse().map_err(|_| Error::InvalidUri)?;
          if uri.host().is_none() {
            Err(Error::MissingHost)?;
          };
          uri
        },
        connection: Arc::new(Mutex::new(None)),
      },
    })
  }

  pub async fn request<R: Into<Request>>(&self, request: R) -> Result<Response<'_>, Error> {
    let request: Request = request.into();
    let mut request = request.0;
    if let Some(header_host) = request.headers().get(hyper::header::HOST) {
      match &self.connection {
        Connection::ConnectionPool(_) => {}
        Connection::Connection { host, .. } => {
          if header_host.to_str().map_err(|_| Error::InvalidUri)? != host.host().unwrap() {
            Err(Error::InconsistentHost)?;
          }
        }
      }
    } else {
      let host = match &self.connection {
        Connection::ConnectionPool(_) => {
          request.uri().host().ok_or(Error::MissingHost)?.to_string()
        }
        Connection::Connection { host, .. } => {
          let host_str = host.host().unwrap();
          if let Some(uri_host) = request.uri().host() {
            if host_str != uri_host {
              Err(Error::InconsistentHost)?;
            }
          }
          host_str.to_string()
        }
      };
      request
        .headers_mut()
        .insert(hyper::header::HOST, HeaderValue::from_str(&host).map_err(|_| Error::InvalidUri)?);
    }

    let response = match &self.connection {
      Connection::ConnectionPool(client) => {
        client.request(request).await.map_err(Error::HyperUtil)?
      }
      Connection::Connection { connector, host, connection } => {
        let mut connection_lock = connection.lock().await;

        // If there's not a connection...
        if connection_lock.is_none() {
          let call_res = connector.clone().call(host.clone()).await;
          #[cfg(not(feature = "tls"))]
          let call_res = call_res.map_err(|e| Error::ConnectionError(format!("{e:?}").into()));
          #[cfg(feature = "tls")]
          let call_res = call_res.map_err(Error::ConnectionError);
          let (requester, connection) =
            hyper::client::conn::http1::handshake(call_res?).await.map_err(Error::Hyper)?;
          // This will die when we drop the requester, so we don't need to track an AbortHandle
          // for it
          tokio::spawn(connection);
          *connection_lock = Some(requester);
        }

        let connection = connection_lock.as_mut().unwrap();
        let mut err = connection.ready().await.err();
        if err.is_none() {
          // Send the request
          let res = connection.send_request(request).await;
          if let Ok(res) = res {
            return Ok(Response(res, self));
          }
          err = res.err();
        }
        // Since this connection has been put into an error state, drop it
        *connection_lock = None;
        Err(Error::Hyper(err.unwrap()))?
      }
    };

    Ok(Response(response, self))
  }
}
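
For reference, a hypothetical call against this API (the address is illustrative and error handling is elided):

let client = Client::with_connection_pool();
let request = hyper::Request::builder()
  .uri("http://127.0.0.1:8080/")
  .body(http_body_util::Full::new(hyper::body::Bytes::new()))
  .unwrap();
let response = client.request(request).await?;
assert!(response.status().is_success());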

@@ -1,67 +0,0 @@
use hyper::body::Bytes;
#[cfg(feature = "basic-auth")]
use hyper::header::HeaderValue;
pub use http_body_util::Full;

#[cfg(feature = "basic-auth")]
use crate::Error;

#[derive(Debug)]
pub struct Request(pub(crate) hyper::Request<Full<Bytes>>);
impl Request {
  #[cfg(feature = "basic-auth")]
  fn username_password_from_uri(&self) -> Result<(String, String), Error> {
    if let Some(authority) = self.0.uri().authority() {
      let authority = authority.as_str();
      if authority.contains('@') {
        // Decode the username and password from the URI
        let mut userpass = authority.split('@').next().unwrap().to_string();

        let mut userpass_iter = userpass.split(':');
        let username = userpass_iter.next().unwrap().to_string();
        let password = userpass_iter.next().map_or_else(String::new, str::to_string);
        zeroize::Zeroize::zeroize(&mut userpass);

        return Ok((username, password));
      }
    }
    Err(Error::InvalidUri)
  }

  #[cfg(feature = "basic-auth")]
  pub fn basic_auth(&mut self, username: &str, password: &str) {
    use zeroize::Zeroize;
    use base64ct::{Encoding, Base64};

    let mut formatted = format!("{username}:{password}");
    let mut encoded = Base64::encode_string(formatted.as_bytes());
    formatted.zeroize();
    self.0.headers_mut().insert(
      hyper::header::AUTHORIZATION,
      HeaderValue::from_str(&format!("Basic {encoded}")).unwrap(),
    );
    encoded.zeroize();
  }

  #[cfg(feature = "basic-auth")]
  pub fn basic_auth_from_uri(&mut self) -> Result<(), Error> {
    let (mut username, mut password) = self.username_password_from_uri()?;
    self.basic_auth(&username, &password);

    use zeroize::Zeroize;
    username.zeroize();
    password.zeroize();

    Ok(())
  }

  #[cfg(feature = "basic-auth")]
  pub fn with_basic_auth(&mut self) {
    let _ = self.basic_auth_from_uri();
  }
}
impl From<hyper::Request<Full<Bytes>>> for Request {
  fn from(request: hyper::Request<Full<Bytes>>) -> Request {
    Request(request)
  }
}

@@ -1,24 +0,0 @@
use hyper::{
  StatusCode,
  header::{HeaderValue, HeaderMap},
  body::{Buf, Incoming},
};
use http_body_util::BodyExt;

use crate::{Client, Error};

// Borrows the client so its async task lives as long as this response exists.
#[allow(dead_code)]
#[derive(Debug)]
pub struct Response<'a>(pub(crate) hyper::Response<Incoming>, pub(crate) &'a Client);
impl<'a> Response<'a> {
  pub fn status(&self) -> StatusCode {
    self.0.status()
  }
  pub fn headers(&self) -> &HeaderMap<HeaderValue> {
    self.0.headers()
  }
  pub async fn body(self) -> Result<impl std::io::Read, Error> {
    Ok(self.0.into_body().collect().await.map_err(Error::Hyper)?.aggregate().reader())
  }
}

@@ -1,25 +0,0 @@
[package]
name = "std-shims"
version = "0.1.1"
description = "A series of std shims to make alloc more feasible"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/std-shims"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = ["nostd", "no_std", "alloc", "io"]
edition = "2021"
rust-version = "1.70"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
spin = { version = "0.9", default-features = false, features = ["use_ticket_mutex", "once"] }
hashbrown = { version = "0.14", default-features = false, features = ["ahash", "inline-more"] }

[features]
std = []
default = ["std"]

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2023 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,6 +0,0 @@
# std shims

A crate which passes through to std when the default `std` feature is enabled,
yet provides a series of shims when it isn't.

`HashSet` and `HashMap` are provided via `hashbrown`.

@@ -1,7 +0,0 @@
#[cfg(feature = "std")]
pub use std::collections::*;

#[cfg(not(feature = "std"))]
pub use alloc::collections::*;
#[cfg(not(feature = "std"))]
pub use hashbrown::{HashSet, HashMap};

@@ -1,100 +0,0 @@
#[cfg(feature = "std")]
pub use std::io::*;

#[cfg(not(feature = "std"))]
mod shims {
  use core::fmt::{Debug, Formatter};
  use alloc::{boxed::Box, vec::Vec};

  #[derive(Clone, Copy, PartialEq, Eq, Debug)]
  pub enum ErrorKind {
    UnexpectedEof,
    Other,
  }

  pub struct Error {
    kind: ErrorKind,
    error: Box<dyn Send + Sync>,
  }

  impl Debug for Error {
    fn fmt(&self, fmt: &mut Formatter<'_>) -> core::result::Result<(), core::fmt::Error> {
      fmt.debug_struct("Error").field("kind", &self.kind).finish_non_exhaustive()
    }
  }

  impl Error {
    pub fn new<E: 'static + Send + Sync>(kind: ErrorKind, error: E) -> Error {
      Error { kind, error: Box::new(error) }
    }

    pub fn other<E: 'static + Send + Sync>(error: E) -> Error {
      Error { kind: ErrorKind::Other, error: Box::new(error) }
    }

    pub fn kind(&self) -> ErrorKind {
      self.kind
    }

    pub fn into_inner(self) -> Option<Box<dyn Send + Sync>> {
      Some(self.error)
    }
  }

  pub type Result<T> = core::result::Result<T, Error>;

  pub trait Read {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize>;

    fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
      let read = self.read(buf)?;
      if read != buf.len() {
        Err(Error::new(ErrorKind::UnexpectedEof, "reader ran out of bytes"))?;
      }
      Ok(())
    }
  }

  impl Read for &[u8] {
    fn read(&mut self, buf: &mut [u8]) -> Result<usize> {
      let read = buf.len().min(self.len());
      buf[.. read].copy_from_slice(&self[.. read]);
      *self = &self[read ..];
      Ok(read)
    }
  }

  pub trait BufRead: Read {
    fn fill_buf(&mut self) -> Result<&[u8]>;
    fn consume(&mut self, amt: usize);
  }

  impl BufRead for &[u8] {
    fn fill_buf(&mut self) -> Result<&[u8]> {
      Ok(*self)
    }
    fn consume(&mut self, amt: usize) {
      *self = &self[amt ..];
    }
  }

  pub trait Write {
    fn write(&mut self, buf: &[u8]) -> Result<usize>;
    fn write_all(&mut self, buf: &[u8]) -> Result<()> {
      if self.write(buf)? != buf.len() {
        Err(Error::new(ErrorKind::UnexpectedEof, "writer ran out of bytes"))?;
      }
      Ok(())
    }
  }

  impl Write for Vec<u8> {
    fn write(&mut self, buf: &[u8]) -> Result<usize> {
      self.extend(buf);
      Ok(buf.len())
    }
  }
}

#[cfg(not(feature = "std"))]
pub use shims::*;
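
As a small illustration, the shimmed `Read` behaves like `std::io::Read` for byte slices:

let mut reader: &[u8] = &[1, 2, 3, 4];
let mut buf = [0; 2];
reader.read_exact(&mut buf).unwrap();
assert_eq!(buf, [1, 2]);
assert_eq!(reader, &[3, 4]); // the slice advances past consumed bytes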

@@ -1,13 +0,0 @@
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![doc = include_str!("../README.md")]
#![cfg_attr(not(feature = "std"), no_std)]

pub extern crate alloc;

pub mod sync;
pub mod collections;
pub mod io;

pub use alloc::vec;
pub use alloc::str;
pub use alloc::string;

@@ -1,52 +0,0 @@
pub use core::sync::*;
pub use alloc::sync::*;

mod mutex_shim {
  #[cfg(feature = "std")]
  pub use std::sync::*;
  #[cfg(not(feature = "std"))]
  pub use spin::*;

  #[derive(Default, Debug)]
  pub struct ShimMutex<T>(Mutex<T>);
  impl<T> ShimMutex<T> {
    pub const fn new(value: T) -> Self {
      Self(Mutex::new(value))
    }

    pub fn lock(&self) -> MutexGuard<'_, T> {
      #[cfg(feature = "std")]
      let res = self.0.lock().unwrap();
      #[cfg(not(feature = "std"))]
      let res = self.0.lock();
      res
    }
  }
}
pub use mutex_shim::{ShimMutex as Mutex, MutexGuard};

#[cfg(feature = "std")]
pub use std::sync::OnceLock;
#[cfg(not(feature = "std"))]
mod oncelock_shim {
  use spin::Once;

  pub struct OnceLock<T>(Once<T>);
  impl<T> OnceLock<T> {
    pub const fn new() -> OnceLock<T> {
      OnceLock(Once::new())
    }
    pub fn get(&self) -> Option<&T> {
      self.0.poll()
    }
    pub fn get_mut(&mut self) -> Option<&mut T> {
      self.0.get_mut()
    }

    pub fn get_or_init<F: FnOnce() -> T>(&self, f: F) -> &T {
      self.0.call_once(f)
    }
  }
}
#[cfg(not(feature = "std"))]
pub use oncelock_shim::*;

@@ -1,28 +0,0 @@
[package]
name = "zalloc"
version = "0.1.0"
description = "An allocator wrapper which zeroizes memory on dealloc"
license = "MIT"
repository = "https://github.com/serai-dex/serai/tree/develop/common/zalloc"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
rust-version = "1.77.0"

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
zeroize = { version = "^1.5", default-features = false }

[build-dependencies]
rustversion = { version = "1", default-features = false }

[features]
std = ["zeroize/std"]
default = ["std"]
allocator = []

@@ -1,21 +0,0 @@
MIT License

Copyright (c) 2022-2023 Luke Parker

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,10 +0,0 @@
#[rustversion::nightly]
fn main() {
  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
  println!("cargo::rustc-cfg=zalloc_rustc_nightly");
}

#[rustversion::not(nightly)]
fn main() {
  println!("cargo::rustc-check-cfg=cfg(zalloc_rustc_nightly)");
}

@@ -1,46 +0,0 @@
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![cfg_attr(all(zalloc_rustc_nightly, feature = "allocator"), feature(allocator_api))]

//! Implementation of a Zeroizing Allocator, enabling zeroizing memory on deallocation.
//! This can either be used with Box (requires nightly and the "allocator" feature) to provide the
//! functionality of zeroize on types which don't implement zeroize, or used as a wrapper around
//! the global allocator to ensure *all* memory is zeroized.

use core::{
  slice,
  alloc::{Layout, GlobalAlloc},
};

use zeroize::Zeroize;

/// An allocator wrapper which zeroizes its memory on dealloc.
pub struct ZeroizingAlloc<T>(pub T);

#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
use core::{
  ptr::NonNull,
  alloc::{AllocError, Allocator},
};
#[cfg(all(zalloc_rustc_nightly, feature = "allocator"))]
unsafe impl<T: Allocator> Allocator for ZeroizingAlloc<T> {
  fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
    self.0.allocate(layout)
  }

  unsafe fn deallocate(&self, mut ptr: NonNull<u8>, layout: Layout) {
    slice::from_raw_parts_mut(ptr.as_mut(), layout.size()).zeroize();
    self.0.deallocate(ptr, layout);
  }
}

unsafe impl<T: GlobalAlloc> GlobalAlloc for ZeroizingAlloc<T> {
  unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
    self.0.alloc(layout)
  }

  unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
    slice::from_raw_parts_mut(ptr, layout.size()).zeroize();
    self.0.dealloc(ptr, layout);
  }
}
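
A sketch of the global-allocator usage described in the crate docs (wrapping `std::alloc::System` is an illustrative choice, not mandated by this diff):

use zalloc::ZeroizingAlloc;

// Every deallocation through the global allocator is now zeroized first
#[global_allocator]
static ALLOCATOR: ZeroizingAlloc<std::alloc::System> = ZeroizingAlloc(std::alloc::System);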

@@ -1,64 +0,0 @@
[package]
name = "serai-coordinator"
version = "0.1.0"
description = "Serai coordinator to prepare batches and sign transactions"
license = "AGPL-3.0-only"
repository = "https://github.com/serai-dex/serai/tree/develop/coordinator"
authors = ["Luke Parker <lukeparker5132@gmail.com>"]
keywords = []
edition = "2021"
publish = false

[package.metadata.docs.rs]
all-features = true
rustdoc-args = ["--cfg", "docsrs"]

[lints]
workspace = true

[dependencies]
async-trait = { version = "0.1", default-features = false }

zeroize = { version = "^1.5", default-features = false, features = ["std"] }
rand_core = { version = "0.6", default-features = false, features = ["std"] }

blake2 = { version = "0.10", default-features = false, features = ["std"] }

transcript = { package = "flexible-transcript", path = "../crypto/transcript", default-features = false, features = ["std", "recommended"] }
ciphersuite = { path = "../crypto/ciphersuite", default-features = false, features = ["std"] }
schnorr = { package = "schnorr-signatures", path = "../crypto/schnorr", default-features = false, features = ["std"] }
frost = { package = "modular-frost", path = "../crypto/frost" }
frost-schnorrkel = { path = "../crypto/schnorrkel" }

scale = { package = "parity-scale-codec", version = "3", default-features = false, features = ["std", "derive"] }

zalloc = { path = "../common/zalloc" }
serai-db = { path = "../common/db" }
serai-env = { path = "../common/env" }

processor-messages = { package = "serai-processor-messages", path = "../processor/messages" }
message-queue = { package = "serai-message-queue", path = "../message-queue" }
tributary = { package = "tributary-chain", path = "./tributary" }

sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
serai-client = { path = "../substrate/client", default-features = false, features = ["serai", "borsh"] }

hex = { version = "0.4", default-features = false, features = ["std"] }
borsh = { version = "1", default-features = false, features = ["std", "derive", "de_strict_order"] }

log = { version = "0.4", default-features = false, features = ["std"] }
env_logger = { version = "0.10", default-features = false, features = ["humantime"] }

futures-util = { version = "0.3", default-features = false, features = ["std"] }
tokio = { version = "1", default-features = false, features = ["rt-multi-thread", "sync", "time", "macros"] }
libp2p = { version = "0.52", default-features = false, features = ["tokio", "tcp", "noise", "yamux", "request-response", "gossipsub", "macros"] }

[dev-dependencies]
tributary = { package = "tributary-chain", path = "./tributary", features = ["tests"] }
sp-application-crypto = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }
sp-runtime = { git = "https://github.com/serai-dex/substrate", default-features = false, features = ["std"] }

[features]
longer-reattempts = []
parity-db = ["serai-db/parity-db"]
rocksdb = ["serai-db/rocksdb"]
@@ -1,15 +0,0 @@
AGPL-3.0-only license

Copyright (c) 2023 Luke Parker

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License Version 3 as
published by the Free Software Foundation.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.

You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
@@ -1,7 +0,0 @@
# Coordinator

The Serai coordinator communicates with other coordinators to prepare batches
for Serai and to sign transactions.

In order to achieve consensus over gossip, and to order certain events, a
micro-blockchain is instantiated.
@@ -1,336 +0,0 @@
use core::time::Duration;
use std::{
  sync::Arc,
  collections::{HashSet, HashMap},
};

use tokio::{
  sync::{mpsc, Mutex, RwLock},
  time::sleep,
};

use borsh::BorshSerialize;
use sp_application_crypto::RuntimePublic;
use serai_client::{
  primitives::{NETWORKS, NetworkId, Signature},
  validator_sets::primitives::{Session, ValidatorSet},
  SeraiError, TemporalSerai, Serai,
};

use serai_db::{Get, DbTxn, Db, create_db};

use processor_messages::coordinator::cosign_block_msg;

use crate::{
  p2p::{CosignedBlock, GossipMessageKind, P2p},
  substrate::LatestCosignedBlock,
};

create_db! {
  CosignDb {
    ReceivedCosign: (set: ValidatorSet, block: [u8; 32]) -> CosignedBlock,
    LatestCosign: (network: NetworkId) -> CosignedBlock,
    DistinctChain: (set: ValidatorSet) -> (),
  }
}

pub struct CosignEvaluator<D: Db> {
  db: Mutex<D>,
  serai: Arc<Serai>,
  stakes: RwLock<Option<HashMap<NetworkId, u64>>>,
  latest_cosigns: RwLock<HashMap<NetworkId, CosignedBlock>>,
}

impl<D: Db> CosignEvaluator<D> {
  async fn update_latest_cosign(&self) {
    let stakes_lock = self.stakes.read().await;
    // If we haven't gotten the stake data yet, return
    let Some(stakes) = stakes_lock.as_ref() else { return };

    let total_stake = stakes.values().copied().sum::<u64>();

    let latest_cosigns = self.latest_cosigns.read().await;
    let mut highest_block = 0;
    for cosign in latest_cosigns.values() {
      let mut networks = HashSet::new();
      for (network, sub_cosign) in &*latest_cosigns {
        if sub_cosign.block_number >= cosign.block_number {
          networks.insert(network);
        }
      }
      let sum_stake =
        networks.into_iter().map(|network| stakes.get(network).unwrap_or(&0)).sum::<u64>();
      let needed_stake = ((total_stake * 2) / 3) + 1;
      if (total_stake == 0) || (sum_stake > needed_stake) {
        highest_block = highest_block.max(cosign.block_number);
      }
    }

    let mut db_lock = self.db.lock().await;
    let mut txn = db_lock.txn();
    if highest_block > LatestCosignedBlock::latest_cosigned_block(&txn) {
      log::info!("setting latest cosigned block to {}", highest_block);
      LatestCosignedBlock::set(&mut txn, &highest_block);
    }
    txn.commit();
  }
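
  // A worked illustration of the supermajority check above (illustrative numbers, not
  // from the source): with stakes of {A: 30, B: 30, C: 30}, total_stake = 90 and
  // needed_stake = ((90 * 2) / 3) + 1 = 61. Two networks cosigning a block only sum to
  // 60 stake, so that block number isn't adopted; once all three have cosigned at or
  // past it (90 > 61), it becomes the latest cosigned block.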

  async fn update_stakes(&self) -> Result<(), SeraiError> {
    let serai = self.serai.as_of_latest_finalized_block().await?;

    let mut stakes = HashMap::new();
    for network in NETWORKS {
      // Use whether this network has published a Batch as a short-circuit for whether
      // they've ever set a key
      let set_key = serai.in_instructions().last_batch_for_network(network).await?.is_some();
      if set_key {
        stakes.insert(
          network,
          serai
            .validator_sets()
            .total_allocated_stake(network)
            .await?
            .expect("network which published a batch didn't have a stake set")
            .0,
        );
      }
    }

    // Since we've successfully built stakes, set it
    *self.stakes.write().await = Some(stakes);

    self.update_latest_cosign().await;

    Ok(())
  }
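
  // For instance (illustrative, not from the source): if only Bitcoin has published a
  // Batch, `stakes` holds a single Bitcoin entry, and the supermajority in
  // update_latest_cosign is evaluated over Bitcoin's stake alone.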

  // Uses Err to signify a message should be retried
  async fn handle_new_cosign(&self, cosign: CosignedBlock) -> Result<(), SeraiError> {
    // If we already have this cosign or a newer cosign, return
    if let Some(latest) = self.latest_cosigns.read().await.get(&cosign.network) {
      if latest.block_number >= cosign.block_number {
        return Ok(());
      }
    }

    // If this is an old cosign (older than a day, given the arithmetic assumes 6-second
    // blocks), drop it
    let latest_block = self.serai.latest_finalized_block().await?;
    if (cosign.block_number + (24 * 60 * 60 / 6)) < latest_block.number() {
      log::debug!("received old cosign supposedly signed by {:?}", cosign.network);
      return Ok(());
    }

    let Some(block) = self.serai.finalized_block_by_number(cosign.block_number).await? else {
      log::warn!("received cosign with a block number which doesn't map to a block");
      return Ok(());
    };

    async fn set_with_keys_fn(
      serai: &TemporalSerai<'_>,
      network: NetworkId,
    ) -> Result<Option<ValidatorSet>, SeraiError> {
      let Some(latest_session) = serai.validator_sets().session(network).await? else {
        log::warn!("received cosign from {:?}, which doesn't yet have a session", network);
        return Ok(None);
      };
      let prior_session = Session(latest_session.0.saturating_sub(1));
      Ok(Some(
        if serai
          .validator_sets()
          .keys(ValidatorSet { network, session: prior_session })
          .await?
          .is_some()
        {
          ValidatorSet { network, session: prior_session }
        } else {
          ValidatorSet { network, session: latest_session }
        },
      ))
    }
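
    // Illustrative example of the prior-session preference above (hypothetical numbers):
    // if the latest session is Session(5) and Session(4)'s keys are still set (i.e. the
    // handover hasn't completed), cosigns are verified against Session(4)'s keys;
    // otherwise, Session(5)'s keys are used.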

    // Get the key for this network as of the prior block
    // If we have two chains, this value may be different across chains, depending on if
    // one chain included the set_keys and one didn't
    // Because set_keys will force a cosign, it will force detection of distinct blocks
    // re: set_keys using keys prior to set_keys (assumed amenable to all)
    let serai = self.serai.as_of(block.header.parent_hash.into());

    let Some(set_with_keys) = set_with_keys_fn(&serai, cosign.network).await? else {
      return Ok(());
    };
    let Some(keys) = serai.validator_sets().keys(set_with_keys).await? else {
      log::warn!("received cosign for a block we didn't have keys for");
      return Ok(());
    };

    if !keys
      .0
      .verify(&cosign_block_msg(cosign.block_number, cosign.block), &Signature(cosign.signature))
    {
      log::warn!("received cosigned block with an invalid signature");
      return Ok(());
    }

    log::info!(
      "received cosign for block {} ({}) by {:?}",
      block.number(),
      hex::encode(cosign.block),
      cosign.network
    );

    // Save this cosign to the DB
    {
      let mut db = self.db.lock().await;
      let mut txn = db.txn();
      ReceivedCosign::set(&mut txn, set_with_keys, cosign.block, &cosign);
      LatestCosign::set(&mut txn, set_with_keys.network, &cosign);
      txn.commit();
    }

    if cosign.block != block.hash() {
      log::error!(
        "received cosign for a distinct block at {}. we have {}. cosign had {}",
        cosign.block_number,
        hex::encode(block.hash()),
        hex::encode(cosign.block)
      );

      let serai = self.serai.as_of(latest_block.hash());

      let mut db = self.db.lock().await;
      // Save this set as being on a different chain
      let mut txn = db.txn();
      DistinctChain::set(&mut txn, set_with_keys, &());
      txn.commit();

      let mut total_stake = 0;
      let mut total_on_distinct_chain = 0;
      for network in NETWORKS {
        if network == NetworkId::Serai {
          continue;
        }

        // Get the current set for this network
        // (the `while { body; cond } {}` form below is Rust's do-while idiom: the block
        // runs, then its trailing bool decides whether to retry)
        let set_with_keys = {
          let mut res;
          while {
            res = set_with_keys_fn(&serai, network).await;
            res.is_err()
          } {
            log::error!(
              "couldn't get the set with keys when checking for a distinct chain: {:?}",
              res
            );
            tokio::time::sleep(core::time::Duration::from_secs(3)).await;
          }
          res.unwrap()
        };

        // Get its stake
        // Doesn't use the stakes inside self to prevent deadlocks re: multi-lock acquisition
        if let Some(set_with_keys) = set_with_keys {
          let stake = {
            let mut res;
            while {
              res = serai.validator_sets().total_allocated_stake(set_with_keys.network).await;
              res.is_err()
            } {
              log::error!(
                "couldn't get total allocated stake when checking for a distinct chain: {:?}",
                res
              );
              tokio::time::sleep(core::time::Duration::from_secs(3)).await;
            }
            res.unwrap()
          };

          if let Some(stake) = stake {
            total_stake += stake.0;

            if DistinctChain::get(&*db, set_with_keys).is_some() {
              total_on_distinct_chain += stake.0;
            }
          }
        }
      }

      // See https://github.com/serai-dex/serai/issues/339 for the reasoning on 17%
      if (total_stake * 17 / 100) <= total_on_distinct_chain {
        panic!("17% of validator sets (by stake) have co-signed a distinct chain");
      }
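
      // Worked example of the trigger above (illustrative stakes, not from the source):
      // with total_stake = 1000, (1000 * 17) / 100 = 170, so the node halts once sets
      // holding 170 or more stake are recorded as on a distinct chain. Note the integer
      // division: with total_stake = 7, the threshold is (7 * 17) / 100 = 1.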
    } else {
      {
        let mut latest_cosigns = self.latest_cosigns.write().await;
        latest_cosigns.insert(cosign.network, cosign);
      }
      self.update_latest_cosign().await;
    }

    Ok(())
  }

  #[allow(clippy::new_ret_no_self)]
  pub fn new<P: P2p>(db: D, p2p: P, serai: Arc<Serai>) -> mpsc::UnboundedSender<CosignedBlock> {
    let mut latest_cosigns = HashMap::new();
    for network in NETWORKS {
      if let Some(cosign) = LatestCosign::get(&db, network) {
        latest_cosigns.insert(network, cosign);
      }
    }

    let evaluator = Arc::new(Self {
      db: Mutex::new(db),
      serai,
      stakes: RwLock::new(None),
      latest_cosigns: RwLock::new(latest_cosigns),
    });

    // Spawn a task to update stakes regularly
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        loop {
          // Run this until it passes
          while evaluator.update_stakes().await.is_err() {
            log::warn!("couldn't update stakes in the cosign evaluator");
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
          // Run it every 10 minutes, as we don't need the exact stake data for this to be valid
          sleep(Duration::from_secs(10 * 60)).await;
        }
      }
    });

    // Spawn a task to receive cosigns and handle them
    let (send, mut recv) = mpsc::unbounded_channel();
    tokio::spawn({
      let evaluator = evaluator.clone();
      async move {
        while let Some(msg) = recv.recv().await {
          while evaluator.handle_new_cosign(msg).await.is_err() {
            // Try again in 10 seconds
            sleep(Duration::from_secs(10)).await;
          }
        }
      }
    });

    // Spawn a task to rebroadcast the most recent cosigns
    tokio::spawn({
      async move {
        loop {
          let cosigns =
            evaluator.latest_cosigns.read().await.values().copied().collect::<Vec<_>>();
          for cosign in cosigns {
            let mut buf = vec![];
            cosign.serialize(&mut buf).unwrap();
            P2p::broadcast(&p2p, GossipMessageKind::CosignedBlock, buf).await;
          }
          sleep(Duration::from_secs(60)).await;
        }
      }
    });

    // Return the channel used to send cosigns
    send
  }
}
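
How a caller might wire this up, as a hedged sketch (the `db`, `p2p`, and `serai` handles are assumptions taken from the signature above, not the coordinator's actual main function): `new` returns the sender half of an unbounded channel, and the P2P layer forwards each gossiped cosign into it.

// Minimal usage sketch, under the assumptions stated above.
let cosign_channel = CosignEvaluator::new(db.clone(), p2p.clone(), serai.clone());
// On receiving a GossipMessageKind::CosignedBlock from the P2P layer, after decoding
// the payload into a CosignedBlock:
cosign_channel.send(cosign).expect("cosign evaluator task panicked");
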
@@ -1,134 +0,0 @@
use blake2::{
  digest::{consts::U32, Digest},
  Blake2b,
};

use scale::Encode;
use borsh::{BorshSerialize, BorshDeserialize};
use serai_client::{
  primitives::NetworkId,
  validator_sets::primitives::{Session, ValidatorSet},
  in_instructions::primitives::{Batch, SignedBatch},
};

pub use serai_db::*;

use ::tributary::ReadWrite;
use crate::tributary::{TributarySpec, Transaction, scanner::RecognizedIdType};

create_db!(
  MainDb {
    HandledMessageDb: (network: NetworkId) -> u64,
    ActiveTributaryDb: () -> Vec<u8>,
    RetiredTributaryDb: (set: ValidatorSet) -> (),
    FirstPreprocessDb: (
      network: NetworkId,
      id_type: RecognizedIdType,
      id: &[u8]
    ) -> Vec<Vec<u8>>,
    LastReceivedBatchDb: (network: NetworkId) -> u32,
    ExpectedBatchDb: (network: NetworkId, id: u32) -> [u8; 32],
    BatchDb: (network: NetworkId, id: u32) -> SignedBatch,
    LastVerifiedBatchDb: (network: NetworkId) -> u32,
    HandoverBatchDb: (set: ValidatorSet) -> u32,
    LookupHandoverBatchDb: (network: NetworkId, batch: u32) -> Session,
    QueuedBatchesDb: (set: ValidatorSet) -> Vec<u8>
  }
);

impl ActiveTributaryDb {
  // Active tributaries are stored as borsh-serialized TributarySpecs appended end-to-end
  pub fn active_tributaries<G: Get>(getter: &G) -> (Vec<u8>, Vec<TributarySpec>) {
    let bytes = Self::get(getter).unwrap_or_default();
    let mut bytes_ref: &[u8] = bytes.as_ref();

    let mut tributaries = vec![];
    while !bytes_ref.is_empty() {
      tributaries.push(TributarySpec::deserialize_reader(&mut bytes_ref).unwrap());
    }

    (bytes, tributaries)
  }

  pub fn add_participating_in_tributary(txn: &mut impl DbTxn, spec: &TributarySpec) {
    let (mut existing_bytes, existing) = ActiveTributaryDb::active_tributaries(txn);
    for tributary in &existing {
      if tributary == spec {
        return;
      }
    }

    spec.serialize(&mut existing_bytes).unwrap();
    ActiveTributaryDb::set(txn, &existing_bytes);
  }

  pub fn retire_tributary(txn: &mut impl DbTxn, set: ValidatorSet) {
    let mut active = Self::active_tributaries(txn).1;
    for i in 0 .. active.len() {
      if active[i].set() == set {
        active.remove(i);
        break;
      }
    }

    let mut bytes = vec![];
    for active in active {
      active.serialize(&mut bytes).unwrap();
    }
    Self::set(txn, &bytes);
    RetiredTributaryDb::set(txn, set, &());
  }
}

impl FirstPreprocessDb {
  pub fn save_first_preprocess(
    txn: &mut impl DbTxn,
    network: NetworkId,
    id_type: RecognizedIdType,
    id: &[u8],
    preprocess: &Vec<Vec<u8>>,
  ) {
    if let Some(existing) = FirstPreprocessDb::get(txn, network, id_type, id) {
      assert_eq!(&existing, preprocess, "saved a distinct first preprocess");
      return;
    }
    FirstPreprocessDb::set(txn, network, id_type, id, preprocess);
  }
}

impl ExpectedBatchDb {
  pub fn save_expected_batch(txn: &mut impl DbTxn, batch: &Batch) {
    LastReceivedBatchDb::set(txn, batch.network, &batch.id);
    Self::set(
      txn,
      batch.network,
      batch.id,
      &Blake2b::<U32>::digest(batch.instructions.encode()).into(),
    );
  }
}

impl HandoverBatchDb {
  pub fn set_handover_batch(txn: &mut impl DbTxn, set: ValidatorSet, batch: u32) {
    Self::set(txn, set, &batch);
    LookupHandoverBatchDb::set(txn, set.network, batch, &set.session);
  }
}

impl QueuedBatchesDb {
  pub fn queue(txn: &mut impl DbTxn, set: ValidatorSet, batch: &Transaction) {
    let mut batches = Self::get(txn, set).unwrap_or_default();
    batch.write(&mut batches).unwrap();
    Self::set(txn, set, &batches);
  }

  pub fn take(txn: &mut impl DbTxn, set: ValidatorSet) -> Vec<Transaction> {
    let batches_vec = Self::get(txn, set).unwrap_or_default();
    txn.del(Self::key(set));

    let mut batches: &[u8] = &batches_vec;
    let mut res = vec![];
    while !batches.is_empty() {
      res.push(Transaction::read(&mut batches).unwrap());
    }
    res
  }
}
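
The queue/take pair above implements a simple append-only FIFO per validator set; a hedged round-trip sketch (`txn`, `set`, `tx_a`, and `tx_b` are hypothetical values, not from the source):

// Within one transaction: queue two batches, then drain them.
QueuedBatchesDb::queue(&mut txn, set, &tx_a);
QueuedBatchesDb::queue(&mut txn, set, &tx_b);
// `take` reads them back in insertion order and deletes the key, so a second call
// returns an empty Vec.
assert_eq!(QueuedBatchesDb::take(&mut txn, set).len(), 2);
assert_eq!(QueuedBatchesDb::take(&mut txn, set).len(), 0);
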
File diff suppressed because it is too large
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff